id | text | dataset_id
---|---|---
47181
|
<filename>apimodule/auctionhouse.py
import requests

def getDumpFile(apiKey):
    """Return metadata for the latest Azjol-Nerub (EU) auction-house data dump, or None."""
    requestUri = "https://eu.api.battle.net/wow/auction/data/azjol-nerub?locale=en_US&apikey=%s" % apiKey
    r = requests.get(requestUri)
    jsonData = r.json()
    try:
        # Take the first entry of the "files" list; fall back to None if it is missing or empty.
        return jsonData["files"][0]
    except (KeyError, IndexError):
        return None
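
# Hedged usage sketch (added for illustration, not part of the original module):
# fetch the dump metadata with a placeholder API key. The "url" and "lastModified"
# field names are assumptions about the Battle.net response shape.
if __name__ == "__main__":
    dump = getDumpFile("YOUR_API_KEY")  # placeholder key
    if dump is not None:
        print(dump.get("url"), dump.get("lastModified"))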
|
StarcoderdataPython
|
3261542
|
# Read the input data
print("Enter the kilometers traveled on your motorcycle:")
km = float(input())
print("Enter the liters of fuel consumed during the trip:")
lt = float(input())
print(f"Kilometers traveled: {km}")
print(f"Liters of fuel consumed: {lt}")
print("Kilometers per liter:", km/lt)
|
StarcoderdataPython
|
1740043
|
<reponame>PillarDevelopment/sto
import binascii
import calendar
import datetime
from logging import Logger
from typing import Optional
from eth_account import Account
from eth_utils import from_wei, to_bytes
from web3 import Web3, HTTPProvider
from sto.ethereum.utils import check_good_node_url, check_good_private_key, create_web3
class NodeNotSynced(Exception):
pass
class NeedMoney(Exception):
pass
def diagnose(logger: Logger, node_url: str, private_key_hex: str) -> Optional[Exception]:
"""Run Ethereum connection and account diagnostics.
Check that the user has properly configured Ethereum node and private key.
Never fails. Exceptions are written to the logger output and returned.
"""
try:
check_good_node_url(node_url)
logger.info("Attempting to connect to Ethereum node %s", node_url)
web3 = create_web3(node_url)
logger.info("Connected to Ethereum node software %s", web3.version.node)
block_num = web3.eth.blockNumber
d = datetime.datetime.utcnow()
unix_time = calendar.timegm(d.utctimetuple())
block_info = web3.eth.getBlock(block_num)
last_time = block_info["timestamp"]
if last_time == 0:
raise NodeNotSynced("Looks like your node has not yet been synced.")
ago = unix_time - last_time
logger.info("Current Ethereum node block number: %d, last block %d seconds ago - compare this to data on https://etherscan.io", block_num, ago)
if ago < 0:
raise NodeNotSynced("Last block in the future? Do we have a clock with a wrong timezone somewhere?")
if ago > 1800:
raise NodeNotSynced("Looks like your node has not received a block for half an hour. It is most likely unsynced at the moment.")
check_good_private_key(private_key_hex)
logger.info("Using private key %s...", private_key_hex[0:3])
account = Account.privateKeyToAccount(to_bytes(hexstr=private_key_hex))
balance = web3.eth.getBalance(account.address)
logger.info("Address %s has ETH balance of %f", account.address, from_wei(balance, "ether"))
if balance == 0:
raise NeedMoney("Your Ethereum account {} needs to have ETH in order to use this tool".format(account.address))
except Exception as e:
logger.error("Diagnostics failure")
logger.exception(e)
return e
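
# Hedged usage sketch (added for illustration, not part of the original module):
# run the diagnostics against a local node. The node URL and private key below are
# placeholders; any failure is caught inside diagnose() and returned as an exception.
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)
    error = diagnose(logging.getLogger("diagnose"),
                     "http://localhost:8545",
                     "0x" + "aa" * 32)  # dummy 32-byte key in hex
    if error is not None:
        print("Diagnostics reported:", error)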
|
StarcoderdataPython
|
3323597
|
__author__ = '(Multiple)'
__project__ = "Food-Pantry-Inventory"
__creation_date__ = "06/03/2019"
from django.test import TestCase
from fpiweb.forms import \
BuildPalletForm,\
NewBoxForm
from fpiweb.models import Box, BoxType
class NewBoxFormTest(TestCase):
fixtures = ('BoxType', 'Constraints')
def test_save(self):
box_type = BoxType.objects.get(box_type_code='Evans')
post_data = {
'box_number': '27',
'box_type': box_type.pk,
}
form = NewBoxForm(post_data)
self.assertTrue(
form.is_valid(),
f"{form.errors} {form.non_field_errors()}",
)
form.save(commit=True)
box = form.instance
self.assertIsNotNone(box)
self.assertIsNotNone(box.pk)
self.assertEqual(box_type.box_type_qty, box.quantity)
class BuildPalletFormTest(TestCase):
def test_is_valid__location_not_specified(self):
form = BuildPalletForm()
self.assertFalse(form.is_valid())
|
StarcoderdataPython
|
3352272
|
# -*- coding:utf-8 -*-
class DocumentTraceFile(object):
"""
"""
def __init__(self, tracing_file_path, is_index_file=False):
if tracing_file_path:
self.file_path = tracing_file_path
self.is_index_file = is_index_file
else:
raise Exception("tracing_file_path is None.")
|
StarcoderdataPython
|
169622
|
<filename>lib/fama/gene_assembler/contig.py
"""This module describes Contig class"""
from collections import defaultdict
class Contig:
"""Contig objects stores data about an assembled contig: sequence,
number of mapped reads, list of aligned reads, sizes of alignment etc.
Attributes:
contig_id (str): contig identifier
sequence (str): contig sequence
read_count (:obj:defaultdict[str, int]): key is sample identifier,
value is count of reads for the sample
read_segments (:obj:defaultdict[str, int]): key is sample identifier,
value is length of all aligned segments for the sample
reads (list of str): identifiers of reads mapped to the contig
genes (dict[str,:obj:Gene]): genes predicted in the contig
"""
def __init__(self, contig_id='', sequence=''):
"""
Args:
contig_id (str): contig identifier
sequence (str): contig sequence
"""
self.contig_id = contig_id
self.sequence = sequence
self.read_count = defaultdict(int) # Count of reads for each sample
self.read_segments = defaultdict(int) # Length of all aligned segments in each sample
self.reads = []
self.genes = {}
def update_coverage(self, sample, segment_length):
"""Updates read_count and read_segments attributes"""
if sample in self.read_count:
self.read_count[sample] += 1
else:
self.read_count[sample] = 1
if sample in self.read_segments:
self.read_segments[sample] += segment_length
else:
self.read_segments[sample] = segment_length
def get_coverage(self, sample=None):
"""Returns read coverage for a given sample or total coverage"""
result = 0.0
if self.sequence == '':
pass
elif sample:
if sample in self.read_count and sample in self.read_segments:
result = self.read_segments[sample] / len(self.sequence)
else:
result = sum(self.read_segments.values()) / len(self.sequence)
return result
def add_gene(self, gene):
"""Adds Gene object to dictionary of genes"""
self.genes[gene.gene_id] = gene
def get_rpkm(self, sample_readcount, sample=None):
"""Returns RPKM score for a given sample or all samples"""
result = 0.0
try:
if sample in self.read_count:
result = self.read_count[sample] * 1000000000 / \
len(self.sequence) / sample_readcount
elif sample is None:
result = len(self.reads) * 1000000000 / len(self.sequence) / \
sample_readcount
except ZeroDivisionError:
print('ZeroDivisionError for contig',
self.contig_id, str(len(self.sequence)), 'bp',
sample_readcount, 'reads', 'sample', sample)
raise
return result
def get_rpm(self, sample_readcount, sample=None):
"""Returns RPM score for a given sample or all samples"""
result = 0.0
if sample in self.read_count:
result = self.read_count[sample] * 1000000 / sample_readcount
elif sample is None:
result = len(self.reads) * 1000000 / sample_readcount
return result
def get_read_count(self, sample=None):
"""Returns raw read count for a given sample or all samples"""
result = 0.0
if sample in self.read_count:
result = self.read_count[sample]
elif sample is None:
result = len(self.reads)
return result
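
# Illustrative sketch (added, not part of the original class): build a Contig,
# record coverage for one sample, and query coverage and RPKM. Values are made up.
if __name__ == "__main__":
    contig = Contig(contig_id='contig_1', sequence='ATGC' * 250)  # 1000 bp
    contig.reads = ['read_%d' % i for i in range(10)]
    for _ in range(10):
        contig.update_coverage('sample_1', segment_length=100)
    print(contig.get_coverage('sample_1'))                              # 1000 / 1000 bp = 1.0
    print(contig.get_rpkm(sample_readcount=100000, sample='sample_1'))  # 100.0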
|
StarcoderdataPython
|
1623122
|
<filename>src/data_processing/plot/__main__.py
#!/usr/bin/python
import sys, os
sys.path.append('/home/karim/workspace/vscode-python/ADNI_Data_processing/src/data_processing')
import config.config_read as rsd
import services.tools as tls
import io_data.data_acces_file as daf
import matplotlib.pyplot as plt
import numpy as np
#------------------------------------------------------------------------------------------
# Plot slices from the selected ROI
#------------------------------------------------------------------------------------------
def get_sag_slices(data_L, data_R, sag_l, sag_r):
selected_data_L = data_L[sag_l[0]:sag_l[1], :, :]
selected_data_R = data_R[sag_r[0]:sag_r[1], :, :]
return selected_data_L, selected_data_R
def get_cor_slices(data_L, data_R, cor_l, cor_r):
selected_data_L = data_L[:, cor_l[0]:cor_l[1], :]
selected_data_R = data_R[:, cor_r[0]:cor_r[1], :]
return selected_data_L, selected_data_R
def get_axi_slices(data_L, data_R, axi_l, axi_r):
selected_data_L = data_L[:, :, axi_l[0]:axi_l[1]]
selected_data_R = data_R[:, :, axi_r[0]:axi_r[1]]
return selected_data_L, selected_data_R
def plot_ROI_all(data_roi_L, data_roi_R, left_dims, right_dims):
sag_l, cor_l, axi_l = left_dims
sag_r, cor_r, axi_r = right_dims
sag_L, sag_R = get_sag_slices(data_roi_L, data_roi_R, sag_l, sag_r)
cor_L, cor_R = get_cor_slices(data_roi_L, data_roi_R, cor_l, cor_r)
axi_L, axi_R = get_axi_slices(data_roi_L, data_roi_R, axi_l, axi_r)
# plot 2D slice from ROI (m-1, m, m+1)
for i in range(3):
plt.subplot(3, 6, i+1)
plt.imshow(sag_L[i, :, :], cmap='gray', origin="lower")
plt.subplot(3, 6, 4+i)
plt.imshow(sag_R[i, :, :], cmap='gray', origin="lower")
plt.subplot(3, 6, 6+i+1)
plt.imshow(cor_L[:, i, :], cmap='gray', origin="lower")
plt.subplot(3, 6, 6+4+i)
plt.imshow(cor_R[:, i, :], cmap='gray', origin="lower")
plt.subplot(3, 6, 12+i+1)
plt.imshow(axi_L[:, :, i], cmap='gray', origin="lower")
plt.subplot(3, 6, 12+4+i)
plt.imshow(axi_R[:, :, i], cmap='gray', origin="lower")
plt.show()
def get_pickle_from_folder(folder):
res = []
for root, dirs, files in os.walk(folder):
for file in files:
if file.endswith('pkl'):
res.append(os.path.join(root, file))
return res
#------------------------------------------------------------------------------------------
# function::__main__ ::
#------------------------------------------------------------------------------------------
def main():
binaries_classes = ['AD-NC', 'AD-MCI', 'MCI-NC']
data_params = rsd.get_all_data_params()
root_path = data_params['adni_data_des']
name_cnv = root_path + tls.get_convention_name(data_params) + '/' + str(data_params['ROI_list'][data_params['ROI_selection']] + '/' + data_params['3D_or_2D'])
line = name_cnv + '/' + binaries_classes[0] + '/test/'
list_files = get_pickle_from_folder(line)
for i in list_files:
model = daf.read_model(i)
print(" HIPP_L : {} - HIPP_R: {} - Vector: {} - Label: {}".format(model.hippLeft.shape, model.hippRight.shape, model.hippMetaDataVector, model.hippLabel))
# print(model)
left_dims, right_dims = [[13,16],[13,16],[13,16]], [[13,16],[13,16],[13,16]]
plot_ROI_all(model.hippLeft, model.hippRight, left_dims, right_dims)
#------------------------------------------------------------------------------------------
# Start ->>>->>>
#------------------------------------------------------------------------------------------
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1637837
|
<filename>geometry.py
# -*- encoding=utf8 -*-
__author__ = "<NAME>"
import math
def calc_degrees(point1, point2):
"""
计算角度
:param point1:
:param point2:
:return:
"""
if point1[1] == point2[1]:
return 0
if point1[0] == point2[0]:
return 90
if point1[0] < point2[0]:
left = point1
right = point2
else:
left = point2
right = point1
result = math.degrees(math.atan(math.fabs(right[1] - left[1]) / math.fabs(right[0] - left[0])))
return result if left[1] > right[1] else result * -1
def calc_a(degrees_a, hypotenuse):
"""
计算直角三角形的A边,相对坐标长度
∠b
*
A * * C
* *
∠c ******** ∠a
B
:param degrees_a: A角
:param hypotenuse: 斜边
:return: A边长度
"""
return math.sin(math.radians(degrees_a)) * hypotenuse
def calc_b(degrees_a, hypotenuse):
"""
计算直角三角形的B边
∠b
*
A * * C
* *
∠c ******** ∠a
B
:param degrees_a: A角
:param hypotenuse: 斜边
:return: B边长度
"""
return math.cos(math.radians(degrees_a)) * hypotenuse
def calc_center_point(point_a, point_b):
"""
已知两点坐标,计算中间点坐标
:param point_a: A点坐标
:param point_b: B点坐标
:return: 中心点坐标
"""
return (point_a[0] + point_b[0]) // 2, \
(point_a[1] + point_b[1]) // 2
def calc_distance(point_a, point_b):
"""
计算两点之间的长度
:param point_a: A坐标
:param point_b: B坐标
:return: 两点之间的长度
"""
return ((point_a[1] - point_b[1]) ** 2 + (point_a[0] - point_b[0]) ** 2) ** 0.5
def calc_equilateral_triangle_point(point_a, point_b, degrees_a=None, top=True):
"""
计算等边三角形第三个点的坐标
:param point_a: A点坐标
:param point_b: B点坐标
:param degrees_a: A与B的夹角
:param top: 第三个点是在上面还是下面,默认上面
:return: C点坐标
"""
if not degrees_a:
degrees_a = calc_degrees(point_a, point_b)
length = calc_distance(point_a, point_b)
radians = math.radians(degrees_a + 60)
x_base = length * math.cos(radians)
y_base = length * math.sin(radians)
if top:
return point_a[0] + x_base, point_a[1] - y_base
else:
return point_b[0] - x_base, point_b[1] + y_base
def calc_isosceles_right_triangle_point_b(point_a, point_c, top=True):
"""
计算等腰直角三角形的B点
∠a
*
right_angle_length * *
* *
********* ∠b
∠c
:param point_a: A点坐标
:param point_c: C点坐标
:param top: B点坐标是否在上面
:return: B点坐标
"""
right_angle_length = calc_distance(point_a, point_c)
degrees = calc_degrees(point_a, point_c)
hypotenuse = (right_angle_length ** 2 * 2) ** 0.5
    # Angle of the line from A to C relative to the horizontal
degrees_a_to_c = 45 + degrees if top else 45 - degrees
relative_y = calc_a(degrees_a_to_c, hypotenuse)
relative_x = calc_b(degrees_a_to_c, hypotenuse)
if top:
return point_a[0] + relative_x, point_a[1] - relative_y
else:
return point_a[0] + relative_x, point_a[1] + relative_y
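
# Quick usage sketch (added for illustration): the helpers operate on (x, y) tuples;
# the sign convention in calc_degrees suggests screen-style coordinates (y grows downward).
if __name__ == "__main__":
    a, b = (0, 0), (100, 100)
    print(calc_degrees(a, b))       # -45.0
    print(calc_distance(a, b))      # ~141.42
    print(calc_center_point(a, b))  # (50, 50)
    print(calc_equilateral_triangle_point(a, b))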
|
StarcoderdataPython
|
4804929
|
import scipy.interpolate
import numpy as np
import xarray as xr
import os
import matplotlib.pyplot as plt
from regrid import get_ease_coords
def read_mask():
"""
Returns Northern hemisphere high resolution LOCI mask
"""
diri = '/oldhome/apbarret/projects/ancillary/masks'
fili = 'Nh_loci_land50_coast0km.1441x1441.bin'
xdim = 1441
ydim = 1441
mask = np.fromfile( os.path.join(diri, fili), dtype='uint8').reshape(xdim,ydim)
# Set mask to 0 for land [0] (and ice [101]) and 1 for ocean [255]. Cells off-globe
# are set to NaN
newmask = np.where( (mask == 0) | (mask == 101), 0, np.NaN )
newmask = np.where( (mask == 255), 1, newmask )
return newmask
def get_input_mask():
"""
Returns points (npoints,2) and values (npoints) arrays for BU LOCI mask.
"""
mask = read_mask()
coords = get_ease_coords('Nh')
values = mask[ np.isfinite(mask) ].flatten().astype('uint8')
points = np.array( [coords[0][ np.isfinite(mask) ].flatten(),
coords[1][ np.isfinite(mask) ].flatten()] ).T
return points, values
def main(verbose=True):
if verbose: print ('Getting mask and coordinates')
points, values = get_input_mask()
if verbose: print ('Getting EASE grid definition')
dstCoord = get_ease_coords('Na12')
if verbose: print ('Regridding mask')
mask = scipy.interpolate.griddata(points, values, dstCoord, method='nearest')
np.save('BU_loci_Na12.npy', mask)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3332429
|
<reponame>parada3desu/deepcomparer.py
import unittest
from deepcomparer import deep_compare
class TestCompareTuple(unittest.TestCase):
def test_empty_tuple(self):
"""
Test empty tuple
"""
data1: tuple = ()
data2: tuple = ()
result = deep_compare(data1, data2)
self.assertTrue(result)
def test_empty_tuple_inside_tuple(self):
"""
Test empty tuple inside tuple
"""
data1: tuple = (())
data2: tuple = (())
result = deep_compare(data1, data2)
self.assertTrue(result)
def test_same_size_tuples_with_different_values_with_many_values(self):
"""
        Test tuples of different sizes and values
"""
data1: tuple = (1, 2)
data2: tuple = (2, 2, 3)
result = deep_compare(data1, data2)
self.assertFalse(result)
def test_same_size_tuples_with_many_values(self):
"""
        Test same size tuples with equal values
"""
data1: tuple = (1, 2)
data2: tuple = (1, 2)
result = deep_compare(data1, data2)
self.assertTrue(result)
def test_empty_tuple_inside_tuple_with_many_values(self):
"""
        Test nested tuples with equal values
"""
data1: tuple = ((3, 2), (1, 2))
data2: tuple = ((3, 2), (1, 2))
result = deep_compare(data1, data2)
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
120390
|
from django.db.models import IntegerChoices, TextChoices
from django.utils.translation import gettext_lazy as _
class WinterStorageMethod(TextChoices):
ON_TRESTLES = "on_trestles", _("On trestles")
ON_TRAILER = "on_trailer", _("On a trailer")
UNDER_TARP = "under_tarp", _("Under a tarp")
class ApplicationStatus(TextChoices):
PENDING = "pending", _("Pending")
OFFER_GENERATED = "offer_generated", _("Offer generated")
OFFER_SENT = "offer_sent", _("Offer sent")
NO_SUITABLE_BERTHS = "no_suitable_berths", _("No suitable berths")
HANDLED = "handled", _("Handled")
REJECTED = "rejected", _("Rejected")
EXPIRED = "expired", _("Expired")
class ApplicationAreaType(TextChoices):
MARKED = "marked", _("Marked")
UNMARKED = "unmarked", _("Unmarked")
class ApplicationPriority(IntegerChoices):
LOW = 0, _("Low")
MEDIUM = 1, _("Medium")
HIGH = 2, _("High")
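
# Hedged usage note (added for illustration, not part of the original module):
# TextChoices / IntegerChoices members expose .value, .label and .choices, which is
# how these enums are typically consumed in model fields and serializers, e.g.
#     ApplicationStatus.PENDING.value   -> "pending"
#     ApplicationStatus.PENDING.label   -> "Pending"
#     ApplicationPriority.HIGH.value    -> 2
#     WinterStorageMethod.choices       -> [("on_trestles", "On trestles"), ...]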
|
StarcoderdataPython
|
1706183
|
<filename>BGWpy/core/__init__.py
from . import util
from .writable import *
from .F90io import *
from .runscript import *
from .task import *
from .workflow import *
|
StarcoderdataPython
|
3229517
|
import os
import json
from django.apps import AppConfig
import torch
from . import AI
class MainConfig(AppConfig):
name = 'main'
@staticmethod
def get_rating(sentence):
return AI.get(sentence)
|
StarcoderdataPython
|
4815057
|
<reponame>decached/taylr<filename>lib/exceptions.py
class APIException(Exception):
"""API Exception, which is to be used for all failed responses."""
def __init__(self, code, msg, details=None):
self.code = code
self.msg = msg
self.details = details
def __str__(self):
return repr({"code": self.code, "msg": self.msg, "details": self.details})
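
# Minimal usage sketch (added for illustration): raising and rendering the exception.
# The code/msg/details values below are placeholders.
if __name__ == "__main__":
    try:
        raise APIException(404, "Not Found", details={"resource": "user"})
    except APIException as exc:
        print(exc)  # {'code': 404, 'msg': 'Not Found', 'details': {'resource': 'user'}}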
|
StarcoderdataPython
|
3397530
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def main():
L = 10.
ne = 30
a = 1.
cfl = 0.5
Dmax = L/2.
tmax = Dmax/a
x = np.linspace(0., L, ne+1)
dx = x[1]-x[0]
u1 = np.zeros(len(x))
u2 = np.zeros(len(x))
line, = plt.plot(x,u2,'o-')
plt.xlabel('x')
plt.ylabel('u')
plt.ylim(-0.2,1.2)
plt.grid(True)
plt.draw()
t = 0
dt = cfl*dx/a
while t<tmax:
t+=dt
u2[0] = 1. # BC
        for i in range(1, len(x)):
u2[i] = u1[i] - cfl * (u1[i]-u1[i-1])
line.set_ydata(u2)
plt.title('t = %f' % t)
plt.draw()
plt.pause(0.001)
u1,u2 = u2,u1
if __name__ == "__main__":
main()
plt.show()
#raw_input('<ENTER TO QUIT>')
|
StarcoderdataPython
|
3341672
|
i = int(input())
for x in range(i):
n = int(input())
    a = list(map(int, input().split()))
    a.sort()
ans = 0
cur = 1
for y in range(len(a)):
if (a[y] >= cur):
ans += 1
cur += 1
print("Case #"+str(x+1),ans)
|
StarcoderdataPython
|
4835010
|
from unittest import TestCase
class TestSendReceive(TestCase):
pass
|
StarcoderdataPython
|
125280
|
from tests.base import DBTestCase
from tests.example_app.tables import Manager
class TestToDict(DBTestCase):
def test_to_dict(self):
"""
Make sure that `to_dict` works correctly.
"""
self.insert_row()
instance = Manager.objects().first().run_sync()
dictionary = instance.to_dict()
self.assertEqual(dictionary, {"id": 1, "name": "Guido"})
def test_filter_rows(self):
"""
Make sure that `to_dict` works correctly with a subset of columns.
"""
self.insert_row()
instance = Manager.objects().first().run_sync()
dictionary = instance.to_dict(Manager.name)
self.assertEqual(dictionary, {"name": "Guido"})
def test_to_dict_aliases(self):
"""
Make sure that `to_dict` works correctly with aliases.
"""
self.insert_row()
instance = Manager.objects().first().run_sync()
dictionary = instance.to_dict(
Manager.id, Manager.name.as_alias("title")
)
self.assertEqual(dictionary, {"id": 1, "title": "Guido"})
|
StarcoderdataPython
|
3398657
|
import importlib.util
def python_module_exists(module_name: str) -> bool:
spam_spec = importlib.util.find_spec(module_name)
return spam_spec is not None
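
# Usage sketch (added for illustration): check for a standard-library module and for
# a module that is unlikely to be installed.
if __name__ == "__main__":
    print(python_module_exists("json"))                # True
    print(python_module_exists("surely_missing_pkg"))  # False, assuming it is not installed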
|
StarcoderdataPython
|
34714
|
<gh_stars>0
#!/usr/bin/env python3
import os
import sys
from shutil import copyfile, move
import argparse
from glob import glob
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from Metronome import distributed_execution
def generate_astar_configs(domain_paths, domain_type):
config_list = []
for domain_path in domain_paths:
# strip leading . char from domain
domain_path_tmp = domain_path[1:] if domain_path[0] == '.' else domain_path
config = dict()
config['algorithmName'] = 'A_STAR'
config['actionDuration'] = 1
config['domainName'] = domain_type
config['terminationType'] = 'EXPANSION'
config['lookaheadType'] = 'DYNAMIC'
config['commitmentStrategy'] = 'SINGLE'
config['heuristicMultiplier'] = 1.0
config['domainPath'] = domain_path_tmp
config_list.append(config)
return config_list
def generate_agrd_configs(domain_paths, domain_type, goals):
config_list = []
for domain_path in domain_paths:
# strip leading . char from domain
domain_path_tmp = domain_path[1:] if domain_path[0] == '.' else domain_path
config = dict()
config['algorithmName'] = 'NAIVE_OPTIMAL_AGRD'
config['actionDuration'] = 1
config['interventionCost'] = 1
config['domainName'] = domain_type
config['terminationType'] = 'EXPANSION'
config['subjectAlgorithm'] = 'NAIVE_DYNAMIC'
config['timeLimit'] = 3600_000_000_000 # 3600 second (60 min) timeout
config['maxDepth'] = 1000
config['goalPriors'] = [1 / goals for _ in range(goals)]
config['subjectGoal'] = 0
config['domainPath'] = domain_path_tmp
config_list.append(config)
return config_list
def filter_domains(generated_domain_paths, base_domain_name, domain_type='GRID_WORLD', domain_ext='.vw',
out_path='./filtered', write=True):
this_cwd = os.getcwd()
success_index = 0
if not os.path.exists(out_path):
os.makedirs(out_path)
configs = generate_astar_configs(generated_domain_paths, domain_type)
print('Begin filtering of generated domains')
os.chdir('../..')
results = distributed_execution(configs, this_cwd)
os.chdir(this_cwd)
success_domains = []
for result in results:
if (result['success']):
print(f'Domain {result["configuration"]["domainPath"]} is solvable')
success_domains.append(result["configuration"]["domainPath"])
if write:
new_file_name = os.path.join(out_path, base_domain_name + str(success_index) + domain_ext)
print(f'Outputting to {new_file_name}')
move('.' + result['configuration']['domainPath'], new_file_name)
success_index += 1
else:
print(result['errorMessage'])
print(f'Domain {result["configuration"]["domainPath"]} was not successfully solved')
return success_domains
def get_with_default(d, key, default_value=None, default_producer=None):
if key not in d:
d[key] = default_value if default_producer is None else default_producer()
return d[key]
def get_with_default_list(d, key):
return get_with_default(d, key, default_value=[])
def get_with_default_dict(d, key):
return get_with_default(d, key, default_value=dict())
def get_depth_upper_bound(result):
most = 0
second_most = 0
idx = 0
while f'Goal_{idx}' in result:
cost = result[f'Goal_{idx}']
if cost > most:
second_most = most
most = cost
elif cost > second_most:
second_most = cost
idx += 1
return second_most
def filter_agrd_chunk(config, chunk_instances, inactive_out_dir, followup_out_dir):
this_cwd = os.getcwd()
base_domain_name = config['base_domain_name']
domain_ext = config['domain_ext']
path_to_instance = {
os.path.join(
config['source_dir'],
filename
): filename
for filename in chunk_instances
}
configs = generate_agrd_configs(path_to_instance.keys(), config['domain_type'], config['num_goals'])
os.chdir('../..')
results = distributed_execution(configs, this_cwd)
os.chdir(this_cwd)
successes_by_depth_bound = dict()
timeouts_by_depth_bound = dict()
for result in results:
result['depthUpperBound'] = get_depth_upper_bound(result)
instance_path = result["configuration"]["domainPath"]
if instance_path[0] != '.':
instance_path = '.' + instance_path
instance_filename = path_to_instance[instance_path]
if result['success'] and result.get('observerIsActive', 0) > 0:
print(f'Observer was active in domain {instance_path}')
get_with_default_list(successes_by_depth_bound, result['depthUpperBound'])\
.append((instance_path, instance_filename, base_domain_name, domain_ext))
else:
if result['success']:
print(f'Observer was inactive in domain {instance_path}')
move(instance_path, os.path.join(inactive_out_dir, instance_filename))
else:
err_msg = result["errorMessage"]
print(f'Failed to solve domain {instance_path} with error {err_msg}')
lower_err = err_msg.lower()
if 'timeout' in lower_err:
get_with_default_list(timeouts_by_depth_bound, result['depthUpperBound'])\
.append((instance_path, instance_filename, base_domain_name, domain_ext))
elif 'dead end' in lower_err or 'subject transitioned' in lower_err or 'follow-up' in lower_err:
# follow up on instances that fail for reasons that shouldn't happen...
move(instance_path, os.path.join(followup_out_dir, instance_filename))
else:
move(instance_path, os.path.join(inactive_out_dir, instance_filename))
return successes_by_depth_bound, timeouts_by_depth_bound
def move_agrd_filter_results(successes_info_by_depth_bound, timeouts_info_by_depth_bound):
"""Moves successes to new directory, but only if all instances
at the relevant depth bound succeeded"""
# loop through timeouts first to purge successes dict
meta_files_by_out = dict()
for depth_bound, timeout_info in timeouts_info_by_depth_bound.items():
for out_dir, timeout_list in timeout_info.items():
print(f'Moving timeout instances at depth bound {depth_bound} for out dir {out_dir}')
timeout_dir = os.path.join(out_dir, 'timeout')
meta_file = get_with_default(
meta_files_by_out, out_dir,
default_producer=lambda: open(os.path.join(out_dir, 'stats.log'), 'w'))
success_info = get_with_default_dict(successes_info_by_depth_bound, depth_bound)
successes_list = get_with_default_list(success_info, out_dir)
num_timeouts = len(timeout_list)
num_successes = len(successes_list)
total = num_timeouts + num_successes
fraction_timeout = float(num_timeouts) / float(total)
meta_log_text = f'Depth Bound {depth_bound}: '\
f'{num_successes} successes, {num_timeouts} timeouts, {fraction_timeout} timeout fraction'
to_timeout_dir = timeout_list[:]
if num_timeouts <= 3 and fraction_timeout <= 0.01:
# tolerate up to 3 timeouts up to 1% of instances
meta_log_text += ' (ignoring timeouts, writing successes)'
else:
to_timeout_dir += successes_list
success_info[out_dir] = [] # wipe the list so we don't write to success dir later
meta_file.write(meta_log_text + '\n')
for instance_path, instance_filename, _, _ in to_timeout_dir:
move(instance_path, os.path.join(timeout_dir, instance_filename))
for file in meta_files_by_out.values():
file.write('\n=====================================\n\n')
success_indices = {}
for depth_bound, success_info in successes_info_by_depth_bound.items():
for out_dir, successes_list in success_info.items():
if len(successes_list) == 0:
continue
print(f'Moving successful instances at depth bound {depth_bound} for out dir {out_dir}')
meta_file = get_with_default(
meta_files_by_out, out_dir,
default_producer=lambda: open(os.path.join(out_dir, 'stats.log'), 'w'))
meta_file.write(f'Depth Bound {depth_bound}: {len(successes_list)} successes\n')
for instance_path, _, base_domain_name, domain_ext in successes_list:
prefix = os.path.join(out_dir, base_domain_name)
new_file_path = prefix + str(get_with_default(success_indices, prefix, 0)) + domain_ext
success_indices[prefix] += 1
move(instance_path, new_file_path)
for file in meta_files_by_out.values():
file.close()
def filter_active_observer(domain_configs, chunk_size=1000):
"""Filter to only those where the observer is active.
Dict schema:
source_dir: str of the source directory
base_domain_name: str prefix for all instance filenames
num_instances: number of instances being filtered with this config
num_goals: number of goals in each instance (must be same across all instances)
domain_type: 'GRID_WORLD', 'LOGISTICS', etc
domain_ext: '.vw', '.logistics', etc
out_dir: str of the output directory
"""
successes_info_by_depth_bound = dict()
timeouts_info_by_depth_bound = dict()
for config in domain_configs:
base_domain_name = config['base_domain_name']
domain_ext = config['domain_ext']
out_dir = config['out_dir']
src_dir = config['source_dir']
if src_dir[-1] != '/':
src_dir += '/'
print(f'Filtering {base_domain_name} instances')
timeout_out_dir = os.path.join(out_dir, 'timeout')
if not os.path.exists(timeout_out_dir):
os.makedirs(timeout_out_dir)
inactive_out_dir = os.path.join(out_dir, 'failed')
if not os.path.exists(inactive_out_dir):
os.makedirs(inactive_out_dir)
followup_out_dir = os.path.join(out_dir, 'follow-up')
if not os.path.exists(followup_out_dir):
os.makedirs(followup_out_dir)
domain_instance_filenames = [
filepath[len(src_dir):]
for filepath in glob(src_dir + base_domain_name + '*' + domain_ext)
]
idx = 0
while len(domain_instance_filenames) > idx:
# new_file_path = os.path.join(active_out_dir, base_domain_name + str(success_index) + domain_ext)
chunk_instances = domain_instance_filenames[idx:idx + chunk_size]
print(f'Begin filtering {base_domain_name} {idx} through '
f'{min(idx + chunk_size - 1, len(domain_instance_filenames) - 1)}')
tmp_successes, tmp_failures = filter_agrd_chunk(config, chunk_instances, inactive_out_dir, followup_out_dir)
for key, value in tmp_successes.items():
all_success_info = get_with_default_dict(successes_info_by_depth_bound, key)
group_success_list = get_with_default_list(all_success_info, out_dir)
group_success_list += value
for key, value in tmp_failures.items():
all_failure_info = get_with_default_dict(timeouts_info_by_depth_bound, key)
group_failure_list = get_with_default_list(all_failure_info, out_dir)
group_failure_list += value
idx += chunk_size
move_agrd_filter_results(successes_info_by_depth_bound, timeouts_info_by_depth_bound)
def run_filter_observer(args):
domain_identifier = args.domain_identifier
configs = []
if domain_identifier == 'uniform':
for size in range(7, 11):
base_domain_name = f'uniform{size}_{size}-'
for goals in range(2, 5):
dir_name = f'./gridworld/{goals}goal/filtered'
num_instances = len(glob(os.path.join(dir_name, base_domain_name) + '*'))
configs.append({
'source_dir': dir_name,
'base_domain_name': base_domain_name,
'num_instances': num_instances,
'num_goals': goals,
'domain_type': 'GRID_WORLD',
'domain_ext': '.vw',
'out_dir': f'./agrd/uniform/{goals}goal'
})
elif domain_identifier == 'rooms':
for idx in range(10):
base_domain_name = f'64room_tiny_00{idx}-scn'
for goals in range(2, 5):
dir_name = f'./gridmap/{goals}goal/filtered'
num_instances = len(glob(os.path.join(dir_name, base_domain_name) + '*'))
configs.append({
'source_dir': dir_name,
'base_domain_name': base_domain_name,
'num_instances': num_instances,
'num_goals': goals,
'domain_type': 'GRID_WORLD',
'domain_ext': '.vw',
'out_dir': f'./agrd/rooms/{goals}goal'
})
elif domain_identifier == 'logistics':
pass
for locs in range(7, 12):
# for locs in range(9, 10):
for goals in range(2, 5):
# for goals in range(4, 5):
base_domain_name = f'geometric_0.4dist_{goals}goal_{locs}loc_3pkg_1trk_'
dir_name = f'./logistics/{goals}goal'
num_instances = len(glob(os.path.join(dir_name, base_domain_name) + '*'))
if num_instances == 0:
continue
configs.append({
'source_dir': dir_name,
'base_domain_name': base_domain_name,
'num_instances': num_instances,
'num_goals': goals,
'domain_type': 'LOGISTICS',
'domain_ext': '.logistics',
'out_dir': f'./agrd/logistics/{goals}goal'
# 'out_dir': f'./temp/logistics/{goals}goal'
})
else:
raise Exception(f'Unknown domain identifier: {domain_identifier}')
# log_config = {
# 'source_dir': './logistics',
# 'base_domain_name': 'geometric_0.4dist_3goal_15loc_3pkg_1trk_',
# 'num_instances': 2,
# 'num_goals': 3,
# 'domain_type': 'LOGISTICS',
# 'domain_ext': '.logistics',
# 'out_dir': './test/logistics'
# }
filter_active_observer(configs, 1000)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Quick and dirty CLI for filtering AGRD instances by only '
'those where the observer can actually do something. '
'To use, edit the file')
# AS OF 1/6/20, valid options are 'logistics', 'rooms', 'uniform'
parser.add_argument('domain_identifier', type=str,
help='String identifier for your set of domains.')
run_filter_observer(parser.parse_args())
|
StarcoderdataPython
|
4821915
|
<filename>app/models/order.py
# -*- encoding: utf-8 -*-
"""
@File : order.py
@Time : 2020/4/24 13:53
@Author : Tianjin
@Email : <EMAIL>
@Software: PyCharm
"""
from lin.interface import InfoCrud as Base
from sqlalchemy import Column, Integer, ForeignKey, Float, Boolean
class Order(Base):
__tablename__ = "Order"
id = Column("id", Integer, primary_key=True, autoincrement=True, comment="订单id")
commId = Column("commId", Integer, ForeignKey("Commodity.id"), nullable=False, comment="商品id")
money = Column("money", Float, nullable=False, comment="商品金额")
payment = Column("payment", Boolean, nullable=False, comment="是否支付")
receiving = Column("receiving", Boolean, nullable=False, comment="是否收货")
refund = Column("refund", Boolean, nullable=False, comment="是否退款")
conut = Column("count", Integer, nullable=False, comment="商品数量")
|
StarcoderdataPython
|
1690591
|
<reponame>martincochran/score-minion
#!/usr/bin/env python
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import unittest
import test_env_setup
import list_id_bimap
class ListIdBiMapTest(unittest.TestCase):
def testIsomorphism(self):
"""Verify list is consistent on all known lists."""
for list_id in list_id_bimap.ListIdBiMap.ALL_LISTS:
division, age_bracket, league = self._GetListProperties(list_id)
self.assertEquals(list_id, self._GetListId(division, age_bracket, league))
def _GetListProperties(self, list_id):
"""Convenience method to save characters in the test cases.
Returns the structured list properties for the given list id.
"""
return list_id_bimap.ListIdBiMap.GetStructuredPropertiesForList(list_id)
def _GetListId(self, division, age_bracket, league):
"""Convenience method to save characters in the test cases.
Returns the list id for the given structured properties.
"""
return list_id_bimap.ListIdBiMap.GetListId(division, age_bracket, league)
|
StarcoderdataPython
|
1781884
|
from datetime import datetime, timedelta
import os
import uuid
import jwt
import json
import requests
from functools import wraps
from urlparse import parse_qs, parse_qsl
from urllib import urlencode
from flask import Flask, g, send_file, request, redirect, url_for, jsonify, send_from_directory, Response
from requests_oauthlib import OAuth1
from jwt import DecodeError, ExpiredSignature
import sys
from model.user import User
from service.waston import RelationshipExtraction, PersonalityInsights
import tweepy
import facebook as fb
from py2neo import Graph, Node, Relationship
reload(sys)
sys.setdefaultencoding("utf-8")
app = Flask(__name__)
auth = tweepy.OAuthHandler("VGbGua2zcrvAt8q7rFzYcF7Pp", "<KEY>")
auth.set_access_token("<KEY>",
"<KEY>")
api = tweepy.API(auth)
graph = Graph("https://551293319a25f:2nKvJUg3aU5jNdRuRjGdIKEG5Kf5GoP4tyrI8WQc@neo-551293319a25f-364459c455.do-stories.graphstory.com:7473/db/data")
#graph = Graph()
app.config.from_pyfile('flaskapp.cfg')
relationshipExtration = RelationshipExtraction(user="de7bcf6d-01d9-4226-932f-b283302af6a2",
password="<PASSWORD>")
personalityInsights = PersonalityInsights(user="0<PASSWORD>", password="<PASSWORD>")
profileee = {
"id": "sbh",
"source": "*UNKNOWN*",
"tree": {
"children": [
{
"children": [
{
"category": "personality",
"children": [
{
"category": "personality",
"children": [
{
"category": "personality",
"id": "Adventurousness",
"name": "Adventurousness",
"percentage": 0.7160526489554446,
"sampling_error": 0.10711332800000001
},
{
"category": "personality",
"id": "Artistic interests",
"name": "Artistic interests",
"percentage": 0.3181832458983307,
"sampling_error": 0.20641926400000002
},
{
"category": "personality",
"id": "Emotionality",
"name": "Emotionality",
"percentage": 0.2515172364058379,
"sampling_error": 0.115189568
},
{
"category": "personality",
"id": "Imagination",
"name": "Imagination",
"percentage": 0.8641701428422862,
"sampling_error": 0.145667632
},
{
"category": "personality",
"id": "Intellect",
"name": "Intellect",
"percentage": 0.8908186242106095,
"sampling_error": 0.128763392
},
{
"category": "personality",
"id": "Liberalism",
"name": "Authority-challenging",
"percentage": 0.898454169007272,
"sampling_error": 0.168568352
}
],
"id": "Openness",
"name": "Openness",
"percentage": 0.8507771685899128,
"sampling_error": 0.130284112
},
{
"category": "personality",
"children": [
{
"category": "personality",
"id": "Achievement striving",
"name": "Achievement striving",
"percentage": 0.6585127054883937,
"sampling_error": 0.13753696
},
{
"category": "personality",
"id": "Cautiousness",
"name": "Cautiousness",
"percentage": 0.8063779039161849,
"sampling_error": 0.160483392
},
{
"category": "personality",
"id": "Dutifulness",
"name": "Dutifulness",
"percentage": 0.3181116384939571,
"sampling_error": 0.206189696
},
{
"category": "personality",
"id": "Orderliness",
"name": "Orderliness",
"percentage": 0.3165304417521884,
"sampling_error": 0.135897936
},
{
"category": "personality",
"id": "Self-discipline",
"name": "Self-discipline",
"percentage": 0.3153744839220475,
"sampling_error": 0.16816399999999998
},
{
"category": "personality",
"id": "Self-efficacy",
"name": "Self-efficacy",
"percentage": 0.7088051511152613,
"sampling_error": 0.175623264
}
],
"id": "Conscientiousness",
"name": "Conscientiousness",
"percentage": 0.5435483133388536,
"sampling_error": 0.152381088
},
{
"category": "personality",
"children": [
{
"category": "personality",
"id": "Activity level",
"name": "Activity level",
"percentage": 0.35820197882231886,
"sampling_error": 0.216853088
},
{
"category": "personality",
"id": "Assertiveness",
"name": "Assertiveness",
"percentage": 0.2838920867984583,
"sampling_error": 0.208304352
},
{
"category": "personality",
"id": "Cheerfulness",
"name": "Cheerfulness",
"percentage": 0.14455233450895522,
"sampling_error": 0.16188628800000002
},
{
"category": "personality",
"id": "Excitement-seeking",
"name": "Excitement-seeking",
"percentage": 0.17235759199332115,
"sampling_error": 0.161049568
},
{
"category": "personality",
"id": "Friendliness",
"name": "Outgoing",
"percentage": 0.21600564357324195,
"sampling_error": 0.1768172
},
{
"category": "personality",
"id": "Gregariousness",
"name": "Gregariousness",
"percentage": 0.13842598921316177,
"sampling_error": 0.196135264
}
],
"id": "Extraversion",
"name": "Extraversion",
"percentage": 0.23726267395633333,
"sampling_error": 0.18413272
},
{
"category": "personality",
"children": [
{
"category": "personality",
"id": "Altruism",
"name": "Altruism",
"percentage": 0.2754933265004837,
"sampling_error": 0.202923504
},
{
"category": "personality",
"id": "Cooperation",
"name": "Cooperation",
"percentage": 0.6307012919481465,
"sampling_error": 0.188078544
},
{
"category": "personality",
"id": "Modesty",
"name": "Modesty",
"percentage": 0.22002846111606778,
"sampling_error": 0.195163392
},
{
"category": "personality",
"id": "Morality",
"name": "Uncompromising",
"percentage": 0.29521221940977527,
"sampling_error": 0.17199344
},
{
"category": "personality",
"id": "Sympathy",
"name": "Sympathy",
"percentage": 0.8963908479208201,
"sampling_error": 0.20470824
},
{
"category": "personality",
"id": "Trust",
"name": "Trust",
"percentage": 0.4465068968715436,
"sampling_error": 0.19620072
}
],
"id": "Agreeableness",
"name": "Agreeableness",
"percentage": 0.197592943914049,
"sampling_error": 0.17107476800000002
},
{
"category": "personality",
"children": [
{
"category": "personality",
"id": "Anger",
"name": "Fiery",
"percentage": 0.46038608777319856,
"sampling_error": 0.107145328
},
{
"category": "personality",
"id": "Anxiety",
"name": "Prone to worry",
"percentage": 0.2925613308885644,
"sampling_error": 0.120479872
},
{
"category": "personality",
"id": "Depression",
"name": "Melancholy",
"percentage": 0.4058642362110278,
"sampling_error": 0.14824848
},
{
"category": "personality",
"id": "Immoderation",
"name": "Immoderation",
"percentage": 0.16728870664411802,
"sampling_error": 0.10801417599999999
},
{
"category": "personality",
"id": "Self-consciousness",
"name": "Self-consciousness",
"percentage": 0.5367949472615889,
"sampling_error": 0.170005808
},
{
"category": "personality",
"id": "Vulnerability",
"name": "Susceptible to stress",
"percentage": 0.41333637268687284,
"sampling_error": 0.123342656
}
],
"id": "Neuroticism",
"name": "Emotional range",
"percentage": 0.4782057339658776,
"sampling_error": 0.10990580800000001
}
],
"id": "Openness_parent",
"name": "Openness",
"percentage": 0.8507771685899128
}
],
"id": "personality",
"name": "Big 5 "
},
{
"children": [
{
"category": "needs",
"children": [
{
"category": "needs",
"id": "Challenge",
"name": "Challenge",
"percentage": 0.9454631311618445,
"sampling_error": 0.571675504
},
{
"category": "needs",
"id": "Closeness",
"name": "Closeness",
"percentage": 0.37430779536880737,
"sampling_error": 0.664986656
},
{
"category": "needs",
"id": "Curiosity",
"name": "Curiosity",
"percentage": 0.8974415472114874,
"sampling_error": 0.601831648
},
{
"category": "needs",
"id": "Excitement",
"name": "Excitement",
"percentage": 0.46240646412915437,
"sampling_error": 0.597841328
},
{
"category": "needs",
"id": "Harmony",
"name": "Harmony",
"percentage": 0.8769879010128687,
"sampling_error": 0.656984848
},
{
"category": "needs",
"id": "Ideal",
"name": "Ideal",
"percentage": 0.06182001068498667,
"sampling_error": 0.5687984159999999
},
{
"category": "needs",
"id": "Liberty",
"name": "Liberty",
"percentage": 0.8842968320182879,
"sampling_error": 0.5426626720000001
},
{
"category": "needs",
"id": "Love",
"name": "Love",
"percentage": 0.4198132194180422,
"sampling_error": 0.695262304
},
{
"category": "needs",
"id": "Practicality",
"name": "Practicality",
"percentage": 0.2510956138665641,
"sampling_error": 0.632711856
},
{
"category": "needs",
"id": "Self-expression",
"name": "Self-expression",
"percentage": 0.8612635859116711,
"sampling_error": 0.618786896
},
{
"category": "needs",
"id": "Stability",
"name": "Stability",
"percentage": 0.7455088528118455,
"sampling_error": 0.6569906719999999
},
{
"category": "needs",
"id": "Structure",
"name": "Structure",
"percentage": 0.9856815998415455,
"sampling_error": 0.023924848
}
],
"id": "Structure_parent",
"name": "Structure",
"percentage": 0.9856815998415455
}
],
"id": "needs",
"name": "Needs"
},
{
"children": [
{
"category": "values",
"children": [
{
"category": "values",
"id": "Conservation",
"name": "Conservation",
"percentage": 0.1590310821416115,
"sampling_error": 0.228751744
},
{
"category": "values",
"id": "Openness to change",
"name": "Openness to change",
"percentage": 0.5969809255902321,
"sampling_error": 0.241299504
},
{
"category": "values",
"id": "Hedonism",
"name": "Hedonism",
"percentage": 0.11680072978737648,
"sampling_error": 0.234514768
},
{
"category": "values",
"id": "Self-enhancement",
"name": "Self-enhancement",
"percentage": 0.7972447610009217,
"sampling_error": 0.22006544
},
{
"category": "values",
"id": "Self-transcendence",
"name": "Self-transcendence",
"percentage": 0.5762760484207429,
"sampling_error": 0.21310896
}
],
"id": "Hedonism_parent",
"name": "Hedonism",
"percentage": 0.11680072978737648
}
],
"id": "values",
"name": "Values"
}
],
"id": "r",
"name": "root"
},
"word_count": 2953
}
def create_token(user):
payload = {
'sub': user['id'],
'iat': datetime.now(),
'exp': datetime.now() + timedelta(days=14)
}
token = jwt.encode(payload, app.config['TOKEN_SECRET'])
return token.decode('unicode_escape')
def parse_token(req):
token = req.headers.get('Authorization').split()[1]
return jwt.decode(token, app.config['TOKEN_SECRET'])
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not request.headers.get('Authorization'):
response = jsonify(message='Missing authorization header')
response.status_code = 401
return response
try:
payload = parse_token(request)
except DecodeError:
response = jsonify(message='Token is invalid')
response.status_code = 401
return response
except ExpiredSignature:
response = jsonify(message='Token has expired')
response.status_code = 401
return response
g.user_id = payload['sub']
return f(*args, **kwargs)
return decorated_function
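# Usage note (added for illustration): a route is protected by stacking the decorator
# under the route decorator, e.g.
#
#     @app.route('/api/private')
#     @login_required
#     def private():
#         return jsonify(user_id=g.user_id)
#
# parse_token() expects an "Authorization: Bearer <token>" header.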
# Routes
@app.route('/')
def index():
return send_file('static/index.html')
@app.route('/barviz')
def bar():
return send_file('static/bar.html')
@app.route('/api/me')
@login_required
def me():
user = User().get_user_by_id(graph, g.user_id)
return jsonify(User().to_json(user))
@app.route('/api/me/update', methods=['PUT'])
@login_required
def update_me():
user = User().get_user_by_id(graph, g.user_id)
user["email"] = request.json['email']
user["display_name"] = request.json['displayName']
user.push()
return jsonify(User().to_json(user))
@app.route('/api/v1/savesearch', methods=['POST'])
def save_search():
return jsonify({"message":"cool"})
@app.route('/auth/login', methods=['POST'])
def login():
user = User().get_user_by_email(graph, request.json['email'])
print(user['password'])
if not user or not User().check_password(user, request.json['password']):
response = jsonify(message='Wrong Email or Password')
response.status_code = 401
return response
token = create_token(user)
return jsonify(token=token)
@app.route('/auth/signup', methods=['POST'])
def signup():
user = User().save_user(graph, email=request.json['email'], name=request.json['displayName'],
password=request.json['password'])
token = create_token(user)
return jsonify(token=token)
@app.route('/auth/facebook', methods=['POST'])
def facebook():
access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token'
graph_api_url = 'https://graph.facebook.com/v2.3/me'
params = {
'client_id': request.json['clientId'],
'redirect_uri': request.json['redirectUri'],
'client_secret': app.config['FACEBOOK_SECRET'],
'code': request.json['code']
}
# Step 1. Exchange authorization code for access token.
r = requests.get(access_token_url, params=params)
access_token = dict(parse_qsl(r.text))
token = json.loads(r.text)
print token
facebook_token = Node("FBToken", access_token=token['access_token'], expires_in=token['expires_in'])
# Step 2. Retrieve information about the current user.
r = requests.get(graph_api_url, params=token)
profile = json.loads(r.text)
print profile
# Step 3. (optional) Link accounts.
if request.headers.get('Authorization'):
user = graph.find_one("User", "facebook", profile['id'])
if user:
response = jsonify(message='There is already a Facebook account that belongs to you')
response.status_code = 409
return response
payload = parse_token(request)
user = User().get_user_by_id(graph, payload['sub'])
if not user:
response = jsonify(message='User not found')
response.status_code = 400
return response
u = Node("User", id=str(uuid.uuid4()), email=profile['email'], facebook=profile['id'],
display_name=profile['name'])
graph.create(u)
token = create_token(u)
return jsonify(token=token)
# Step 4. Create a new account or return an existing one.
user = graph.find_one("User", "facebook", profile['id'])
if user:
token = create_token(user)
return jsonify(token=token)
u = Node("User", id=str(uuid.uuid4()), email=profile['email'], facebook=profile['id'], display_name=profile['name'])
user_facebook_token = Relationship(u, "HAS", facebook_token, since=datetime.now())
graph.create(user_facebook_token)
token = create_token(u)
return jsonify(token=token)
@app.route('/auth/google', methods=['POST'])
def google():
access_token_url = 'https://accounts.google.com/o/oauth2/token'
people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'
payload = dict(client_id=request.json['clientId'],
redirect_uri=request.json['redirectUri'],
client_secret=app.config['GOOGLE_SECRET'],
code=request.json['code'],
grant_type='authorization_code')
# Step 1. Exchange authorization code for access token.
r = requests.post(access_token_url, data=payload)
token = json.loads(r.text)
headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])}
# Step 2. Retrieve information about the current user.
r = requests.get(people_api_url, headers=headers)
profile = json.loads(r.text)
print profile
user = graph.find_one("User", "google", profile['sub'])
if user:
token = create_token(user)
return jsonify(token=token)
u = Node("User", id=str(uuid.uuid4()), email=profile['email'], google=profile['sub'], display_name=profile['name'])
graph.create(u)
token = create_token(u)
return jsonify(token=token)
@app.route('/auth/linkedin', methods=['POST'])
def linkedin():
access_token_url = 'https://www.linkedin.com/uas/oauth2/accessToken'
people_api_url = 'https://api.linkedin.com/v1/people/~:(id,first-name,last-name,email-address)'
payload = dict(client_id=request.json['clientId'],
redirect_uri=request.json['redirectUri'],
client_secret=app.config['LINKEDIN_SECRET'],
code=request.json['code'],
grant_type='authorization_code')
# Step 1. Exchange authorization code for access token.
r = requests.post(access_token_url, data=payload)
access_token = json.loads(r.text)
params = dict(oauth2_access_token=access_token['access_token'],
format='json')
# Step 2. Retrieve information about the current user.
r = requests.get(people_api_url, params=params)
profile = json.loads(r.text)
print profile
user = graph.find_one("User", 'linkedin', profile['id'])
if user:
token = create_token(user)
return jsonify(token=token)
u = Node("User", id=str(uuid.uuid4()), email=profile['emailAddress'], linkedin=profile['id'],
display_name=profile['lastName'] + " " + profile['firstName'])
graph.create(u)
token = create_token(u)
return jsonify(token=token)
@app.route('/api/v1/getpeople')
def searchUser():
users = api.search_users(request.args['name'])
return jsonify({'users': [
{'name': user.name,
'description': user.description,
'profile_image_url': user.profile_image_url,
'screen_name': user.screen_name
} for user in users
]})
@app.route('/api/v1/gettwitteruser')
def searchtwitterUser():
users = api.search_users(request.args['name'])
return jsonify({'users': [
user for user in users
]})
@app.route('/api/v1/interests/<screen_name>')
def parseInterests(screen_name):
# tweets = api.user_timeline(id=screen_name, )
tweets = tweepy.Cursor(api.user_timeline, id=screen_name).items(40)
text = ""
for tweet in tweets:
text += tweet.text + "\n " + "\n"
relationship = relationshipExtration.extractRelationship(text)
nodes, edges = relationshipExtration.parseMentions(relationship)
return jsonify({'nodes': nodes,
'edges': edges})
# @app.route('/api/v1/personality/<screen_name>')
# def getPersonaliy(screen_name):
# # tweets = api.user_timeline(id=screen_name, )
# tweets = tweepy.Cursor(api.user_timeline, id=screen_name).items(200)
# text = ""
# for tweet in tweets:
# text += tweet.text + "\n " + "\n"
# profile = personalityInsights.getProfile(text)
# nodes, edges = personalityInsights.flattenPortrait(profile["tree"])
#
# return jsonify({'nodes': nodes,'edges': edges})
@app.route('/api/v1/personality')
def getPersonaliy():
#nodes, edges = personalityInsights.flattenPortrait(profileee["tree"])
nodes,edges=personalityInsights.flattenPortrait2(profileee["tree"])
#return jsonify({'nodes': nodes,'edges': edges})
return jsonify({'nodes':nodes, 'edges':edges})
@app.route('/api/v1/bar')
def getBar():
data= personalityInsights.datadata(profileee["tree"])
return jsonify({'data': data})
@app.route('/api/v1/viz/<screen_name>')
def getViz(screen_name):
# tweets = api.user_timeline(id=screen_name, )
tweets = tweepy.Cursor(api.user_timeline, id=screen_name).items(200)
text = ""
for tweet in tweets:
text += tweet.text + "\n " + "\n"
profile = personalityInsights.getProfile(text)
viz = personalityInsights.requestVisualization(profile)
return viz
@app.route('/api/v1/fbpost')
def getfbposts():
graph = fb.GraphAPI(
access_token='<KEY>')
friends = graph.get_connections(id='me', connection_name='friends')
return jsonify({'fiends': friends})
@app.route('/<path:resource>')
def serveStaticResource(resource):
return send_from_directory('static/', resource)
@app.route("/test")
def test():
return "<strong>It's Alive!</strong>"
if __name__ == '__main__':
app.run(host='0.0.0.0',port=80)
|
StarcoderdataPython
|
3253383
|
''' Test the trajectory optimization procedures.
'''
import tensorflow as tf
class TestTrajOpt(tf.test.TestCase):
''' Test the trajectory optimization procedures.
'''
def test_naive_trajopt(self):
''' Test the naive_trajopt function.
'''
pass
if __name__ == '__main__':
tf.test.main()
|
StarcoderdataPython
|
1694212
|
<reponame>incuna/incuna-groups
from crispy_forms.bootstrap import FormActions, StrictButton
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout
from django import forms
from django.core.urlresolvers import reverse_lazy
from . import models
class BaseAddCommentForm(forms.ModelForm):
"""A base class for forms that create models inheriting from BaseComment."""
def __init__(self, *args, **kwargs):
super(BaseAddCommentForm, self).__init__(*args, **kwargs)
self.helper = self.build_helper()
def build_helper(self):
"""An overridable method that creates a crispy_forms layout helper."""
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-lg-2'
helper.field_class = 'col-lg-8'
helper.layout = Layout(
'body',
FormActions(
StrictButton('Post comment', type='submit'),
),
)
return helper
class Meta:
fields = ('body',)
class AddTextComment(BaseAddCommentForm):
"""A form that posts TextComments."""
class Meta(BaseAddCommentForm.Meta):
model = models.TextComment
class AddTextCommentWithAttachment(BaseAddCommentForm):
file = forms.FileField()
def build_helper(self):
helper = super(AddTextCommentWithAttachment, self).build_helper()
helper.layout = Layout(
'body',
'file',
FormActions(
StrictButton('Upload and post', type='submit'),
),
)
return helper
class Meta:
model = models.TextComment
fields = ('body', 'file',)
labels = {
'body': 'Comment',
'file': 'Attachment',
}
class DiscussionCreate(forms.Form):
comment = forms.CharField(widget=forms.Textarea)
name = forms.CharField(max_length=255)
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-lg-2'
helper.field_class = 'col-lg-8'
helper.layout = Layout(
'name',
'comment',
FormActions(
StrictButton('Create Discussion', type='submit'),
),
)
class SubscribeForm(forms.Form):
subscribe = forms.BooleanField(widget=forms.HiddenInput(), required=False)
class Meta:
fields = ('subscribe',)
def __init__(self, user, instance, url_name, *args, **kwargs):
"""
Build the layout to reflect the action the form will take.
Accepts (and requires) a user and an instance being subscribed to
as keyword arguments.
"""
to_subscribe = not instance.is_subscribed(user)
initial_values = kwargs.setdefault('initial', {})
initial_values['subscribe'] = to_subscribe
super(SubscribeForm, self).__init__(*args, **kwargs)
button_text = 'Subscribe' if to_subscribe else 'Unsubscribe'
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.layout = Layout(
FormActions(
StrictButton(button_text, type='submit'),
),
)
self.helper.form_action = reverse_lazy(url_name, kwargs={'pk': instance.pk})
|
StarcoderdataPython
|
33401
|
<reponame>celord/mealprep
from django import forms
from django.forms import ModelForm
from .models import Plan
class DateInput(forms.DateInput):
input_type = "date"
class AddPlanForm(ModelForm):
class Meta:
model = Plan
        # fields = '__all__'
fields = [
"date",
"meal",
"mealtype",
"photo",
]
widgets = {
"date": DateInput(),
# 'comments': Textarea(attrs={'cols': 80, 'rows': 20}),
}
|
StarcoderdataPython
|
144621
|
import pytest
import torch
from torch import nn
from daceml.pytorch import DaceModule
from daceml.testing import torch_tensors_close
@pytest.mark.gpu
def test_dropout_fwd_training():
p = 0.5
module = nn.Dropout(p=p).cuda().train()
dace_module = DaceModule(module,
dummy_inputs=(torch.ones(10, 10).cuda(), ),
training=True)
# dropout will set some of these to zero
test_data = torch.randint(1, 10, (10, 10)).float().cuda()
print(test_data)
out = dace_module(torch.clone(test_data))
zeroed = out == 0
scale = 1 / (1 - p)
torch_tensors_close("output", test_data[~zeroed] * scale, out[~zeroed])
print(out)
@pytest.mark.gpu
@pytest.mark.parametrize("p", [0, 0.99, 0.6, 0.5])
def test_dropout_bwd(p):
module = nn.Dropout(p=p).cuda().train()
dace_module = DaceModule(module,
dummy_inputs=(torch.ones(10, 10).cuda(), ),
backward=True,
training=True)
test_data = torch.randint(1, 10, (10, 10)).float().cuda()
test_data.requires_grad = True
dy = torch.rand_like(test_data)
out = dace_module(torch.clone(test_data))
zeroed = out == 0
scale = 1 / (1 - p)
# check that fwd was correct
torch_tensors_close("output", test_data[~zeroed] * scale, out[~zeroed])
out.backward(dy)
# check that the gradient is correct:
zeros = torch.zeros_like(test_data.grad)
# check that zeroed values are zero in the grad
torch_tensors_close("grad_zeroed", zeros[zeroed], test_data.grad[zeroed])
# check that non-zeroed values are correct
torch_tensors_close("grad_zeroed", dy[~zeroed] * scale,
test_data.grad[~zeroed])
|
StarcoderdataPython
|
3245897
|
# MIT License
#
# Copyright (c) 2018 Image & Vision Computing Lab, Institute of Information Science, Academia Sinica
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ====================================================================================================
import numpy as np
import sys
np.set_printoptions(threshold=sys.maxsize)
from read_bin import load_bin,load_txt
def weight_loader(config):
MERGER_NAME = config.net
task1,task2 = MERGER_NAME.split("_")
PATH = config.weight_dir
# Load Vgg Well-trained
conv_num = 2
all_num = 4
sound_w = []
sound_b = []
PATH_M1 = PATH + config.net + '/' + task1 + '/'
w1_name = load_txt(PATH_M1 + "/weight.txt")
b1_name = load_txt(PATH_M1 + "/bias.txt")
for i in range(conv_num):
sound_w.append(load_bin(PATH_M1 + w1_name[i]).transpose(2,3,1,0))
sound_b.append(load_bin(PATH_M1 + b1_name[i]))
for i in range(conv_num,all_num):
sound_w.append(load_bin(PATH_M1 + w1_name[i]).transpose(1,0))
sound_b.append(load_bin(PATH_M1 + b1_name[i]))
sound_w[2] = sound_w[2].reshape(64,8,8,1024).transpose(1,2,0,3).reshape(4096,1024) #caffe(ch*h*w,out) -> tensorflow(h*w*ch,out)
# Load ZF Well-trained
conv_num = 2
all_num = 4
fashion_w = []
fashion_b = []
PATH_M2 = PATH + config.net + '/' + task2 + '/'
w2_name = load_txt(PATH_M2 + "/weight.txt")
b2_name = load_txt(PATH_M2 + "/bias.txt")
for i in range(conv_num):
fashion_w.append(load_bin(PATH_M2 + w2_name[i]).transpose(2,3,1,0))
fashion_b.append(load_bin(PATH_M2 + b2_name[i]))
for i in range(conv_num,all_num):
fashion_w.append(load_bin(PATH_M2 + w2_name[i]).transpose(1,0))
fashion_b.append(load_bin(PATH_M2 + b2_name[i]))
fashion_w[2] = fashion_w[2].reshape(64,8,8,1024).transpose(1,2,0,3).reshape(4096,1024) #caffe(ch*h*w,out) -> tensorflow(h*w*ch,out)
# Load Merged Model
conv_num = 2
all_num = 3
M_codebook = []
M1_index = []
M2_index = []
PATH_MERGE = config.merger_dir
c_name = load_txt(PATH_MERGE + 'merged_codebook.txt')
i1_name = load_txt(PATH_MERGE + 'model1.txt')
i2_name = load_txt(PATH_MERGE + 'model2.txt')
for i in range(conv_num):
M_codebook.append(load_bin(PATH_MERGE + c_name[i]))
M1_index.append(np.array(load_bin(PATH_MERGE + i1_name[i],data_type="uint8")-1,dtype=np.int32))
M1_index[i] = M1_index[i].transpose(3,1,2,0).reshape([M1_index[i].shape[3],M1_index[i].shape[1]*M1_index[i].shape[2]*M1_index[i].shape[0]])
M2_index.append(np.array(load_bin(PATH_MERGE + i2_name[i],data_type="uint8")-1,dtype=np.int32))
M2_index[i] = M2_index[i].transpose(3,1,2,0).reshape([M2_index[i].shape[3],M2_index[i].shape[1]*M2_index[i].shape[2]*M2_index[i].shape[0]])
for i in range(conv_num,all_num):
M_codebook.append(load_bin(PATH_MERGE + c_name[i]))
M1_index.append(np.array(load_bin(PATH_MERGE + i1_name[i],data_type="uint8")-1,dtype=np.int32))
M1_index[i] = M1_index[i].transpose(1,0)
M2_index.append(np.array(load_bin(PATH_MERGE + i2_name[i],data_type="uint8")-1,dtype=np.int32))
M2_index[i] = M2_index[i].transpose(1,0)
M1_output_layer = load_bin(PATH_MERGE + 'M1_outputlayer.bin').transpose(1,0)
M2_output_layer = load_bin(PATH_MERGE + 'M2_outputlayer.bin').transpose(1,0)
print('----- Codebook Parameter Setting -----')
print('Codebook subspace: [%3d, %3d, %3d]'%(M_codebook[0].shape[2],M_codebook[1].shape[2],M_codebook[2].shape[2]))
    print('Codeword numbers : [%3d, %3d, %3d]'%(M_codebook[0].shape[1],M_codebook[1].shape[1],M_codebook[2].shape[1]))
print('Max Iteration : %d'%config.max_step)
sys.stdout.write('Learning Rate : ')
print(config.lr_rate)
print('Batch Size : %d'%config.batch_size)
return M_codebook,sound_w,sound_b,M1_index,M1_output_layer,fashion_w,fashion_b,M2_index,M2_output_layer
|
StarcoderdataPython
|
4812275
|
<gh_stars>0
from ast import arg
from wifipumpkin3.core.common.terminal import ExtensionUI
from wifipumpkin3.core.utility.printer import (
setcolor,
display_messages,
display_tabulate,
)
# This file is part of the wifipumpkin3 Open Source Project.
# wifipumpkin3 is licensed under the Apache 2.0.
# Copyright 2020 P0cL4bs Team - <NAME> (mh4x0f)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Dhcpconf(ExtensionUI):
""" show/choise dhcp server configuration """
Name = "dhcpconf"
def __init__(self, parse_args=None, root=None):
self.parse_args = parse_args
self.root = root
self.register_command("do_dhcpconf", self.do_dhcpconf)
self.register_command("help_dhcpconf", self.help_dhcpconf)
self.ip_class = ["Class-A-Address","Class-B-Address", "Class-C-Address"]
super(Dhcpconf, self).__init__(parse_args=self.parse_args, root=self.root)
def help_dhcpconf(self):
self.show_help_command("help_dhcpconf_command")
def do_dhcpconf(self, args):
"""ap: show/choise dhcp server configuration """
status_ap = self.root.conf.get("accesspoint", "status_ap", format=bool)
if args:
try:
id_dhcp_option = int(args.split()[0])
selected_id_option = self.ip_class[id_dhcp_option]
for key in self.root.conf.get_all_childname(selected_id_option):
self.root.conf.set("dhcp", key,self.root.conf.get(selected_id_option, key))
if status_ap:
print(display_messages("OBS: this settings require restart the AP",error=True))
return
except Exception:
return print(
display_messages(
"the parameter id {} was not found.".format(
setcolor(args, color="orange")
),
error=True,
)
)
headers_table, output_table = (
["Id","Class", "IP address range", "Netmask" , "Router"],
[],
)
print(display_messages("DHCP Server Option:", info=True, sublime=True))
for ip_class in self.ip_class:
output_table.append(
[
self.ip_class.index(ip_class),
ip_class.split("-")[1],
self.root.conf.get(ip_class,"range"),
self.root.conf.get(ip_class, "netmask"),
self.root.conf.get(ip_class, "router"),
])
display_tabulate(headers_table, output_table, tablefmt="presto", newline=False)
print(display_messages("DHCP Server Settings:", info=True, sublime=True))
for config in self.root.conf.get_all_childname("dhcp"):
print(
" {}={}".format(setcolor(config, color="purple"), self.root.conf.get("dhcp", config))
)
print("\n")
|
StarcoderdataPython
|
1684945
|
import socket
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
import json
import logging
from ery4z_toolbox.utils import get_random_string, AESCipher
class Client:
"""General purpose client usable with the provided server class.
    It supports RSA and AES encryption depending on the server parameters.
"""
def __init__(
self, ip="127.0.0.1", key=None, port=1233, logger=None, auto_encrypt=False
):
"""Creator of the class
Args:
ip (str, optional): Server ip address. Defaults to "127.0.0.1".
key (rsa export, optional): RSA key in order to intialize the AES, if not provided they are generated Automaticaly. Defaults to None.
port (int, optional): Server port. Defaults to 1233.
logger (logger, optional): Optionnal Logger object overiding created ones. Defaults to None.
auto_encrypt (bool, optional): Automaticaly generate RSA and AES encrypted channel. Defaults to False.
"""
self.__host = ip
self.__port = port
self.last_packet = ""
if auto_encrypt:
RSAkey = RSA.generate(1024)
k = RSAkey.exportKey("PEM")
p = RSAkey.publickey().exportKey("PEM")
key = [k, p]
if key is not None:
if type(key) == list:
self.__my_private = key[0]
self.__my_public = key[1]
else:
self.__my_private = key
self.__my_public = None
            self.__decryptor = PKCS1_OAEP.new(RSA.import_key(self.__my_private))
else:
self.__my_private = None
self.__my_public = None
self.__decryptor = None
self._is_encrypted = False
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__encryptor = None
self._logger = logger
if logger is None:
self.setup_default_logger()
def __force_auto_encrypt(self):
RSAkey = RSA.generate(1024)
k = RSAkey.exportKey("PEM")
p = RSAkey.publickey().exportKey("PEM")
key = [k, p]
if key is not None:
if type(key) == list:
self.__my_private = key[0]
self.__my_public = key[1]
else:
self.__my_private = key
self.__my_public = None
self.__decryptor = PKCS1_OAEP.new(RSA.import_key(self.__my_private))
else:
self.__my_private = None
self.__my_public = None
self.__decryptor = None
def setup_default_logger(self):
logger = logging.getLogger("client")
if logger.hasHandlers():
logger.handlers.clear()
logger.setLevel(logging.INFO)
fh = logging.FileHandler("client.log")
fh.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
fh.setFormatter(formatter)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
self._logger = logger
def connect(self):
"""Connect to the server
Returns:
int: Error code if so
"""
try:
self.socket.connect((self.__host, self.__port))
except socket.error as e:
self._logger.error(str(e))
return 0
self._logger.info(f"Connected to {self.__host}:{self.__port}")
protocol_message = self.socket.recv(1024)[:-1].decode("utf-8")
self._logger.info(f"Received protocol message : {protocol_message}")
protocol_dict = json.loads(protocol_message)
try:
if protocol_dict["encryption"] == 1:
self._is_encrypted = True
self.__public_key = protocol_dict["public_key"]
except KeyError:
pass
if self._is_encrypted and (self.__public_key is not None):
self.__encryptor = PKCS1_OAEP.new(RSA.import_key(self.__public_key))
self._is_encrypted = True
else:
self._is_encrypted = False
if self._is_encrypted:
if self.__my_private == None:
self.__force_auto_encrypt()
protocol_message = json.dumps(
{"encryption": 1, "public_key": self.__my_public.decode("utf-8")}
)
self.socket.send(str.encode(protocol_message) + b"\0")
else:
protocol_message = json.dumps({"encryption": 0, "public_key": ""})
self.socket.send(str.encode(protocol_message) + b"\0")
# Establishing AES channel
if self._is_encrypted:
AES_protocol_message = self.socket.recv(1024)[:-1]
data = self.__decryptor.decrypt(AES_protocol_message).decode("utf-8")
data = json.loads(data)
AES_key = data["AES_key"]
self.AES_manager = AESCipher(KEY=AES_key)
self._logger.debug(f"Received message : 'AES_Key_Hidden'")
def send(self, message):
"""Send the provided message to the server
Args:
message (str): Message to be sent to the server.
"""
self._logger.info(f"Sending message : {message[:10]}...{message[-10:]}")
if self._is_encrypted:
n = 24
message += "\1"
chunks = [message[i : i + n] for i in range(0, len(message), n)]
chunk_index = 0
while chunk_index < len(chunks):
chunk = chunks[chunk_index]
self._logger.debug(f"Client send: {chunk}")
encrypted = self.AES_manager.encrypt(chunk)
self.socket.sendall(encrypted + b"\0")
if self.__receive_ack():
chunk_index += 1
else:
self.socket.sendall(bytes(message, "utf-8") + b"\0")
while not self.__receive_ack():
self.socket.sendall(bytes(message, "utf-8") + b"\0")
def __send_ack(self):
self._logger.debug(f"Client sending ack")
message = json.dumps({"method": "ack"})
if self._is_encrypted:
encrypted = self.AES_manager.encrypt(message + "\1")
self.socket.sendall(encrypted + b"\0")
else:
self.socket.sendall(bytes(message + "\1", "utf-8") + b"\0")
def __receive_ack(self):
self._logger.debug(f"Client waiting for ack")
encoded_data = b""
r = False
while not encoded_data.endswith(b"\0"):
self.socket.settimeout(2.0)
try:
recv_data = self.socket.recv(1024)
except Exception:
r = True
self.socket.settimeout(None)
if r:
return False
encoded_data = encoded_data + recv_data
encoded_data = encoded_data[:-1]
self._logger.debug(f"Client received raw: {encoded_data}")
if self._is_encrypted:
trame = self.AES_manager.decrypt(encoded_data).decode("utf-8")[:-1]
else:
trame = encoded_data.decode("utf-8")[:-1]
try:
data = json.loads(trame)
except Exception as e:
return False
else:
if data["method"] == "ack":
return True
else:
return False
def receive(self):
"""Receive a message from the server
Returns:
            string: Message from the server (usually a JSON string ready to be loaded)
"""
stop = False
if self._is_encrypted:
data = ""
while not data.endswith("\1"):
encoded_data = b""
while not encoded_data.endswith(b"\0"):
recv_data = self.socket.recv(1024)
encoded_data = encoded_data + recv_data
if not recv_data:
stop = True
break
if stop:
return 0
encoded_data = encoded_data[:-1]
self._logger.debug(f"Client received raw: {encoded_data}")
n_d = self.AES_manager.decrypt(encoded_data).decode("utf-8")
self._logger.debug(f"Client received: {n_d}")
if n_d != self.last_packet:
data += n_d
self.last_packet = n_d
self.__send_ack()
data = data[:-1]
self._logger.info(f"Received message : {data[:10]}...{data[-10:]}")
else:
got_one = False
while not got_one:
data = b""
while not data.endswith(b"\0"):
recv_data = self.socket.recv(2048)
data = data + recv_data
if not recv_data:
stop = True
break
if stop:
break
data = data[:-1]
data = data.decode("utf-8")
if data != self.last_packet:
self.last_packet = data
got_one = True
self.__send_ack()
self._logger.info(f"Client received message : {data[:10]}...{data[-10:]}")
return data
def disconnect(self):
"""Disconnect from the server."""
self._logger.info(f"Connection with {self.__host}:{self.__port} closed")
self.socket.close()
def __del__(self):
self.disconnect()
if __name__ == "__main__":
myClient = Client()
myClient.connect()
while True:
message = input("To send: ")
data = {}
myClient.send(message)
response = myClient.receive()
print(response)
if message == "close":
break
|
StarcoderdataPython
|
3283866
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Plain text reporters:
:text: the default one grouping messages by module
:colorized: an ANSI colorized text reporter
"""
from __future__ import print_function
import warnings
from logilab.common.ureports import TextWriter
from logilab.common.textutils import colorize_ansi
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
import six
TITLE_UNDERLINES = ['', '=', '-', '.']
class TextReporter(BaseReporter):
"""reports messages and layouts in plain text"""
__implements__ = IReporter
name = 'text'
extension = 'txt'
line_format = '{C}:{line:3d},{column:2d}: {msg} ({symbol})'
def __init__(self, output=None):
BaseReporter.__init__(self, output)
self._modules = set()
self._template = None
def on_set_current_module(self, module, filepath):
self._template = six.text_type(self.linter.config.msg_template or self.line_format)
def write_message(self, msg):
"""Convenience method to write a formated message with class default template"""
self.writeln(msg.format(self._template))
def handle_message(self, msg):
"""manage message of different type and in the context of path"""
if msg.module not in self._modules:
if msg.module:
self.writeln('************* Module %s' % msg.module)
self._modules.add(msg.module)
else:
self.writeln('************* ')
self.write_message(msg)
def _display(self, layout):
"""launch layouts display"""
print(file=self.out)
TextWriter().format(layout, self.out)
class ParseableTextReporter(TextReporter):
"""a reporter very similar to TextReporter, but display messages in a form
recognized by most text editors :
<filename>:<linenum>:<msg>
"""
name = 'parseable'
line_format = '{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'
def __init__(self, output=None):
warnings.warn('%s output format is deprecated. This is equivalent '
'to --msg-template=%s' % (self.name, self.line_format))
TextReporter.__init__(self, output)
class VSTextReporter(ParseableTextReporter):
"""Visual studio text reporter"""
name = 'msvs'
line_format = '{path}({line}): [{msg_id}({symbol}){obj}] {msg}'
class ColorizedTextReporter(TextReporter):
"""Simple TextReporter that colorizes text output"""
name = 'colorized'
COLOR_MAPPING = {
"I" : ("green", None),
'C' : (None, "bold"),
'R' : ("magenta", "bold, italic"),
'W' : ("blue", None),
'E' : ("red", "bold"),
'F' : ("red", "bold, underline"),
'S' : ("yellow", "inverse"), # S stands for module Separator
}
def __init__(self, output=None, color_mapping=None):
TextReporter.__init__(self, output)
self.color_mapping = color_mapping or \
dict(ColorizedTextReporter.COLOR_MAPPING)
def _get_decoration(self, msg_id):
"""Returns the tuple color, style associated with msg_id as defined
in self.color_mapping
"""
try:
return self.color_mapping[msg_id[0]]
except KeyError:
return None, None
def handle_message(self, msg):
"""manage message of different types, and colorize output
using ansi escape codes
"""
if msg.module not in self._modules:
color, style = self._get_decoration('S')
if msg.module:
modsep = colorize_ansi('************* Module %s' % msg.module,
color, style)
else:
modsep = colorize_ansi('************* %s' % msg.module,
color, style)
self.writeln(modsep)
self._modules.add(msg.module)
color, style = self._get_decoration(msg.C)
msg = msg._replace(
**{attr: colorize_ansi(getattr(msg, attr), color, style)
for attr in ('msg', 'symbol', 'category', 'C')})
self.write_message(msg)
def register(linter):
"""Register the reporter classes with the linter."""
linter.register_reporter(TextReporter)
linter.register_reporter(ParseableTextReporter)
linter.register_reporter(VSTextReporter)
linter.register_reporter(ColorizedTextReporter)
|
StarcoderdataPython
|
143065
|
# -*- coding: utf-8 -*-
import os.path as op
import psutil
def _abspath(path):
return op.abspath(op.expanduser(path))
def gather_notebooks():
""" Gather processes of IPython Notebook
Return
------
notes : list of dict
each dict has following keys: "pid", "cwd", and "port"
Raises
------
RuntimeError
- No IPython Notebook servers are found
"""
notes = []
for p in psutil.process_iter():
name = p.name().lower()
if not (name.startswith("ipython") or name.startswith("python")):
continue
if "notebook" not in p.cmdline():
continue
for net in p.connections(kind="inet4"):
if net.status != "LISTEN":
continue
_, port = net.laddr
break
notes.append({
"pid": p.pid,
"cwd": p.cwd(),
"port": port,
})
if not notes:
raise RuntimeError("No IPython Notebook servers are found")
return notes
def resolve_url(ipynb_path, notebooks=None):
"""
Return valid URL for .ipynb
Parameters
----------
ipynb_path : str
path of existing .ipynb file
Raises
------
RuntimeError
- Existing notebook servers do not start
on the parent directory of .ipynb file.
"""
ipynb_path = _abspath(ipynb_path)
if not op.exists(ipynb_path):
raise RuntimeError("Notebook {} is not found.".format(ipynb_path))
if not notebooks:
notebooks = gather_notebooks()
for note in notebooks:
cwd = note["cwd"]
if cwd.endswith("/"):
cwd = cwd[:-1]
if not ipynb_path.startswith(cwd):
continue
note["postfix"] = ipynb_path[len(cwd) + 1:] # remove '/'
return "http://localhost:{port}/notebooks/{postfix}".format(**note)
raise RuntimeError("No valid Notebook found. "
"Please start notebook server first.")
|
StarcoderdataPython
|
3371443
|
import httpx
from fastapi import APIRouter, HTTPException
from fastapi.requests import Request
from fastapi.responses import JSONResponse, Response
from httpx import HTTPError
from app.log import logger
from settings import conf
router = APIRouter()
timeout = httpx.Timeout(30)
client = httpx.AsyncClient(timeout=timeout)
@router.post("/{data_product:path}", tags=["data_product"])
async def route_identities(data_product: str, request: Request) -> Response:
url = f"{conf.PRODUCT_GATEWAY_URL}/{data_product}"
if request.url.query:
url += f"?{request.url.query}"
json_payload = await request.json()
logger.debug("Fetching Data Product", url=url)
try:
resp = await client.post(url, json=json_payload)
except HTTPError:
logger.exception("Failed to fetch Data Product from the Product Gateway")
raise HTTPException(status_code=502)
return JSONResponse(resp.json(), status_code=resp.status_code)
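
# Illustrative sketch of mounting this proxy router on an application; the
# prefix below is an assumption, not taken from the project's settings:
#
#     from fastapi import FastAPI
#     app = FastAPI()
#     app.include_router(router, prefix="/data-product")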
|
StarcoderdataPython
|
1648691
|
"""Pack the modules contained in the controller directory."""
from typing import Tuple, Dict, List, Any, Union
ResponseTuple = Tuple[Union[Dict[str, List[Dict[str, Any]]],
str,
Dict[str, Any]], int]
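
# Illustrative sketch of a controller return value matching this alias; the
# handler below is an assumption, not part of the original module.
def _example_handler() -> ResponseTuple:
    return {"status": "ok"}, 200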
|
StarcoderdataPython
|
107937
|
<filename>dbcut/sqlalchemy_utils.py
# -*- coding: utf-8 -*-
# This module comes from the sqlalchemy-utils package
# These functions have been slightly patched to support sqlalchemy 1.4+
import os
from copy import copy
import sqlalchemy as sa
from sqlalchemy.engine.interfaces import Dialect
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import OperationalError, ProgrammingError
from sqlalchemy.orm.exc import UnmappedInstanceError
from sqlalchemy.orm.session import object_session
from sqlalchemy.pool import NullPool
def get_bind(obj):
"""
Return the bind for given SQLAlchemy Engine / Connection / declarative
model object.
:param obj: SQLAlchemy Engine / Connection / declarative model object
::
from sqlalchemy_utils import get_bind
get_bind(session) # Connection object
get_bind(user)
"""
if hasattr(obj, "bind"):
conn = obj.bind
else:
try:
conn = object_session(obj).bind
except UnmappedInstanceError:
conn = obj
if not hasattr(conn, "execute"):
raise TypeError(
"This method accepts only Session, Engine, Connection and "
"declarative model objects."
)
return conn
def quote(mixed, ident):
"""
Conditionally quote an identifier.
::
from sqlalchemy_utils import quote
engine = create_engine('sqlite:///:memory:')
quote(engine, 'order')
# '"order"'
quote(engine, 'some_other_identifier')
# 'some_other_identifier'
:param mixed: SQLAlchemy Session / Connection / Engine / Dialect object.
:param ident: identifier to conditionally quote
"""
if isinstance(mixed, Dialect):
dialect = mixed
else:
dialect = get_bind(mixed).dialect
return dialect.preparer(dialect).quote(ident)
def database_exists(url):
"""Check if a database exists.
:param url: A SQLAlchemy engine URL.
Performs backend-specific testing to quickly determine if a database
exists on the server. ::
database_exists('postgresql://postgres@localhost/name') #=> False
create_database('postgresql://postgres@localhost/name')
database_exists('postgresql://postgres@localhost/name') #=> True
Supports checking against a constructed URL as well. ::
engine = create_engine('postgresql://postgres@localhost/name')
database_exists(engine.url) #=> False
create_database(engine.url)
database_exists(engine.url) #=> True
"""
url = copy(make_url(url))
database = url.database
dialect_name = url.get_dialect().name
def _sqlite_file_exists(database):
if not os.path.isfile(database) or os.path.getsize(database) < 100:
return False
with open(database, "rb") as f:
header = f.read(100)
return header[:16] == b"SQLite format 3\x00"
if dialect_name == "postgresql":
text = "SELECT 1 FROM pg_database WHERE datname='%s'" % database
for db in (database, "postgres", "template1", "template0", None):
url = _set_url_database(url, database=db)
engine = sa.create_engine(url, poolclass=NullPool)
try:
return bool(_get_scalar_result(engine, text))
except (ProgrammingError, OperationalError):
pass
return False
elif dialect_name == "mysql":
url = _set_url_database(url, database=None)
engine = sa.create_engine(url, poolclass=NullPool)
text = (
"SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA "
"WHERE SCHEMA_NAME = '%s'" % database
)
return bool(_get_scalar_result(engine, text))
elif dialect_name == "sqlite":
url = _set_url_database(url, database=None)
engine = sa.create_engine(url, poolclass=NullPool)
if database:
return database == ":memory:" or _sqlite_file_exists(database)
else:
# The default SQLAlchemy database is in memory,
# and :memory is not required, thus we should support that use-case
return True
else:
text = "SELECT 1"
try:
engine = sa.create_engine(url, poolclass=NullPool)
return bool(_get_scalar_result(engine, text))
except (ProgrammingError, OperationalError):
return False
def create_database(url, encoding="utf8", template=None):
"""Issue the appropriate CREATE DATABASE statement.
:param url: A SQLAlchemy engine URL.
:param encoding: The encoding to create the database as.
:param template:
The name of the template from which to create the new database. At the
moment only supported by PostgreSQL driver.
To create a database, you can pass a simple URL that would have
been passed to ``create_engine``. ::
create_database('postgresql://postgres@localhost/name')
You may also pass the url from an existing engine. ::
create_database(engine.url)
Has full support for mysql, postgres, and sqlite. In theory,
other database engines should be supported.
"""
url = copy(make_url(url))
database = url.database
dialect_name = url.get_dialect().name
dialect_driver = url.get_dialect().driver
if dialect_name == "postgres":
url = _set_url_database(url, database="postgres")
elif dialect_name == "mssql":
url = _set_url_database(url, database="master")
elif not dialect_name == "sqlite":
url = _set_url_database(url, database=None)
if dialect_name == "mssql" and dialect_driver in {"pymssql", "pyodbc"}:
engine = sa.create_engine(url, connect_args={"autocommit": True})
elif dialect_name == "postgresql" and dialect_driver in {
"asyncpg",
"pg8000",
"psycopg2",
"psycopg2cffi",
}:
engine = sa.create_engine(url, isolation_level="AUTOCOMMIT")
else:
engine = sa.create_engine(url)
if dialect_name == "postgresql":
if not template:
template = "template1"
text = "CREATE DATABASE {0} ENCODING '{1}' TEMPLATE {2}".format(
quote(engine, database), encoding, quote(engine, template)
)
with engine.connect() as connection:
connection.execute(text)
elif dialect_name == "mysql":
text = "CREATE DATABASE {0} CHARACTER SET = '{1}'".format(
quote(engine, database), encoding
)
with engine.connect() as connection:
connection.execute(text)
elif dialect_name == "sqlite" and database != ":memory:":
if database:
with engine.connect() as connection:
connection.execute("CREATE TABLE DB(id int);")
connection.execute("DROP TABLE DB;")
else:
text = "CREATE DATABASE {0}".format(quote(engine, database))
with engine.connect() as connection:
connection.execute(text)
engine.dispose()
def drop_database(url):
"""Issue the appropriate DROP DATABASE statement.
:param url: A SQLAlchemy engine URL.
Works similar to the :ref:`create_database` method in that both url text
and a constructed url are accepted. ::
drop_database('postgresql://postgres@localhost/name')
drop_database(engine.url)
"""
url = copy(make_url(url))
database = url.database
dialect_name = url.get_dialect().name
dialect_driver = url.get_dialect().driver
if dialect_name == "postgres":
url = _set_url_database(url, database="postgres")
elif dialect_name == "mssql":
url = _set_url_database(url, database="master")
elif not dialect_name == "sqlite":
url = _set_url_database(url, database=None)
if dialect_name == "mssql" and dialect_driver in {"pymssql", "pyodbc"}:
engine = sa.create_engine(url, connect_args={"autocommit": True})
elif dialect_name == "postgresql" and dialect_driver in {
"asyncpg",
"pg8000",
"psycopg2",
"psycopg2cffi",
}:
engine = sa.create_engine(url, isolation_level="AUTOCOMMIT")
else:
engine = sa.create_engine(url)
if dialect_name == "sqlite" and database != ":memory:":
if database:
os.remove(database)
elif dialect_name == "postgresql":
with engine.connect() as connection:
# Disconnect all users from the database we are dropping.
version = connection.dialect.server_version_info
pid_column = "pid" if (version >= (9, 2)) else "procpid"
text = """
SELECT pg_terminate_backend(pg_stat_activity.%(pid_column)s)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = '%(database)s'
AND %(pid_column)s <> pg_backend_pid();
""" % {
"pid_column": pid_column,
"database": database,
}
connection.execute(text)
# Drop the database.
text = "DROP DATABASE {0}".format(quote(connection, database))
connection.execute(text)
else:
text = "DROP DATABASE {0}".format(quote(engine, database))
with engine.connect() as connection:
connection.execute(text)
engine.dispose()
def _set_url_database(url: sa.engine.url.URL, database):
if hasattr(sa.engine, "URL"):
ret = sa.engine.URL.create(
drivername=url.drivername,
username=url.username,
            password=url.password,
host=url.host,
port=url.port,
database=database,
query=url.query,
)
else: # SQLAlchemy <1.4
url.database = database
ret = url
assert ret.database == database, ret
return ret
def _get_scalar_result(engine, sql):
with engine.connect() as conn:
return conn.scalar(sql)
|
StarcoderdataPython
|
3335873
|
from flask import render_template, request, current_app, g, redirect, url_for
from maintain_frontend.decorators import requires_permission
from maintain_frontend.constants.permissions import Permissions
from maintain_frontend.view_modify_lon.validation.cancel_lon_validator import CancelLonValidator
from maintain_frontend.dependencies.storage_api.storage_api_service import StorageAPIService
from maintain_frontend.add_lon.services import process_lon_documents
from maintain_frontend.dependencies.audit_api.audit_api import AuditAPIService
from maintain_frontend.dependencies.maintain_api.maintain_api_service import MaintainApiService
from maintain_frontend.dependencies.search_api.local_land_charge_service import LocalLandChargeService
from maintain_frontend.exceptions import UploadDocumentError
from maintain_frontend.services.charge_services import get_lon_by_charge_id
from datetime import date
import json
def register_routes(bp):
bp.add_url_rule('/<charge_id>', view_func=cancel_get, methods=['GET'])
bp.add_url_rule('/<charge_id>', view_func=cancel_post, methods=['POST'])
bp.add_url_rule('/<charge_id>/confirmation', view_func=confirm, methods=['GET', 'POST'])
bp.add_url_rule('/<charge_id>/charge-cancelled', view_func=charge_cancelled, methods=['GET'])
# Cancel LON start Page
@requires_permission([Permissions.cancel_lon])
def cancel_get(charge_id):
# Retrieve Charge
local_land_charge_service = LocalLandChargeService(current_app.config)
display_id, charge_item = get_lon_by_charge_id(charge_id, local_land_charge_service)
return render_template('cancel_lon.html', charge_id=display_id)
# Cancel LON start Page
@requires_permission([Permissions.cancel_lon])
def cancel_post(charge_id):
local_land_charge_service = LocalLandChargeService(current_app.config)
display_id, charge_item = get_lon_by_charge_id(charge_id, local_land_charge_service)
cancel_options = request.form.getlist('cancel-options')
form_b = request.files.get('form-b-cancel-lon-file-input')
court_order = request.files.get('court-order-cancel-lon-file-input')
current_app.logger.info("Running validation")
validation_error_builder = CancelLonValidator.validate(request.form, request.files)
if validation_error_builder.errors:
current_app.logger.warning("Validation errors occurred")
return render_template('cancel_lon.html',
charge_id=display_id,
validation_errors=validation_error_builder.errors,
validation_summary_heading=validation_error_builder.summary_heading_text,
request_body=request.form), 400
files_to_upload = {}
if "Form B" in cancel_options and form_b:
files_to_upload['form-b'] = ('form_b.pdf', form_b, form_b.content_type)
if "Court Order" in cancel_options and court_order:
files_to_upload['court-order'] = ("court_order.pdf", court_order, court_order.content_type)
if files_to_upload:
upload_files(files_to_upload, charge_item, charge_id)
charge_item.end_date = date.today()
g.session.add_lon_charge_state = charge_item
g.session.commit()
return redirect(url_for('cancel_lon.confirm', charge_id=charge_id))
def upload_files(files, charge_item, charge_id):
storage_api_service = StorageAPIService(current_app.config)
subdirectory = charge_item.documents_filed['form-a'][0]['subdirectory']
upload_response = storage_api_service.save_files(files, process_lon_documents.bucket(),
[subdirectory], True)
if upload_response.status_code == 400:
raise UploadDocumentError("Virus scan failed. Upload a new document.",
url_for('cancel_lon.cancel_get', charge_id=charge_id))
new_documents_filed = upload_response.json()
merged_docs = charge_item.documents_filed.copy()
merged_docs.update(new_documents_filed)
charge_item.documents_filed = merged_docs
# Cancel LON start Page
@requires_permission([Permissions.cancel_lon])
def confirm(charge_id):
local_land_charge_service = LocalLandChargeService(current_app.config)
display_id, charge_item = get_lon_by_charge_id(charge_id, local_land_charge_service)
if request.method == 'GET':
return render_template('cancel_lon_confirm.html',
charge_id=display_id,
charge_item=charge_item,
geometry=json.dumps(charge_item.geometry))
if request.method == 'POST':
current_app.logger.info("Cancelling Charge - {}".format(display_id))
AuditAPIService.audit_event("Cancelling charge", supporting_info={'id': display_id})
MaintainApiService.update_charge(g.session.add_lon_charge_state)
        # This is required because, if render_template were called directly from this POST method, the flow
        # could not return to the confirmation page when the user goes to the feedback form from that page
return redirect(url_for('cancel_lon.charge_cancelled', charge_id=display_id))
# Cancel LON confirmed Page
@requires_permission([Permissions.cancel_lon])
def charge_cancelled(charge_id):
return render_template('charge_cancelled.html', charge_id=charge_id)
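
# Illustrative sketch of wiring these views up; the blueprint construction below
# is an assumption (the url_for calls above expect a blueprint named 'cancel_lon'):
#
#     from flask import Blueprint
#     cancel_lon = Blueprint('cancel_lon', __name__, url_prefix='/cancel-lon')
#     register_routes(cancel_lon)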
|
StarcoderdataPython
|
1642931
|
<filename>proj_issues/mcbv/edit.py<gh_stars>1-10
from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.utils.encoding import force_text
from django.db import models
from django.contrib import messages
from django.utils.functional import curry
from django.forms.formsets import formset_factory, BaseFormSet, all_valid
from django.forms.models import modelformset_factory
from base import TemplateResponseMixin, ContextMixin, View
from detail import SingleObjectMixin, SingleObjectTemplateResponseMixin, BaseDetailView, DetailView
from list import MultipleObjectMixin, ListView
class FormMixin(ContextMixin):
"""
A mixin that provides a way to show and handle a form in a request.
"""
initial = {}
form_class = None
success_url = None
form_kwarg_user = False # provide request user to form
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
return self.initial.copy()
def get_form_class(self):
"""
Returns the form class to use in this view
"""
return self.form_class
def get_form(self, form_class=None):
"""
Returns an instance of the form to be used in this view.
"""
form_class = form_class or self.get_form_class()
return form_class(**self.get_form_kwargs())
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = {'initial': self.get_initial()}
if self.form_kwarg_user:
kwargs['user'] = self.request.user
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_success_url(self):
"""
Returns the supplied success URL.
"""
if self.success_url:
# Forcing possible reverse_lazy evaluation
url = force_text(self.success_url)
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
def form_valid(self, form):
"""
If the form is valid, redirect to the supplied URL.
"""
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
"""
If the form or modelform are invalid, re-render the context data with the
data-filled form and errors.
"""
return self.get_context_data(form=form)
class FormSetMixin(FormMixin):
"""A mixin that provides a way to show and handle a formset in a request."""
formset_form_class = None
formset_initial = {}
formset_class = BaseFormSet
extra = 0
can_delete = False
# ignore_get_args = ("page", ) # TODO this may be better moved to the form class?
formset_kwarg_user = False # provide request user to form
success_url = None
def get_formset_initial(self):
return self.formset_initial.copy()
def get_formset_class(self):
return self.formset_class
def get_formset_form_class(self):
return self.formset_form_class
def get_formset(self, form_class=None):
form_class = form_class or self.formset_form_class
kwargs = dict()
Formset = formset_factory(form_class, extra=self.extra, can_delete=self.can_delete)
if self.form_kwarg_user:
kwargs["user"] = self.user
Formset.form = staticmethod(curry(form_class, **kwargs))
return Formset(**self.get_formset_kwargs())
def get_formset_kwargs(self):
kwargs = dict(initial=self.get_formset_initial())
if self.formset_kwarg_user:
kwargs["user"] = self.request.user
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_success_url(self):
if self.success_url:
# Forcing possible reverse_lazy evaluation
url = force_text(self.success_url)
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
def formset_valid(self, formset):
for form in formset:
if form.has_changed():
if form.cleaned_data.get("DELETE"):
self.process_delete(form)
else:
self.process_form(form)
return HttpResponseRedirect(self.get_success_url())
def process_form(self, form):
form.save()
def process_delete(self, form):
"""Process checked 'delete' box."""
pass
def formset_invalid(self, formset):
return self.get_context_data(formset=formset)
class ModelFormSetMixin(FormSetMixin):
formset_model = None
formset_queryset = None
def get_formset_queryset(self):
if self.formset_queryset is not None:
queryset = self.formset_queryset
if hasattr(queryset, '_clone'):
queryset = queryset._clone()
elif self.formset_model is not None:
queryset = self.formset_model._default_manager.all()
else:
raise ImproperlyConfigured("'%s' must define 'formset_queryset' or 'formset_model'"
% self.__class__.__name__)
return queryset
def get_formset(self, form_class=None):
form_class = form_class or self.formset_form_class
kwargs = dict()
Formset = modelformset_factory(self.formset_model, extra=self.extra, can_delete=self.can_delete)
if self.form_kwarg_user:
kwargs["user"] = self.user
Formset.form = staticmethod(curry(form_class, **kwargs))
return Formset(**self.get_formset_kwargs())
def get_formset_kwargs(self):
kwargs = {
'initial' : self.get_formset_initial(),
'queryset' : self.get_formset_queryset(),
}
if self.formset_kwarg_user:
kwargs["user"] = self.request.user
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def process_delete(self, form):
"""Process checked 'delete' box."""
form.instance.delete()
class ModelFormMixin(FormMixin, SingleObjectMixin):
"""
A mixin that provides a way to show and handle a modelform in a request.
"""
form_model = None
modelform_class = None
modelform_queryset = None
modelform_context_object_name = None
modelform_pk_url_kwarg = 'mfpk'
modelform_valid_msg = None
def get_modelform_class(self):
"""Returns the form class to use in this view."""
if self.modelform_class:
return self.modelform_class
else:
if self.form_model is not None:
# If a model has been explicitly provided, use it
model = self.form_model
elif hasattr(self, 'modelform_object') and self.modelform_object is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.modelform_object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_modelform_queryset().model
return model_forms.modelform_factory(model)
def get_modelform(self, form_class=None):
form_class = form_class or self.get_modelform_class()
return form_class(**self.get_modelform_kwargs())
def get_modelform_kwargs(self):
"""Returns the keyword arguments for instantiating the form."""
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs.update({'instance': self.modelform_object})
return kwargs
def get_success_url(self):
"""Returns the supplied URL."""
if self.success_url:
url = self.success_url % self.modelform_object.__dict__
else:
try:
url = self.modelform_object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the Model.")
return url
def modelform_valid(self, modelform):
self.modelform_object = modelform.save()
if self.modelform_valid_msg:
messages.info(self.request, self.modelform_valid_msg)
return HttpResponseRedirect(self.get_success_url())
def modelform_invalid(self, modelform):
return self.get_context_data(modelform=modelform)
def get_modelform_context_data(self, **kwargs):
"""
If an object has been supplied, inject it into the context with the
supplied modelform_context_object_name name.
"""
context = {}
obj = self.modelform_object
if obj:
context['modelform_object'] = obj
if self.modelform_context_object_name:
context[self.modelform_context_object_name] = obj
elif isinstance(obj, models.Model):
context[obj._meta.object_name.lower()] = obj
context.update(kwargs)
return context
def get_modelform_object(self, queryset=None):
return self.get_object( queryset or self.get_modelform_queryset(), self.modelform_pk_url_kwarg )
def get_modelform_queryset(self):
if self.modelform_queryset:
return self.modelform_queryset._clone()
else:
return self.get_queryset(self.form_model)
class ProcessFormView(View):
"""
A mixin that renders a form on GET and processes it on POST.
"""
def form_get(self, request, *args, **kwargs):
"""
Handles GET requests and instantiates a blank version of the form.
"""
return self.get_context_data( form=self.get_form() )
def formset_get(self, request, *args, **kwargs):
return self.get_context_data( formset=self.get_formset() )
def modelform_get(self, request, *args, **kwargs):
"""
Handles GET requests and instantiates a blank version of the form.
"""
return self.get_modelform_context_data( modelform=self.get_modelform() )
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form instance with the passed
POST variables and then checked for validity.
"""
form = formset = modelform = None
if isinstance(self, DetailView):
self.detail_object = self.get_detail_object()
if isinstance(self, ListView):
self.object_list = self.get_list_queryset()
if isinstance(self, FormView):
form = self.get_form()
if isinstance(self, (FormSetView, ModelFormSetView)):
formset = self.get_formset()
if isinstance(self, UpdateView):
self.update_post(request, *args, **kwargs)
modelform = self.get_modelform()
if isinstance(self, CreateView):
self.create_post(request, *args, **kwargs)
modelform = self.get_modelform()
if (not form or form and form.is_valid()) and \
(not modelform or modelform and modelform.is_valid()) and \
(not formset or formset and formset.is_valid()):
if isinstance(self, FormView) : resp = self.form_valid(form)
if isinstance(self, (FormSetView, ModelFormSetView)) : resp = self.formset_valid(formset)
if isinstance(self, (UpdateView, CreateView)) : resp = self.modelform_valid(modelform)
return resp
else:
context = self.get_context_data()
update = context.update
if isinstance(self, FormView) : update(self.form_invalid(form))
if isinstance(self, (FormSetView, ModelFormSetView)) : update(self.formset_invalid(formset))
if isinstance(self, (UpdateView, CreateView)) : update(self.modelform_invalid(modelform))
return self.render_to_response(context)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
""" A base view for displaying a form """
class FormView(TemplateResponseMixin, BaseFormView):
""" A view for displaying a form, and rendering a template response. """
class BaseFormSetView(FormSetMixin, ProcessFormView):
"""A base view for displaying a formset."""
class FormSetView(TemplateResponseMixin, BaseFormSetView):
"""A view for displaying a formset, and rendering a template response."""
class BaseModelFormSetView(ModelFormSetMixin, ProcessFormView):
"""A base view for displaying a modelformset."""
class ModelFormSetView(TemplateResponseMixin, BaseModelFormSetView):
"""A view for displaying a modelformset, and rendering a template response."""
class BaseCreateView(ModelFormMixin, ProcessFormView):
"""
Base view for creating an new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def create_get(self, request, *args, **kwargs):
self.modelform_object = None
return self.modelform_get(request, *args, **kwargs)
def create_post(self, request, *args, **kwargs):
self.modelform_object = None
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
View for creating a new object instance,
with a response rendered by template.
"""
template_name_suffix = '_modelform'
def get_template_names(self):
return self._get_template_names(self.modelform_object, self.form_model)
class BaseUpdateView(ModelFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def update_get(self, request, *args, **kwargs):
self.modelform_object = self.get_modelform_object()
return self.modelform_get(request, *args, **kwargs)
def update_post(self, request, *args, **kwargs):
self.modelform_object = self.get_modelform_object()
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""
View for updating an object,
with a response rendered by template.
"""
template_name_suffix = '_modelform'
def get_template_names(self):
return self._get_template_names(self.modelform_object, self.form_model)
class CreateUpdateView(CreateView):
"""Update object if modelform_pk_url_kwarg is in kwargs, otherwise create it."""
modelform_create_class = None
def get_modelform_class(self):
if self.modelform_pk_url_kwarg in self.kwargs:
return self.modelform_class
else:
return self.modelform_create_class
def create_get(self, request, *args, **kwargs):
if self.modelform_pk_url_kwarg in self.kwargs:
self.modelform_object = self.get_modelform_object()
return self.modelform_get(request, *args, **kwargs)
else:
return super(CreateUpdateView, self).create_get(request, *args, **kwargs)
def create_post(self, request, *args, **kwargs):
if self.modelform_pk_url_kwarg in self.kwargs:
self.modelform_object = self.get_modelform_object()
else:
super(CreateUpdateView, self).create_post(request, *args, **kwargs)
class DeletionMixin(object):
"""
A mixin providing the ability to delete objects
"""
success_url = None
def delete(self, request, *args, **kwargs):
"""
Calls the delete() method on the fetched object and then
redirects to the success URL.
"""
self.modelform_object = self.get_modelform_object()
self.modelform_object.delete()
return HttpResponseRedirect(self.get_success_url())
# Add support for browsers which only accept GET and POST for now.
def post(self, *args, **kwargs):
return self.delete(*args, **kwargs)
def get_success_url(self):
if self.success_url:
return self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with `self.get_object()`,
with a response rendered by template.
"""
template_name_suffix = '_confirm_delete'
|
StarcoderdataPython
|
1637738
|
from pkg_resources import get_distribution
__version__ = get_distribution('behave_http').version
|
StarcoderdataPython
|
1769795
|
__author__ = 'Harry'
__date__ = '2/1/2018 9:56 PM'
|
StarcoderdataPython
|
3233416
|
<reponame>Tapawingo/FreeTakServer<filename>FreeTAKServer/model/Enumerations/connectionTypes.py
#######################################################
#
# connectionTypes.py
# Python implementation of the Enumeration connectionTypes
# Generated by Enterprise Architect
# Created on: 07-Dec-2021 7:23:24 PM
# Original author: natha
#
#######################################################
from enum import Enum
class ConnectionTypes(Enum):
TCP = "TCP"
SSL = "SSL"
UDP = "UDP"
PROTO = "PROTO"
|
StarcoderdataPython
|
3323416
|
<reponame>MuriloChaves/prova-de-conceito
# -*- coding: utf-8 -*-
# Import the required libraries
from PIL import Image
import os
def main():
    '''
    Main function that performs the whole conversion workflow.
    '''
    # Open the image and convert it to grayscale ('LA' mode)
imagem = Image.open('./data/doguinho.png').convert('LA')
    # Display the image
imagem.show()
    # Create the output directory if it does not exist
if not os.path.exists('./saida'):
os.mkdir('./saida')
    # Save the converted image
imagem.save('./saida/doguinho_cinza.png')
pass
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
23885
|
<gh_stars>1-10
#!/usr/bin/env python
import sys
import matplotlib
import numpy as np
import random
import itertools
import socket
import sklearn.metrics
from scipy.optimize import minimize
from scipy.optimize import Bounds
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils.random import sample_without_replacement
np.set_printoptions(precision=4)
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(linewidth=300)
np.set_printoptions(suppress=True)
class opt_gaussian():
def __init__(self, X, Y, Y_kernel='linear', σₓ=None, Γ=None): # X=data, Y=label, σ_type='ℍ', or 'maxKseparation'
ń = X.shape[0]
ð = X.shape[1]
self.Y_kernel = Y_kernel
if ń > 300: # Down sample first
samples = sample_without_replacement(n_population=ń, n_samples=300)
X = X[samples,:]
Y = Y[samples]
ń = X.shape[0]
Γ = None
if Y_kernel == 'linear':
σᵧ = 1
if Γ is None:
self.Ⅱᵀ = np.ones((ń,ń))
ńᒾ = ń*ń
Yₒ = OneHotEncoder(categories='auto', sparse=False).fit_transform(np.reshape(Y,(len(Y),1)))
self.Kᵧ = Kᵧ = Yₒ.dot(Yₒ.T)
ṉ = np.sum(Kᵧ)
HKᵧ = self.Kᵧ - np.mean(self.Kᵧ, axis=0) # equivalent to Γ = Ⲏ.dot(Kᵧ).dot(Ⲏ)
self.Γ = HKᵧH = (HKᵧ.T - np.mean(HKᵧ.T, axis=0)).T
else:
self.Γ = Γ
elif Y_kernel == 'Gaussian':
Ðᵧ = sklearn.metrics.pairwise.pairwise_distances(Y)
σᵧ = np.median(Ðᵧ)
self.Ðᵧᒾ = (-Ðᵧ*Ðᵧ)/2
Ðₓ = sklearn.metrics.pairwise.pairwise_distances(X)
if σₓ is None:
σₓ = np.median(Ðₓ)
self.Ðₓᒾ = (-Ðₓ*Ðₓ)/2
self.σ = [σₓ, σᵧ]
def minimize_H(self):
self.result = minimize(self.ℍ, self.σ, method='L-BFGS-B', options={'gtol': 1e-5, 'disp': False}, bounds=Bounds(0.05, 100000))
if self.result.x[0] < 0.01:
self.result.x[0] = 0.01
def ℍ(self, σ):
[σₓ, σᵧ] = σ
Kₓ = np.exp(self.Ðₓᒾ/(σₓ*σₓ))
if self.Y_kernel == 'linear':
Γ = self.Γ
elif self.Y_kernel == 'Gaussian':
Kᵧ = np.exp(self.Ðᵧᒾ/(σᵧ*σᵧ))
HKᵧ = Kᵧ - np.mean(Kᵧ, axis=0) # equivalent to Γ = Ⲏ.dot(Kᵧ).dot(Ⲏ)
Γ = HKᵧH = (HKᵧ.T - np.mean(HKᵧ.T, axis=0)).T
loss = -np.sum(Kₓ*Γ)
return loss
def get_opt_σ(X,Y, Y_kernel='Gaussian'):
optimizer = opt_gaussian(X,Y, Y_kernel=Y_kernel)
optimizer.minimize_H()
return optimizer.result
def get_opt_σ_via_random(X,Y, Y_kernel='Gaussian'):
optimizer = opt_gaussian(X,Y, Y_kernel=Y_kernel)
opt = 0
opt_σ = 0
for m in range(1000):
σ = (7*np.random.rand(2)).tolist()
new_opt = -optimizer.ℍ(σ)
if opt < new_opt:
opt = new_opt
opt_σ = σ
print('Random Result ')
print('\tbest_σ : ', opt_σ)
print('\tmax_HSIC : ' , opt)
if __name__ == "__main__":
data_name = 'wine'
X = np.loadtxt('../dataset/' + data_name + '.csv', delimiter=',', dtype=np.float64)
Y = np.loadtxt('../dataset/' + data_name + '_label.csv', delimiter=',', dtype=np.int32)
X = preprocessing.scale(X)
optimized_results = get_opt_σ(X,Y, Y_kernel='linear')
best_σ = optimized_results.x
max_HSIC = -optimized_results.fun
print('Optimized Result ')
print('\tbest_σ [σₓ, σᵧ]: ', best_σ)
print('\tmax_HSIC : ' , max_HSIC)
optimized_results = get_opt_σ_via_random(X,Y, Y_kernel='linear')
|
StarcoderdataPython
|
1741899
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from dateutil.relativedelta import relativedelta
from psycopg2 import IntegrityError
from odoo import fields
from odoo.exceptions import AccessError, ValidationError, UserError
from odoo.tools import mute_logger, test_reports
from odoo.addons.hr_holidays.tests.common import TestHrHolidaysBase
class TestHolidaysFlow(TestHrHolidaysBase):
@mute_logger('odoo.addons.base.models.ir_model', 'odoo.models')
def test_00_leave_request_flow_unlimited(self):
""" Testing leave request flow: unlimited type of leave request """
Requests = self.env['hr.leave']
HolidaysStatus = self.env['hr.leave.type']
# HrManager creates some holiday statuses
HolidayStatusManagerGroup = HolidaysStatus.with_user(self.user_hrmanager_id)
HolidayStatusManagerGroup.create({
'name': 'WithMeetingType',
'allocation_type': 'no',
})
self.holidays_status_hr = HolidayStatusManagerGroup.create({
'name': 'NotLimitedHR',
'allocation_type': 'no',
'validation_type': 'hr',
'validity_start': False,
})
self.holidays_status_manager = HolidayStatusManagerGroup.create({
'name': 'NotLimitedManager',
'allocation_type': 'no',
'validation_type': 'manager',
'validity_start': False,
})
HolidaysEmployeeGroup = Requests.with_user(self.user_employee_id)
# Employee creates a leave request in a no-limit category hr manager only
hol1_employee_group = HolidaysEmployeeGroup.create({
'name': 'Hol11',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_hr.id,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days': 1,
})
hol1_user_group = hol1_employee_group.with_user(self.user_hruser_id)
hol1_manager_group = hol1_employee_group.with_user(self.user_hrmanager_id)
self.assertEqual(hol1_user_group.state, 'confirm', 'hr_holidays: newly created leave request should be in confirm state')
# HrUser validates the employee leave request -> should work
hol1_user_group.action_approve()
self.assertEqual(hol1_manager_group.state, 'validate', 'hr_holidays: validated leave request should be in validate state')
# Employee creates a leave request in a no-limit category department manager only
hol12_employee_group = HolidaysEmployeeGroup.create({
'name': 'Hol12',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_manager.id,
'date_from': (datetime.today() + relativedelta(days=12)),
'date_to': (datetime.today() + relativedelta(days=13)),
'number_of_days': 1,
})
hol12_user_group = hol12_employee_group.with_user(self.user_hruser_id)
hol12_manager_group = hol12_employee_group.with_user(self.user_hrmanager_id)
self.assertEqual(hol12_user_group.state, 'confirm', 'hr_holidays: newly created leave request should be in confirm state')
# HrManager validate the employee leave request
hol12_manager_group.action_approve()
self.assertEqual(hol1_user_group.state, 'validate', 'hr_holidays: validates leave request should be in validate state')
@mute_logger('odoo.addons.base.models.ir_model', 'odoo.models')
def test_01_leave_request_flow_limited(self):
""" Testing leave request flow: limited type of leave request """
Requests = self.env['hr.leave']
Allocations = self.env['hr.leave.allocation']
HolidaysStatus = self.env['hr.leave.type']
def _check_holidays_status(holiday_status, ml, lt, rl, vrl):
self.assertEqual(holiday_status.max_leaves, ml,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.leaves_taken, lt,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.remaining_leaves, rl,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.virtual_remaining_leaves, vrl,
'hr_holidays: wrong type days computation')
# HrManager creates some holiday statuses
HolidayStatusManagerGroup = HolidaysStatus.with_user(self.user_hrmanager_id)
HolidayStatusManagerGroup.create({
'name': 'WithMeetingType',
'allocation_type': 'no',
'validity_start': False,
})
self.holidays_status_limited = HolidayStatusManagerGroup.create({
'name': 'Limited',
'allocation_type': 'fixed',
'validation_type': 'both',
'validity_start': False,
})
HolidaysEmployeeGroup = Requests.with_user(self.user_employee_id)
# HrUser allocates some leaves to the employee
aloc1_user_group = Allocations.with_user(self.user_hruser_id).create({
'name': 'Days for limited category',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_limited.id,
'number_of_days': 2,
})
# HrUser validates the first step
aloc1_user_group.action_approve()
# HrManager validates the second step
aloc1_user_group.with_user(self.user_hrmanager_id).action_validate()
# Checks Employee has effectively some days left
hol_status_2_employee_group = self.holidays_status_limited.with_user(self.user_employee_id)
_check_holidays_status(hol_status_2_employee_group, 2.0, 0.0, 2.0, 2.0)
# Employee creates a leave request in the limited category, now that he has some days left
hol2 = HolidaysEmployeeGroup.create({
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_limited.id,
'date_from': (datetime.today() + relativedelta(days=2)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=3)),
'number_of_days': 1,
})
hol2_user_group = hol2.with_user(self.user_hruser_id)
# Check left days: - 1 virtual remaining day
hol_status_2_employee_group.invalidate_cache()
_check_holidays_status(hol_status_2_employee_group, 2.0, 0.0, 2.0, 1.0)
# HrManager validates the first step
hol2_user_group.with_user(self.user_hrmanager_id).action_approve()
self.assertEqual(hol2.state, 'validate1',
'hr_holidays: first validation should lead to validate1 state')
# HrManager validates the second step
hol2_user_group.with_user(self.user_hrmanager_id).action_validate()
self.assertEqual(hol2.state, 'validate',
'hr_holidays: second validation should lead to validate state')
# Check left days: - 1 day taken
_check_holidays_status(hol_status_2_employee_group, 2.0, 1.0, 1.0, 1.0)
# HrManager finds an error: he refuses the leave request
hol2.with_user(self.user_hrmanager_id).action_refuse()
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: refuse should lead to refuse state')
# Check left days: 2 days left again
hol_status_2_employee_group.invalidate_cache(['max_leaves'])
_check_holidays_status(hol_status_2_employee_group, 2.0, 0.0, 2.0, 2.0)
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: hr_user should not be able to reset a refused leave request')
# HrManager resets the request
hol2_manager_group = hol2.with_user(self.user_hrmanager_id)
hol2_manager_group.action_draft()
self.assertEqual(hol2.state, 'draft',
'hr_holidays: resetting should lead to draft state')
employee_id = self.ref('hr.employee_admin')
# cl can be of maximum 20 days for employee_admin
hol3_status = self.env.ref('hr_holidays.holiday_status_cl').with_context(employee_id=employee_id)
# I assign the dates in the holiday request for 1 day
hol3 = Requests.create({
'name': 'Sick Time Off',
'holiday_status_id': hol3_status.id,
'date_from': datetime.today().strftime('%Y-%m-10 10:00:00'),
'date_to': datetime.today().strftime('%Y-%m-11 19:00:00'),
'employee_id': employee_id,
'number_of_days': 1,
})
        # I find a small mistake on my leave request, so I click on the "Refuse" button to correct it.
hol3.action_refuse()
self.assertEqual(hol3.state, 'refuse', 'hr_holidays: refuse should lead to refuse state')
# I again set to draft and then confirm.
hol3.action_draft()
self.assertEqual(hol3.state, 'draft', 'hr_holidays: resetting should lead to draft state')
hol3.action_confirm()
self.assertEqual(hol3.state, 'confirm', 'hr_holidays: confirming should lead to confirm state')
# I validate the holiday request by clicking on "To Approve" button.
hol3.action_approve()
hol3.action_validate()
self.assertEqual(hol3.state, 'validate', 'hr_holidays: validation should lead to validate state')
# Check left days for casual leave: 19 days left
_check_holidays_status(hol3_status, 20.0, 1.0, 19.0, 19.0)
def test_10_leave_summary_reports(self):
# Print the HR Holidays(Summary Employee) Report through the wizard
ctx = {
'model': 'hr.employee',
'active_ids': [self.ref('hr.employee_admin'), self.ref('hr.employee_qdp'), self.ref('hr.employee_al')]
}
data_dict = {
'date_from': datetime.today().strftime('%Y-%m-01'),
'emp': [(6, 0, [self.ref('hr.employee_admin'), self.ref('hr.employee_qdp'), self.ref('hr.employee_al')])],
'holiday_type': 'Approved'
}
self.env.company.external_report_layout_id = self.env.ref('web.external_layout_standard').id
test_reports.try_report_action(self.env.cr, self.env.uid, 'action_hr_holidays_summary_employee', wiz_data=data_dict, context=ctx, our_module='hr_holidays')
def test_sql_constraint_dates(self):
# The goal is mainly to verify that a human friendly
# error message is triggered if the date_from is after
# date_to. Coming from a bug due to the new ORM 13.0
leave_vals = {
'name': 'Sick Time Off',
'holiday_status_id': self.env.ref('hr_holidays.holiday_status_cl').id,
'date_from': datetime.today().strftime('%Y-%m-11 19:00:00'),
'date_to': datetime.today().strftime('%Y-%m-10 10:00:00'),
'employee_id': self.ref('hr.employee_admin'),
'number_of_days': 1,
}
with mute_logger('odoo.sql_db'):
with self.assertRaises(IntegrityError):
with self.cr.savepoint():
self.env['hr.leave'].create(leave_vals)
leave_vals = {
'name': 'Sick Time Off',
'holiday_status_id': self.env.ref('hr_holidays.holiday_status_cl').id,
'date_from': datetime.today().strftime('%Y-%m-10 10:00:00'),
'date_to': datetime.today().strftime('%Y-%m-11 19:00:00'),
'employee_id': self.ref('hr.employee_admin'),
'number_of_days': 1,
}
leave = self.env['hr.leave'].create(leave_vals)
with mute_logger('odoo.sql_db'):
with self.assertRaises(IntegrityError): # No ValidationError
with self.cr.savepoint():
leave.write({
'date_from': datetime.today().strftime('%Y-%m-11 19:00:00'),
'date_to': datetime.today().strftime('%Y-%m-10 10:00:00'),
})
|
StarcoderdataPython
|
3320746
|
<gh_stars>0
from abc import ABC, abstractmethod
from django.conf import settings
from django.core.cache import cache
class BaseExcelClass(ABC):
"""
Base excel class will be use as abstract class
for making interface between the excel and csv
file(flat file) to update into server data base
as django model. Accept file only are excel and
CSV along.
.. notes::
Try not use pandas function directly as possible
"""
def __init__(self, user_id):
pass
@abstractmethod
def read_excel(self, name, **kargs):
"""Read the given file using name from in the
server.
:param name: [file name of the data contains]
:type name: [str]
"""
pass
@abstractmethod
def read_csv(self, name, **kargs):
"""Read the given file using name from in the
server.
:param name: [file name of the data contains]
:type name: [str]
"""
pass
@abstractmethod
def data(self):
"""This method return the data of the read
part.
"""
pass
@abstractmethod
def mapping_fields(self, options):
"""
Convert Field is a function that help mapping the
field(columns of excel)
with :model: `packages.ItemsList`.
@params options it is a dict() object
"""
pass
@abstractmethod
def paytm_process(self):
pass
@abstractmethod
def insert_db(self, user_id):
"""
Inserting the value of excel into db of :model: `packages.ItemsList`.
"""
temp = "%s%d" % (settings.BM_CURRENT_USER_UPLOAD_NAME, user_id)
if cache.get(temp):
cache.set(
                temp,
user_id,
settings.BM_CURRENT_USER_UPLOAD_CACHE_TIMEOUT,
)
@abstractmethod
def get_info(self):
pass
@abstractmethod
def api_name(self):
pass
class BaseExcelInterFaceException(Exception):
pass
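# ---------------------------------------------------------------------------
# Minimal concrete subclass sketch (illustrative only, not part of the
# original module). It assumes pandas is available; the class name, the
# column-mapping behaviour and the returned info are placeholders.
import pandas as pd
class SimpleFlatFileImporter(BaseExcelClass):
    """Toy implementation that just loads the file into a DataFrame."""
    def __init__(self, user_id):
        super().__init__(user_id)
        self._frame = None
    def read_excel(self, name, **kargs):
        self._frame = pd.read_excel(name, **kargs)
    def read_csv(self, name, **kargs):
        self._frame = pd.read_csv(name, **kargs)
    def data(self):
        return self._frame
    def mapping_fields(self, options):
        # Rename columns according to the provided mapping (placeholder logic).
        self._frame = self._frame.rename(columns=options)
    def paytm_process(self):
        pass
    def insert_db(self, user_id):
        super().insert_db(user_id)
    def get_info(self):
        return {"rows": 0 if self._frame is None else len(self._frame)}
    def api_name(self):
        return "simple-flat-file-importer"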
|
StarcoderdataPython
|
133084
|
from py_pdf_term._common.data import Term
from py_pdf_term.tokenizer import Token
from py_pdf_term.tokenizer.langs import JapaneseTokenClassifier
from ..base import BaseJapaneseCandidateTermFilter
class JapaneseProperNounFilter(BaseJapaneseCandidateTermFilter):
def __init__(self) -> None:
self._classifier = JapaneseTokenClassifier()
def is_candidate(self, scoped_term: Term) -> bool:
return not self._is_region_or_person(scoped_term)
def _is_region_or_person(self, scoped_term: Term) -> bool:
def is_region_or_person_token(token: Token) -> bool:
return (
(
token.pos == "名詞"
and token.category == "固有名詞"
and token.subcategory in {"人名", "地名"}
)
or self._classifier.is_modifying_particle(token)
or self._classifier.is_connector_symbol(token)
)
return all(map(is_region_or_person_token, scoped_term.tokens))
|
StarcoderdataPython
|
1798426
|
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from tinymce.models import HTMLField
EnhancedTextField = HTMLField if 'tinymce' in settings.INSTALLED_APPS else models.TextField
|
StarcoderdataPython
|
65804
|
import pytest
from tests.common.helpers.assertions import pytest_require
from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\
fanout_graph_facts
from tests.common.ixia.ixia_fixtures import ixia_api_serv_ip, ixia_api_serv_port,\
ixia_api_serv_user, ixia_api_serv_passwd, ixia_api, ixia_testbed
from tests.common.ixia.qos_fixtures import prio_dscp_map, all_prio_list, lossless_prio_list,\
lossy_prio_list
from files.helper import run_pfc_test
@pytest.mark.topology("tgen")
def test_pfc_pause_single_lossless_prio(ixia_api,
ixia_testbed,
conn_graph_facts,
fanout_graph_facts,
duthosts,
rand_one_dut_hostname,
enum_dut_portname_oper_up,
enum_dut_lossless_prio,
all_prio_list,
prio_dscp_map):
"""
Test if PFC can pause a single lossless priority
Args:
ixia_api (pytest fixture): IXIA session
ixia_testbed (pytest fixture): L2/L3 config of a T0 testbed
conn_graph_facts (pytest fixture): connection graph
fanout_graph_facts (pytest fixture): fanout graph
duthosts (pytest fixture): list of DUTs
rand_one_dut_hostname (str): hostname of DUT
enum_dut_portname_oper_up (str): name of port to test, e.g., 's6100-1|Ethernet0'
enum_dut_lossless_prio (str): name of lossless priority to test, e.g., 's6100-1|3'
all_prio_list (pytest fixture): list of all the priorities
prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
Returns:
None
"""
dut_hostname, dut_port = enum_dut_portname_oper_up.split('|')
dut_hostname2, lossless_prio = enum_dut_lossless_prio.split('|')
pytest_require(rand_one_dut_hostname == dut_hostname == dut_hostname2,
"Priority and port are not mapped to the expected DUT")
duthost = duthosts[rand_one_dut_hostname]
lossless_prio = int(lossless_prio)
pause_prio_list = [lossless_prio]
test_prio_list = [lossless_prio]
bg_prio_list = [p for p in all_prio_list]
bg_prio_list.remove(lossless_prio)
run_pfc_test(api=ixia_api,
testbed_config=ixia_testbed,
conn_data=conn_graph_facts,
fanout_data=fanout_graph_facts,
duthost=duthost,
dut_port=dut_port,
global_pause=False,
pause_prio_list=pause_prio_list,
test_prio_list=test_prio_list,
bg_prio_list=bg_prio_list,
prio_dscp_map=prio_dscp_map,
test_traffic_pause=True)
def test_pfc_pause_multi_lossless_prio(ixia_api,
ixia_testbed,
conn_graph_facts,
fanout_graph_facts,
duthosts,
rand_one_dut_hostname,
enum_dut_portname_oper_up,
lossless_prio_list,
lossy_prio_list,
prio_dscp_map):
"""
Test if PFC can pause multiple lossless priorities
Args:
ixia_api (pytest fixture): IXIA session
ixia_testbed (pytest fixture): L2/L3 config of a T0 testbed
conn_graph_facts (pytest fixture): connection graph
fanout_graph_facts (pytest fixture): fanout graph
duthosts (pytest fixture): list of DUTs
rand_one_dut_hostname (str): hostname of DUT
enum_dut_portname_oper_up (str): name of port to test, e.g., 's6100-1|Ethernet0'
lossless_prio_list (pytest fixture): list of all the lossless priorities
lossy_prio_list (pytest fixture): list of all the lossy priorities
prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
Returns:
None
"""
dut_hostname, dut_port = enum_dut_portname_oper_up.split('|')
pytest_require(rand_one_dut_hostname == dut_hostname,
"Port is not mapped to the expected DUT")
duthost = duthosts[rand_one_dut_hostname]
pause_prio_list = lossless_prio_list
test_prio_list = lossless_prio_list
bg_prio_list = lossy_prio_list
run_pfc_test(api=ixia_api,
testbed_config=ixia_testbed,
conn_data=conn_graph_facts,
fanout_data=fanout_graph_facts,
duthost=duthost,
dut_port=dut_port,
global_pause=False,
pause_prio_list=pause_prio_list,
test_prio_list=test_prio_list,
bg_prio_list=bg_prio_list,
prio_dscp_map=prio_dscp_map,
test_traffic_pause=True)
|
StarcoderdataPython
|
3335672
|
<reponame>UbiOps/command-line-interface
import ubiops as api
from datetime import datetime, timedelta
from ubiops_cli.utils import init_client, get_current_project
from ubiops_cli.src.helpers.formatting import print_item, format_logs_reference, format_logs_oneline, parse_datetime, \
print_list, format_json, format_datetime
from ubiops_cli.src.helpers.options import *
@click.group("logs", short_help="View your logs")
def commands():
"""View your logs."""
pass
@commands.command("list", short_help="List logs")
@DEPLOYMENT_NAME_OPTIONAL
@DEPLOYMENT_VERSION_OPTIONAL
@PIPELINE_NAME_OPTIONAL
@PIPELINE_VERSION_OPTIONAL
@PIPELINE_OBJECT_NAME
@BUILD_ID_OPTIONAL
@REQUEST_ID_OPTIONAL
@PIPELINE_REQUEST_ID_OPTIONAL
@SYSTEM
@START_DATE
@START_LOG
@DATE_RANGE
@LIMIT
@LOGS_FORMATS
def logs_list(deployment_name, deployment_version_name, pipeline_name, pipeline_version_name, pipeline_object_name,
request_id, pipeline_request_id, build_id, system, start_date, start_log, date_range, limit, format_):
"""Get the logs of your project.
Use the command options as filters.
"""
project_name = get_current_project(error=True)
client = init_client()
filters = {}
if deployment_name:
filters['deployment_name'] = deployment_name
if deployment_version_name:
filters['deployment_version'] = deployment_version_name
if pipeline_name:
filters['pipeline_name'] = pipeline_name
if pipeline_version_name:
filters['pipeline_version'] = pipeline_version_name
if pipeline_object_name:
filters['pipeline_object_name'] = pipeline_object_name
if build_id:
filters['build_id'] = build_id
if request_id:
filters['deployment_request_id'] = request_id
if pipeline_request_id:
filters['pipeline_request_id'] = pipeline_request_id
if system is not None:
filters['system'] = system
if start_date is not None:
try:
start_date = format_datetime(parse_datetime(start_date), fmt='%Y-%m-%dT%H:%M:%SZ')
except ValueError:
raise Exception("Failed to parse start_date. Please use iso-format, "
"for example, '2020-01-01T00:00:00.000000Z'")
elif start_date is None and start_log is None:
start_date = str(datetime.now())
log_filters = api.LogsCreate(filters=filters, date=start_date, id=start_log, date_range=date_range, limit=limit)
logs = client.projects_log_list(project_name=project_name, data=log_filters)
client.api_client.close()
if format_ == 'json':
click.echo(format_json(logs))
return
if len(logs) > 0:
if format_ == 'oneline':
lines = format_logs_oneline(logs)
elif format_ == 'reference':
lines = format_logs_reference(logs)
elif format_ == 'extended':
lines = format_logs_reference(
logs,
extended=['deployment_request_id', 'pipeline_request_id', 'deployment_name', 'deployment_version',
'pipeline_name', 'pipeline_version', 'pipeline_object_name', 'build_id']
)
else:
lines = format_logs_reference(logs)
click.echo_via_pager(lines)
elif start_date:
starting_point = parse_datetime(start_date).isoformat()
if date_range > 0:
end_point = (parse_datetime(start_date) + timedelta(seconds=date_range)).isoformat()
else:
end_point = (parse_datetime(start_date) - timedelta(seconds=abs(date_range))).isoformat()
click.echo("No logs found between <%s> and <%s>" % (starting_point, end_point))
@commands.command("get", short_help="Get details of a log")
@LOG_ID
@GET_FORMATS
def logs_get(log_id, format_):
"""
\b
Get more details of a log:
- date
- deployment_name
- deployment_version_name
- pipeline_name
- pipeline_version_name
- pipeline_object_name
- request_id
- pipeline_request_id
- system (boolean)
"""
project_name = get_current_project(error=True)
client = init_client()
log_filters = api.LogsCreate(filters={}, id=log_id, limit=1)
log = client.projects_log_list(project_name=project_name, data=log_filters)[0]
client.api_client.close()
print_item(
log,
row_attrs=['id', 'date', 'log'],
required_front=['id', 'date', 'system'],
optional=['deployment_request_id', 'pipeline_request_id', 'deployment_name', 'deployment_version',
'pipeline_name', 'pipeline_version', 'pipeline_object_name', 'build_id'],
required_end=['log'],
rename={'deployment_version': 'deployment_version_name', 'pipeline_version': 'pipeline_version_name'},
fmt=format_
)
@click.group(["audit_events", "audit"], short_help="View your audit events")
def audit_events():
"""View your audit events."""
pass
@audit_events.command("list", short_help="List audit events")
@DEPLOYMENT_NAME_OPTIONAL
@PIPELINE_NAME_OPTIONAL
@AUDIT_LIMIT
@OFFSET
@AUDIT_ACTION
@LIST_FORMATS
def audit_list(deployment_name, pipeline_name, format_, **kwargs):
"""List the audit events.
Use the command options as filters.
"""
project_name = get_current_project(error=True)
client = init_client()
if deployment_name and pipeline_name:
raise Exception("Please, filter either on deployment or pipeline name, not both")
elif deployment_name:
events = client.deployment_audit_events_list(project_name=project_name, deployment_name=deployment_name,
**kwargs)
elif pipeline_name:
events = client.pipeline_audit_events_list(project_name=project_name, pipeline_name=pipeline_name, **kwargs)
else:
events = client.project_audit_events_list(project_name=project_name, **kwargs)
client.api_client.close()
print_list(events, ['date', 'action', 'user', 'event'], fmt=format_, pager=len(events) > 10)
|
StarcoderdataPython
|
1672847
|
<gh_stars>0
import pandas as pd
import argparse
import sys
parser = argparse.ArgumentParser(description='Reformats a met-office weather data file. Input data has one row per year and one column per month. Output data has a date column and a value column.')
parser.add_argument('data_file',metavar='DATA_FILE', help='Data file containing met office weather stats.')
def get_month_data(month_data):
"""Takes a two column DataFrame and returns a one column DataFrame where the index is a pandas period"""
# Each pair of columns contains data for a specific month. What month is this?
month = month_data.columns[0]
# Given a year, return a Period object representing this month in that year
def get_period(year):
return pd.Period('{0} {1}'.format(month,year))
# Change the index of the dataframe to be the monthly period
    month_data.index = list(map(get_period, full_data.iloc[:, 2]))
# Rename the columns
month_data.columns = ['value', 'year']
month_data.index.name = 'month'
# Remove the year column, we don't need it anymore.
return month_data.drop('year', axis=1)
def unstack_data(full_data):
"""
Takes a dataframe with monthly columns and yearly rows. Returns a single column
dataframe where each row is a specific month of a specific year.
"""
# Loop over columns of the DataFrame in groups of 2
# and feed them to get_month_data
monthly_data = [ get_month_data(full_data.iloc[:, i:i+2]) for i in range(1,25,2)]
# Add all the data for the individual months together
unstacked_data = pd.concat(monthly_data)
    return unstacked_data.sort_index()
if __name__ == '__main__':
args = parser.parse_args()
full_data = pd.read_csv(args.data_file, delim_whitespace=True,
skiprows=7)
unstacked_data = unstack_data(full_data)
# Write the new dataframe to stdout
try:
unstacked_data.to_csv(sys.stdout)
# Don't fall over if we pipe the output to head
except IOError:
pass
|
StarcoderdataPython
|
3253754
|
import b
class C(object):
def foo(self):
b = B()
|
StarcoderdataPython
|
1789082
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 14:23:16 2019
@author: Sikander
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from dateutil.relativedelta import relativedelta
from datetime import datetime
import pickle
from scipy.spatial.distance import jensenshannon
from scipy.stats import entropy
import networkx as nx
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from sklearn.decomposition import PCA
import pandas as pd
import matplotlib as mpl
from matplotlib import cm
#PRELIMINARY
created = {2005: 5916, 2006: 7934, 2007: 11402, 2008: 11176,
2009: 14611, 2010: 18027, 2011: 14872, 2012: 7382, 2013: 5519,
2014: 10264, 2015: 10311, 2016: 8254}
dob = {1970: 57, 1971: 50, 1972: 42, 1973: 46, 1974: 47, 1975: 64, 1976: 49,
1977: 52, 1978: 54, 1979: 61, 1980: 64, 1981: 61, 1982: 77, 1983: 70,
1984: 71, 1985: 88, 1986: 83, 1987: 75, 1988: 79, 1989: 82, 1990: 98,
1991: 69, 1992: 79, 1993: 59, 1994: 62, 1995: 59, 1996: 42, 1997: 37,
1998: 31, 1999: 24, 2000: 15, 2001: 14, 2002: 4, 2003: 7, 2004: 2,
2005: 3, 2006: 2, 2007: 6, 2008: 3, 2009: 3, 2010: 2, 2011: 2, 2012: 3,
2013: 7}
eptype = {'A Person Living with Epilepsy': 1693,
'A Parent of a Child with Epilepsy': 329,
'A Family Member or Caregiver': 143, 'Healthcare Professional': 83}
control = {'Not controlled': 982, 'Controlled': 799}
country = {'BD': 4, 'FR': 1, 'DK': 1, 'MZ': 1, 'HR': 1, 'TR': 5, 'BO': 1,
'JP': 1, 'BT': 1, 'CH': 3, 'DZ': 3, 'MK': 1, 'BR': 3, 'CO': 2,
'GR': 2, 'PR': 3, 'RU': 1, 'LB': 1, 'PT': 3, 'NO': 6, 'TT': 1,
'AF': 1, 'DE': 3, 'NG': 3, 'TN': 1, 'EE': 1, 'NZ': 14, 'LU': 1,
'LR': 1, 'LS': 1, 'TH': 2, 'PE': 1, 'NP': 5, 'PK': 10, 'AT': 1,
'RO': 2, 'EG': 3, 'PL': 1, 'EC': 1, 'BE': 1, 'GT': 1, 'AE': 4,
'VE': 2, 'CM': 1, 'CL': 1, 'IQ': 1, 'BH': 1, 'CA': 88, 'IR': 2,
'ZA': 16, 'VN': 6, 'AL': 1, 'GG': 1, 'CY': 2, 'AR': 4, 'AU': 32,
'IL': 3, 'IN': 53, 'BA': 1, 'NL': 3, 'ID': 2, 'IE': 5, 'PH': 15,
'ES': 4, 'GH': 1, 'MA': 2, 'KE': 2, 'SG': 2, 'ZM': 1, 'GE': 2,
'QA': 2, 'MT': 1, 'SI': 1, 'BW': 2, 'TZ': 2, 'IT': 1,
'HN': 1, 'UG': 1, 'SD': 2, 'UA': 4, 'MX': 8, 'SE': 3, 'GB': 124}
#PRELIMINARY 2
us_states = {'WA': 45, 'DE': 13, 'DC': 9, 'WI': 33, 'WV': 13, 'HI': 5, 'FL': 110,
'WY': 3, 'NH': 11, 'NJ': 51, 'NM': 16, 'TX': 179, 'LA': 30, 'NC': 54,
'ND': 2, 'NE': 10, 'TN': 49, 'NY': 107, 'PA': 77, 'AK': 4, 'NV': 20,
'VA': 55, 'CO': 53, 'CA': 184, 'AL': 30, 'AR': 15, 'VT': 3, 'IL': 81,
'GA': 48, 'IN': 31, 'IA': 18, 'OK': 24, 'AZ': 43, 'ID': 14, 'CT': 22,
'ME': 11, 'MD': 44, 'MA': 53, 'OH': 65, 'UT': 21, 'MO': 37, 'MN': 26,
'MI': 50, 'RI': 10, 'KS': 16, 'MT': 12, 'MS': 14, 'PR': 4, 'SC': 41,
'KY': 20, 'OR': 29, 'SD': 8}
us_st_pop = {'WA': 7.54, 'DE': 0.967, 'DC': 0.702, 'WI': 5.81, 'WV': 1.81, 'HI': 1.42,
'FL': 21.3, 'WY': 0.578, 'NH': 1.36, 'NJ': 8.91, 'NM': 2.1, 'TX': 27.8,
'LA': 4.66, 'NC': 10.4, 'ND': 0.76, 'NE': 3.03, 'TN': 6.77, 'NY': 19.5,
'PA': 12.8, 'AK': 0.737, 'NV': 3.03, 'VA': 8.52, 'CO': 5.7, 'CA': 39.6,
'AL': 4.89, 'AR': 3.01, 'VT': 0.626, 'IL': 12.7, 'GA': 10.5, 'IN': 6.69,
'IA': 3.12, 'OK': 3.94, 'AZ': 7.17, 'ID': 1.75, 'CT': 3.57, 'ME': 1.34,
'MD': 6.04, 'MA': 6.9, 'OH': 11.7, 'UT': 3.16, 'MO': 6.13, 'MN': 5.61,
'MI': 10.0, 'RI': 1.06, 'KS': 2.91, 'MT': 1.06, 'MS': 2.99, 'PR': 3.2,
'SC': 5.08, 'KY': 4.47, 'OR': 4.19, 'SD': 0.882}
norm_state_demo = {'WA': 5.968169761273209,'DE': 13.44364012409514,'DC': 12.820512820512821,
'WI': 5.679862306368331, 'WV': 7.18232044198895, 'HI': 3.5211267605633805,
'FL': 5.164319248826291, 'WY': 5.190311418685122, 'NH': 8.088235294117647,
'NJ': 5.723905723905724, 'NM': 7.619047619047619, 'TX': 6.438848920863309,
'LA': 6.437768240343347, 'NC': 5.1923076923076925, 'ND': 2.6315789473684212,
'NE': 3.3003300330033007, 'TN': 7.237813884785821, 'NY': 5.487179487179487,
'PA': 6.015625, 'AK': 5.4274084124830395, 'NV': 6.6006600660066015, 'VA': 6.455399061032864,
'CO': 9.298245614035087, 'CA': 4.646464646464646, 'AL': 6.134969325153374, 'AR': 4.983388704318937,
'VT': 4.792332268370607, 'IL': 6.377952755905512, 'GA': 4.571428571428571, 'IN': 4.633781763826606,
'IA': 5.769230769230769, 'OK': 6.091370558375635, 'AZ': 5.99721059972106, 'ID': 8.0,
'CT': 6.162464985994398, 'ME': 8.208955223880597, 'MD': 7.28476821192053, 'MA': 7.6811594202898545,
'OH': 5.555555555555556, 'UT': 6.6455696202531644, 'MO': 6.035889070146819, 'MN': 4.634581105169341,
'MI': 5.0, 'RI': 9.433962264150942, 'KS': 5.498281786941581, 'MT': 11.320754716981131, 'MS': 4.682274247491638,
'PR': 1.25, 'SC': 8.070866141732283, 'KY': 4.47427293064877, 'OR': 6.921241050119331, 'SD': 9.070294784580499}
uk_countries = {'England': 41, 'Northern Ireland': 2, 'Scotland': 11, 'Wales': 5}
ca_provinces = {'ON': 32, 'AB': 9, 'NL': 1, 'MB': 5, 'NB': 2, 'BC': 16, 'SK': 5, 'QC': 3, 'NS': 2}
in_states = {'Telangana': 5, 'Karnataka': 5, 'Haryana': 2, 'Andhra Pradesh': 2,
'Gujarat': 2, 'Kerala': 1, 'Uttrakhand': 1, 'Maharashtra': 7,
'Tamil Nadu': 5, 'Delhi': 7, 'Rajasthan': 2, 'West Bengal': 2,
'Jammu & Kashmir': 2, 'Uttar Pradesh': 2, 'Madhya Pradesh': 1,
'Chandigarh': 1, 'Assam': 1, 'Punjab': 1}
au_states = {'VIC': 7, 'WA': 2, 'TAS': 1, 'ACT': 1, 'QLD': 7, 'SA': 2, 'NSW': 3}
#LEVEL OF ENGAGEMENT
num_posts = {1: 12314, 2: 4009, 3: 1963, 4: 1114, 5: 679, 6: 476, 7: 369, 8: 273,
9: 218, 10: 147, 11: 133, 12: 105, 13: 88, 14: 87, 15: 76, 16: 71,
17: 50, 18: 55, 19: 31, 20: 46, 21: 30, 22: 28, 23: 32, 24: 32, 25: 20,
26: 29, 27: 20, 28: 18, 29: 16, 30: 15, 31: 19, 32: 11, 33: 14, 34: 10,
35: 5, 36: 12, 37: 5,38: 10, 39: 7, 40: 8, 41: 11,
42: 7, 43: 5, 44: 5, 45: 4, 46: 5, 47: 4, 48: 10, 49: 7, 50: 5, 51: 5,
52: 4, 53: 3, 54: 7, 55: 1,
56: 2, 57: 3, 58: 9, 59: 4, 60: 2, 61: 3, 62: 3, 63: 1, 64: 4,
65: 4, 66: 3, 67: 2, 68: 1, 69: 4, 70: 5, 71: 4, 72: 4, 73: 3, 74: 1,
75: 1, 76: 5, 77: 1, 78: 1, 79: 1, 80: 2, 81: 4, 82: 1, 83: 1, 84: 2,
85: 5, 86: 3, 87: 1, 88: 1, 89: 1, 90: 2, 93: 1, 94: 2, 95: 2, 96: 1,
97: 2, 99: 2, 100: 1, 101: 1, 103: 2, 104: 1, 105: 2, 106: 1, 108: 3,
109: 1, 111: 3, 113: 3, 116: 1, 117: 1, 118: 1, 119: 1, 122: 3, 124: 1,
125: 1, 129: 1, 131: 1, 132: 1, 133: 2, 137: 3, 140: 3, 141: 1, 143: 1,
144: 3, 145: 2, 148: 1, 152: 1, 154: 1, 158: 1, 160: 1, 162: 1, 169: 1,
172: 1, 175: 1, 183: 1, 184: 1, 187: 1, 193: 1, 194: 1, 198: 1, 200: 1,
201: 1, 205: 1, 211: 1, 215: 1, 217: 1, 220: 1, 221: 1, 229: 1, 233: 1,
253: 1, 255: 1, 259: 1, 268: 1, 269: 1, 271: 1, 277: 2, 278: 1, 283: 1,
284: 1, 285: 1, 336: 1, 349: 1, 350: 1, 353: 1, 360: 1, 395: 1, 399: 1,
413: 1, 457: 1, 568: 1, 605: 1, 655: 1, 692: 1, 698: 1, 713: 1, 734: 1,
918: 1, 1022: 1, 1034: 1, 1091: 1, 1195: 1, 1528: 1, 1609: 1, 4284: 1}
num_chats = {1: 329, 2: 133, 3: 66, 4: 69, 5: 53, 6: 56, 7: 35, 8: 37, 9: 30, 10: 31,
11: 29, 12: 24, 13: 20, 14: 28, 15: 22, 16: 30, 17: 27, 18: 26, 19: 20,
20: 28, 21: 22, 22: 22, 23: 22, 24: 13, 25: 18, 26: 22, 27: 15, 28: 21,
29: 16, 30: 16, 31: 10, 32: 12, 33: 15, 34: 12, 35: 15, 36: 12, 37: 12,
38: 11, 39: 12, 40: 11, 41: 6, 42: 8, 43: 10, 44: 7, 45: 11, 46: 5,
47: 6, 48: 6, 49: 8, 50: 7, 51: 8, 52: 6, 53: 2, 54: 4, 55: 4, 56: 4,
57: 3, 58: 6, 59: 4, 60: 4, 61: 7, 62: 2, 63: 6, 64: 4, 65: 9, 66: 4,
67: 4, 68: 4, 69: 1, 70: 5, 71: 2, 72: 6, 73: 4, 74: 1, 75: 6, 76: 3}
#MEMBER TYPE ANALYSIS
created_PLE = {2016: 239, 2005: 7, 2006: 4, 2007: 19, 2008: 23, 2009: 31,
2010: 42, 2011: 51, 2012: 48, 2013: 59, 2014: 485, 2015: 685}
created_PCE = {2016: 43, 2009: 4, 2010: 3, 2011: 9, 2012: 2, 2013: 10, 2014: 102, 2015: 156}
created_FMC = {2016: 20, 2008: 2, 2009: 1, 2010: 2, 2011: 2, 2012: 2, 2013: 1, 2014: 42, 2015: 71}
created_HP = {2016: 14, 2008: 1, 2009: 4, 2011: 1, 2012: 1, 2013: 3, 2014: 19, 2015: 40}
dob_PLE = {1970: 35, 1971: 34, 1972: 27, 1973: 27, 1974: 32, 1975: 44, 1976: 33,
1977: 24, 1978: 32, 1979: 36, 1980: 36, 1981: 38, 1982: 51, 1983: 52,
1984: 37, 1985: 56, 1986: 63, 1987: 46, 1988: 57, 1989: 50, 1990: 55,
1991: 47, 1992: 58, 1993: 47, 1994: 44, 1995: 41, 1996: 29, 1997: 28,
1998: 23, 1999: 20, 2000: 5, 2001: 10, 2002: 2, 2006: 2, 2007: 2,
2009: 1, 2012: 1, 2014: 41, 2015: 44, 2016: 15}
dob_PCE = {1970: 11, 1971: 4, 1972: 8, 1973: 10, 1974: 7, 1975: 12, 1976: 8,
1977: 15, 1978: 12, 1979: 11, 1980: 9, 1981: 6, 1982: 12, 1983: 7,
1984: 8, 1985: 9, 1986: 5, 1987: 9, 1988: 5, 1989: 7, 1990: 6, 1991: 5,
1992: 3, 1993: 2, 1994: 1, 1995: 4, 1996: 1, 1999: 1, 2001: 1, 2002: 1,
2003: 5, 2004: 1, 2005: 3, 2007: 2, 2008: 3, 2009: 1, 2010: 2, 2011: 2,
2012: 2, 2013: 2, 2014: 16, 2015: 21, 2016: 7}
dob_FMC = {1970: 3, 1971: 3, 1972: 1, 1973: 2, 1974: 2, 1975: 1, 1976: 1, 1977: 3,
1978: 1, 1979: 5, 1981: 1, 1982: 2, 1983: 3, 1985: 4, 1986: 2, 1987: 2,
1988: 4, 1989: 5, 1990: 8, 1991: 5, 1992: 4, 1993: 2, 1994: 6, 1996: 1,
1999: 1, 2000: 1, 2003: 1, 2004: 1, 2013: 1, 2014: 7, 2015: 10, 2016: 1}
dob_HP = {1970: 3, 1971: 3, 1972: 1, 1973: 2, 1974: 2, 1975: 1, 1976: 1, 1977: 3,
1978: 1, 1979: 5, 1981: 1, 1982: 2, 1983: 3, 1985: 4, 1986: 2, 1987: 2,
1988: 4, 1989: 5, 1990: 8, 1991: 5, 1992: 4, 1993: 2, 1994: 6, 1996: 1,
1999: 1, 2000: 1, 2003: 1, 2004: 1, 2013: 1, 2014: 7, 2015: 10, 2016: 1}
#TIMELINES
chats_tl_top = np.delete(np.load("chats_tl_top.npy"), -1, 1)
chats_tl_all = np.load("chats_tl_all.npy")
chats_tl_wkly = np.load("chats_tl_wkly.npy")
forums_tl_top = np.delete(np.load("forums_tl_top.npy"), -1, 1)
forums_tl_all = np.load("forums_tl_all.npy")
forums_tl_wkly = np.load("forums_tl_wkly.npy")
chats_range = ['7-2014','8-2014','9-2014','10-2014','11-2014','12-2014','1-2015','2-2015','3-2015']
forums_range = ['2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016']
forums_ticks = [2, 14, 26, 38, 50, 62, 74, 86, 98, 110, 122, 134]
#TOP USERS {uid: #posts/chats}
usrs_chats = {124741: 3159,142931: 3188,206786: 3234, 91813: 3614, 133186: 3632, 102710: 3699,
74879: 3753, 226796: 3846, 123406: 3906, 103467: 4204, 93147: 4258, 168131: 4631,
117586: 4719, 153391: 5356, 203441: 5358, 123996: 6112, 87564: 7539, 95416: 7711,
44294: 10456, 98914: 11613, 188006: 14238, 90109: 17011, 94861: 18854, 23487: 20288,
214556: 22108, 40886: 45498}
usrs_forums = {13600: 336,2495: 349,12416: 350,39104: 353, 22834: 360, 15797: 395,
70585: 399, 16756: 413, 26464: 457, 33641: 568, 3421: 605, 1837: 655,
42622: 692, 43851: 698, 10112: 713, 27976: 734, 53211: 918, 13993: 1022,
2731: 1034, 1998: 1091, 40321: 1195, 51501: 1528, 101498: 1609, 0: 4284}
#FORUM topics
forum_topics = {u'Fundraising and Awareness': 118, u'Products, Resources, Helpful Links': 588,
u'Women With Epilepsy': 6309, u'Teens Speak Up! ': 3, u'Insurance Issues': 425,
u'Medication Issues': 17723, u'Epilepsy: Insights & Strategies': 178,
u'Corner Booth': 3504, u'Living With Epilepsy - Adults': 34304,
u'Men With Epilepsy': 947, u'Surgery and Devices': 4124,
u'Lennox Gastaut Syndrome': 8, u'Veterans with seizures': 115,
u'Family & Friends': 4730, u'Share Your #DareTo Go The Distance Story': 31,
u'Parents & Caregivers': 7025, u'Epilepsy.com Help': 1755, u'Athletes vs Epilepsy Goal Posts': 1,
u'Epilepsy and College ': 90, u'New to Epilepsy.com': 14876, u'Living With Epilepsy - Youth': 3843,
u'Creative Corner': 251, u'Diagnostic Dilemmas and Testing': 5190, u'Complementary Therapies': 1674,
u'Teen Zone': 2113, u'My Epilepsy Diary': 674, u'In Memoriam': 7, u'Advocate for Epilepsy': 465,
u'Infantile Spasms & Tuberous Sclerosis': 4}
#Tagging results
topics_dct = {'Fundraising and Awareness': 2003194, 'Products, Resources, Helpful Links': 2003130,
'Women With Epilepsy': 2003119, 'Teens Speak Up!': 2010661, 'Insurance Issues': 2003133,
'Medication Issues': 2003121, 'Insights & Strategies': 2003197,
'Share Your #DareTo Go The Distance Story': 2036596, 'Living With Epilepsy - Adults': 2003117,
'Men With Epilepsy': 2003129, 'Surgery and Devices': 2003122, 'Lennox Gastaut Syndrome': 2008441,
'Veterans with seizures': 2003180, 'Family & Friends': 2003118, 'Corner Booth': 2003123,
'Parents & Caregivers': 2003131, 'Epilepsy.com Help': 2003127, 'Athletes vs Epilepsy Goal Posts': 2044536,
'Epilepsy and College': 2003304, 'New to Epilepsy.com': 2003125, 'Living With Epilepsy - Youth': 2003128,
'Creative Corner': 2003134, 'Diagnostic Dilemmas and Testing': 2003126, 'Complementary Therapies': 2003124,
'Teen Zone': 2003120, 'My Epilepsy Diary': 2003228, 'In Memoriam': 2014491, 'Advocate for Epilepsy': 2003132,
'Infantile Spasms & Tuberous Sclerosis': 2008446}
#{'Topic name': (Mathced posts, Total Posts)}
topics_match = {'Fundraising and Awareness': (77, 118), 'Products, Resources, Helpful Links': (416, 588),
'Women With Epilepsy': (5664, 6309), 'Insurance Issues': (273, 425), 'Medication Issues': (15831, 17723),
'Share Your #DareTo Go The Distance Story': (24, 31), 'Living With Epilepsy - Adults': (28293, 34304),
'Men With Epilepsy': (796, 947), 'Surgery and Devices': (3671, 4124), 'Lennox Gastaut Syndrome': (4, 8),
'Veterans with seizures': (94, 115), 'Teens Speak Up!': (2, 3), 'Family & Friends': (3912, 4730),
'Epilepsy and College': (64, 90), 'Insights & Strategies': (145, 178), 'Corner Booth': (1667, 3504),
'Parents & Caregivers': (6173, 7025), 'Epilepsy.com Help': (1049, 1755), 'Athletes vs Epilepsy Goal Posts': (1, 1),
'New to Epilepsy.com': (12978, 14876), 'Living With Epilepsy - Youth': (3345, 3843), 'Creative Corner': (142, 251),
'Diagnostic Dilemmas and Testing': (4498, 5190), 'Complementary Therapies': (1421, 1674), 'Teen Zone': (1674, 2113),
'My Epilepsy Diary': (525, 674), 'In Memoriam': (7, 7), 'Advocate for Epilepsy': (350, 465),
'Infantile Spasms & Tuberous Sclerosis': (4, 4)}
#TAGGING ANALYSIS
matchesperchat = pickle.load(open('tag_analysis/matchesperpost_c.pkl', 'rb'))
matchesperpost = pickle.load(open('tag_analysis/matchesperpost_p.pkl', 'rb'))
matchesperuser_c = pickle.load(open('tag_analysis/matchesperuser_c.pkl', 'rb'))
matchesperuser_p = pickle.load(open('tag_analysis/matchesperuser_p.pkl', 'rb'))
matchpostperuse_c = pickle.load(open('tag_analysis/matchpostsperuser_c.pkl', 'rb'))
matchpostperuse_p = pickle.load(open('tag_analysis/matchpostsperuser_p.pkl', 'rb'))
parents_c = pickle.load(open('tag_analysis/parents_c.pkl', 'rb'))
parents_p = pickle.load(open('tag_analysis/parents_p.pkl', 'rb'))
types_c = pickle.load(open('tag_analysis/types_c.pkl', 'rb'))
types_p = pickle.load(open('tag_analysis/types_p.pkl', 'rb'))
#COMENTIONS
double_menchies = pickle.load(open('double_mentions.pkl', 'rb'))
#TF_IDF
topic_number_dct = pickle.load(open('TF-IDF/topic_number_dct.pkl', 'rb'))
def barchart(dct, title, yscale, xtrot=None):
plt.figure()
    plt.bar(list(dct.keys()), list(dct.values()), align='center')
if xtrot:
plt.xticks(rotation='vertical')
plt.ylabel("Count", fontsize=18)
plt.yscale(yscale)
plt.title(title, fontsize=24)
#plt.gcf().subplots_adjust(bottom=0.25) #May not need
plt.show()
def horiz_bar(dct, title, xlabel, ylabel):
y_pos = np.arange(len(dct))
plt.figure()
vals = [v for v in dct.values()]
keys = [k for k in dct.keys()]
plt.barh(y_pos, vals, align='center')
plt.yticks(y_pos, keys)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.show()
def histo(dct, bins, title, yscale):
plt.figure()
dat = []
for k in dct:
dat += dct[k]*[k]
plt.hist(dat, bins=bins)
plt.yscale(yscale)
plt.title(title, fontsize=24)
plt.ylabel("Count", fontsize=18)
plt.show()
def samplesize(dct):
size = 0
for k in dct:
size += dct[k]
return size
def tally(dct, key):
if key in dct:
dct[key] += 1
else:
dct[key] = 1
def sortkeys(dct, order=False):
nd = dict()
for key in sorted(dct.keys(), reverse=order):
nd[key] = dct[key]
return nd
def sortvals(dct, order=False):
nd = dict()
for k in sorted(dct, key=dct.get, reverse=order):
nd[k] = dct[k]
return nd
def topitems(dct, top):
count = 0
nd = dict()
for key in dct.keys():
count += 1
if count <= top:
nd[key] = dct[key]
return nd
def monthsInRange(beg, end):
result = []
beg = datetime.strptime(beg, '%Y-%m-%d %H:%M:%S')
end = datetime.strptime(end, '%Y-%m-%d %H:%M:%S')
while beg < end:
result.append(str(beg.month) + '-' + str(beg.year))
beg += relativedelta(months=1)
result.append(str(end.month) + '-' + str(end.year))
return result
def visualizeTimeline(tl_arr, name, usrnum, clim, labels, ticks=None):
plasma_cm = cm.get_cmap('plasma', 512)
newcmp = ListedColormap(plasma_cm(np.linspace(0.25, 1, 1000)))
plt.matshow(tl_arr, cmap=newcmp, aspect="auto")
plt.clim(0, clim)
plt.colorbar().set_label(label=name + ' per month', size=18)
plt.title("Visualization of Top " + usrnum + " Timelines", fontsize=24)
if ticks:
plt.xticks(ticks, labels)
else:
plt.xticks(range(len(labels)), labels)
plt.xlabel('Month', fontsize=16)
plt.ylabel('Users', fontsize=16)
plt.show()
def tot_usage(tl_arr, name, labels=None, ticks=None): #total month-by-month activity
usg = np.sum(tl_arr, axis=0)
plt.figure()
plt.plot([i for i in range(len(usg))], usg)
# if ticks:
# plt.xticks(ticks, labels)
# else:
# plt.xticks(range(len(labels)), labels)
plt.title("Total usage over time - " + name)
plt.xlabel('Month', fontsize=16)
plt.ylabel('Number of ' + name, fontsize=16)
plt.show()
def indiv_usg(tl_arr, name): #total and consecutive month-by-month activity for each user
total = dict()
consecutive = dict()
for user in tl_arr:
tot_eng = 0
cons_eng = []
counter = 0
for mth in user:
if mth == 0:
cons_eng.append(counter)
counter = 0
else:
counter += 1
tot_eng += 1
tally(total, tot_eng)
cons = 0
if len(cons_eng) == 0:
cons = tot_eng
else:
cons = max(cons_eng)
tally(consecutive, cons)
histo(total, 105, name + " - Total Months of Engagement", 'log')
plt.ylabel("Number of users")
plt.xlabel("Total months of engagement per user", fontsize=18)
histo(consecutive, 50, name + " - Consecutive Months of Engagement", 'log')
plt.ylabel("Number of users")
plt.xlabel("Consecutive months of engagement per user", fontsize=18)
def compareTermRank(t1, t2):
lst1 = []
lst2 = []
for k in t1:
if k in t2:
lst1.append(t1[k])
lst2.append(t2[k])
else:
lst1.append(t1[k])
lst2.append(0)
for k in t2:
if k not in t1:
lst1.append(0)
lst2.append(t2[k])
else:
continue
js = jensenshannon(lst1, lst2)
kl = entropy(lst1, lst2)
print("Jensen-Shannon divergence: " + str(js))
print("Kullback-Leibler divergence: " + str(kl))
|
StarcoderdataPython
|
3319676
|
<gh_stars>0
"""Create Flask application object.
This module creates the Flask appliaction object so that each
module can import it safely and the __name__ variable will always
resolve to the correct package.
"""
from flask import Flask
from flask_cors import CORS, cross_origin
APP = Flask(__name__)
CORS(APP)
APP.config.update(
REMOTE_HOSTS=['192.168.0.11', '192.168.0.12', '192.168.0.13'],
REMOTE_HOST_USERNAME='pi',
REMOTE_IMAGE_DIRECTORY='/home/pi/elephant_vending_machine/images'
)
# Circular imports are bad, but views are not used here, only imported, so it's OK
# pylint: disable=wrong-import-position
import elephant_vending_machine.views
|
StarcoderdataPython
|
3289351
|
from .fplEntity import FplEntity
class FplDailyUsageSensor(FplEntity):
    def __init__(self, coordinator, config, account):
        super().__init__(coordinator, config, account, "Daily Usage")
        self._state = None
    @property
    def state(self):
        data = self.getData("daily_usage")
        try:
            self._state = data[-1]["cost"]
        except (IndexError, KeyError, TypeError):
            pass
        return self._state
def defineAttributes(self):
"""Return the state attributes."""
data = self.getData("daily_usage")
attributes = {}
attributes["friendly_name"] = "Daily Usage"
attributes["device_class"] = "monetary"
attributes["state_class"] = "total_increasing"
attributes["unit_of_measurement"] = "$"
if data is not None:
if (
(len(data) > 0)
and (data[-1] is not None)
and (data[-1]["readTime"] is not None)
):
attributes["date"] = data[-1]["readTime"]
if (
(len(data) > 1)
and (data[-2] is not None)
and (data[-2]["readTime"] is not None)
):
attributes["last_reset"] = data[-2]["readTime"]
return attributes
@property
def icon(self):
return "mdi:currency-usd"
class FplDailyUsageKWHSensor(FplEntity):
    def __init__(self, coordinator, config, account):
        super().__init__(coordinator, config, account, "Daily Usage KWH")
        self._state = None
    @property
    def state(self):
        data = self.getData("daily_usage")
        try:
            self._state = data[-1]["usage"]
        except (IndexError, KeyError, TypeError):
            pass
        return self._state
def defineAttributes(self):
"""Return the state attributes."""
data = self.getData("daily_usage")
attributes = {}
attributes["friendly_name"] = "Daily Usage"
attributes["device_class"] = "energy"
attributes["state_class"] = "total_increasing"
attributes["unit_of_measurement"] = "kWh"
if data is not None:
if (
(len(data) > 0)
and (data[-1] is not None)
and (data[-1]["readTime"] is not None)
):
attributes["date"] = data[-1]["readTime"]
if (
(len(data) > 1)
and (data[-2] is not None)
and (data[-2]["readTime"] is not None)
):
attributes["last_reset"] = data[-2]["readTime"]
return attributes
@property
def icon(self):
return "mdi:flash"
class FplDailyReceivedKWHSensor(FplEntity):
    def __init__(self, coordinator, config, account):
        super().__init__(coordinator, config, account, "Daily Received KWH")
        self._state = None
    @property
    def state(self):
        data = self.getData("daily_usage")
        try:
            self._state = data[-1]["netReceivedKwh"]
        except (IndexError, KeyError, TypeError):
            pass
        return self._state
def defineAttributes(self):
"""Return the state attributes."""
data = self.getData("daily_usage")
attributes = {}
attributes["friendly_name"] = "Daily Return to Grid"
attributes["device_class"] = "energy"
attributes["state_class"] = "total_increasing"
attributes["unit_of_measurement"] = "kWh"
if data is not None:
if (
(len(data) > 0)
and (data[-1] is not None)
and (data[-1]["readTime"] is not None)
):
attributes["date"] = data[-1]["readTime"]
if (
(len(data) > 1)
and (data[-2] is not None)
and (data[-2]["readTime"] is not None)
):
attributes["last_reset"] = data[-2]["readTime"]
return attributes
@property
def icon(self):
return "mdi:flash"
class FplDailyDeliveredKWHSensor(FplEntity):
    def __init__(self, coordinator, config, account):
        super().__init__(coordinator, config, account, "Daily Delivered KWH")
        self._state = None
    @property
    def state(self):
        data = self.getData("daily_usage")
        try:
            self._state = data[-1]["netDeliveredKwh"]
        except (IndexError, KeyError, TypeError):
            pass
        return self._state
def defineAttributes(self):
"""Return the state attributes."""
data = self.getData("daily_usage")
attributes = {}
attributes["friendly_name"] = "Daily Consumption"
attributes["device_class"] = "energy"
attributes["state_class"] = "total_increasing"
attributes["unit_of_measurement"] = "kWh"
if data is not None:
if (
(len(data) > 0)
and (data[-1] is not None)
and (data[-1]["readTime"] is not None)
):
attributes["date"] = data[-1]["readTime"]
if (
(len(data) > 1)
and (data[-2] is not None)
and (data[-2]["readTime"] is not None)
):
attributes["last_reset"] = data[-2]["readTime"]
return attributes
@property
def icon(self):
return "mdi:flash"
|
StarcoderdataPython
|
1678476
|
<filename>terrascript/chef/r.py
# terrascript/chef/r.py
import terrascript
class chef_acl(terrascript.Resource):
pass
class chef_client(terrascript.Resource):
pass
class chef_cookbook(terrascript.Resource):
pass
class chef_data_bag(terrascript.Resource):
pass
class chef_data_bag_item(terrascript.Resource):
pass
class chef_environment(terrascript.Resource):
pass
class chef_node(terrascript.Resource):
pass
class chef_role(terrascript.Resource):
pass
|
StarcoderdataPython
|
1745839
|
<reponame>jhanley634/changepoint
#! /usr/bin/env python
# Copyright 2021 <NAME>. MIT licensed.
from numpy.polynomial import Polynomial
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def arr(a):
"""Input should be a 1-dimensional vector."""
return np.array(a).reshape(-1, 1) # This is a 2-D array, with single column.
def pnomial(x, a=0, b=3, c=4, d=5):
return a * x**3 + b * x**2 + c * x + d
def get_curve(lo=0, hi=100, n_samples=1000, sigma=2e3):
df = pd.DataFrame(dict(x=np.linspace(lo, hi, n_samples)))
df['y'] = df.x.apply(pnomial) + sigma * np.random.standard_normal(n_samples)
return df
def main():
# training (fitting)
df = get_curve()
np_model = Polynomial.fit(df.x, df.y, 2) # np stmt 1
print(type(np_model), np_model)
poly_features = PolynomialFeatures(degree=2, include_bias=False)
x_poly = poly_features.fit_transform(arr(df.x))
lin_reg = LinearRegression()
lin_reg.fit(x_poly, df.y)
print(lin_reg.intercept_, lin_reg.coef_)
# inference
df = get_curve(-300, 500)
np_pred = list(map(np_model, df.x)) # np stmt 2
x = arr(df.x)
x = poly_features.transform(x)
pred = lin_reg.predict(x)
mae = mean_absolute_error(arr(df.y), pred)
print("MAE:", mae)
print(type(pred))
df['pred'] = np.array(pred)
df['delta'] = df.y - df.pred
print(df)
fig, ax = plt.subplots()
ax.plot(df.x, df.y, label='signal')
ax.plot(df.x, pred, label='predicted')
# ax.plot(df.x, np_pred, label='predicted by NP') # overwritten by pred pixels
diff = max(map(abs, np_pred - pred))
assert diff < 1e-6, diff
ax.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3314037
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param: A: an integer array
@return: A tree node
"""
def sortedArrayToBST(self, A):
return self.buildTree(A,0,len(A)-1)
def buildTree(self,A,start,end):
if(start>end):
return None
mid = (start+end) >> 1
node = TreeNode(A[mid])
node.left = self.buildTree(A,start,mid-1)
node.right = self.buildTree(A,mid+1,end)
return node
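# Usage sketch (not part of the original solution). The judge normally
# supplies TreeNode; a minimal local definition is included here only so
# the example can run stand-alone.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, val):
            self.val = val
            self.left, self.right = None, None
    root = Solution().sortedArrayToBST([1, 2, 3, 4, 5])
    assert root.val == 3          # middle element becomes the root
    assert root.left.val == 1     # left subtree built from [1, 2]
    assert root.right.val == 4    # right subtree built from [4, 5]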
|
StarcoderdataPython
|
3274505
|
<filename>extraction.py<gh_stars>0
from flask_wtf import FlaskForm
from wtforms import SubmitField
from flask import make_response
import mysql.connector
import pandas as pd
import time
from pprint import pprint
class DownloadCSVData:
    def __init__(self):
        mydb = None
        mycursor = None
        try:
            mydb = mysql.connector.connect(host="host",user="user", passwd="password", database="db")
            mycursor = mydb.cursor()
            sql = "SELECT * FROM goxipdb.members"
            mycursor.execute(sql)
            self.data = pd.DataFrame(mycursor.fetchall())
            self.columns = mycursor.column_names
        except mysql.connector.Error as err:
            print("Something went wrong: {}".format(err))
        finally:
            # Only close handles that were actually created.
            if mycursor is not None:
                mycursor.close()
            if mydb is not None:
                mydb.close()
def __call__(self):
df = self.data
df.columns = self.columns
#pprint(df)
file_name = time.strftime('%Y%m%d', time.localtime(time.time())) + '.csv'
response = make_response(df.to_csv(index = False))
response.headers["Content-Disposition"] = "attachment; filename=%s" %file_name
response.headers["Content-type"] = "text/csv"
return response
class Submit(FlaskForm):
submit = SubmitField('')
|
StarcoderdataPython
|
40489
|
from unittest import TestCase
from src.stack import StackWithMaxValue
class TestStackWithMaxValue(TestCase):
def test_push(self):
stack = StackWithMaxValue()
stack.push(1)
stack.push(2)
stack.push(3)
self.assertEqual([1, 2, 3], stack.as_list())
def test_pop(self):
stack = StackWithMaxValue()
stack.push(1)
self.assertEqual(1, stack.pop())
with self.assertRaises(IndexError):
stack.pop()
self.assertEqual([], stack.as_list())
def test_peek(self):
stack = StackWithMaxValue()
stack.push(1)
self.assertEqual(1, stack.peek())
self.assertEqual([1], stack.as_list())
stack.pop()
with self.assertRaises(IndexError):
stack.peek()
def test_is_empty(self):
stack = StackWithMaxValue()
self.assertTrue(stack.is_empty())
stack.push(1)
self.assertFalse(stack.is_empty())
def test_maximum(self):
stack = StackWithMaxValue()
stack.push(1)
stack.push(2)
self.assertEqual(2, stack.maximum())
stack.pop()
self.assertEqual(1, stack.maximum())
|
StarcoderdataPython
|
4832375
|
<reponame>canance/signpi-server<filename>frontend/urls.py
# Author: <NAME> <<EMAIL>>
# Revision: 6 February 2016
#
# Copyright 2016 Coastal Carolina University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^cast/', views.cast, name='cast'),
url(r'^change/', views.change, name='change'),
url(r'^create_device/', views.create_device, name="create_device"),
url(r'^create_device_group', views.create_device_group, name='create_device_group'),
url(r'^devices/', views.devices, name='devices'),
url(r'^device_groups/', views.device_groups, name='device_groups'),
url(r'^edit_device/(?P<dev>\d+)/$', views.edit_device, name='edit_device'),
url(r'^slideshows/', views.slideshows, name="slideshows"),
url(r'^slideshow/(?P<name>.*)/$', views.get_slideshow, name="slideshow"),
url(r'^slideshow_json/(?P<name>.*)/$', views.get_slideshow_json, name="slideshow_json"),
url(r'^streams/', views.streams, name="streams"),
url(r'^create_slideshow/', views.create_slideshow, name="create_slideshow"),
url(r'^create_stream/', views.create_stream, name="create_stream"),
url(r'^delete_slideshow/(?P<name>.*)/$', views.delete_slideshow, name="delete_slideshow"),
url(r'^edit_slideshow/(?P<name>.*)/$', views.edit_slideshow, name='edit_slideshow'),
url(r'^delete_stream/(?P<name>.*)/$', views.delete_stream, name="delete_stream"),
url(r'^edit_stream/(?P<name>.*)/$', views.edit_stream, name='edit_stream'),
url(r'^edit_device_group/(?P<grp>\d+)/$', views.edit_device_group, name='edit_device_group'),
]
|
StarcoderdataPython
|
4814641
|
import torch.utils.data as data
import numpy as np
import os, sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import data_transforms
from .io import IO
import json
from .build import DATASETS
# References:
# - https://github.com/hzxie/GRNet/blob/master/utils/data_loaders.py
@DATASETS.register_module()
class KITTI(data.Dataset):
def __init__(self, config):
self.cloud_path = config.CLOUD_PATH
self.bbox_path = config.BBOX_PATH
self.category_file = config.CATEGORY_FILE_PATH
self.npoints = config.N_POINTS
self.subset = config.subset
assert self.subset == 'test'
self.dataset_categories = []
with open(self.category_file) as f:
self.dataset_categories = json.loads(f.read())
self.transforms = data_transforms.Compose([{
'callback': 'NormalizeObjectPose',
'parameters': {
'input_keys': {
'ptcloud': 'partial_cloud',
'bbox': 'bounding_box'
}
},
'objects': ['partial_cloud', 'bounding_box']
}, {
'callback': 'RandomSamplePoints',
'parameters': {
'n_points': 2048
},
'objects': ['partial_cloud']
}, {
'callback': 'ToTensor',
'objects': ['partial_cloud', 'bounding_box']
}])
self.file_list = self._get_file_list(self.subset)
def _get_file_list(self, subset):
"""Prepare file list for the dataset"""
file_list = []
for dc in self.dataset_categories:
samples = dc[subset]
for s in samples:
file_list.append({
'taxonomy_id': dc['taxonomy_id'],
'model_id': s,
'partial_cloud_path': self.cloud_path % s,
'bounding_box_path': self.bbox_path % s,
})
return file_list
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
sample = self.file_list[idx]
data = {}
for ri in ['partial_cloud', 'bounding_box']:
file_path = sample['%s_path' % ri]
data[ri] = IO.get(file_path).astype(np.float32)
if self.transforms is not None:
data = self.transforms(data)
return sample['taxonomy_id'], sample['model_id'], data['partial_cloud']
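# Minimal construction sketch (illustrative only). The attribute names mirror
# the config fields read above; the paths and point count are placeholders,
# not values from the original project.
if __name__ == '__main__':
    from types import SimpleNamespace
    example_config = SimpleNamespace(
        CLOUD_PATH='data/KITTI/cars/%s.pcd',        # placeholder pattern
        BBOX_PATH='data/KITTI/bboxes/%s.txt',       # placeholder pattern
        CATEGORY_FILE_PATH='data/KITTI/KITTI.json', # placeholder path
        N_POINTS=16384,                             # placeholder
        subset='test',
    )
    dataset = KITTI(example_config)
    print(len(dataset))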
|
StarcoderdataPython
|
160534
|
from django.shortcuts import render
from django.http import HttpResponse
from .forms import SubscribeForm
from products.models import *
# Create your views here.
def landing_view(request):
name = "landing_view"
form = SubscribeForm(request.POST or None)
if request.method == "POST" and form.is_valid():
data = form.changed_data
new_form = form.save()
return render(request, 'landing/landing.html', locals())
def home(request):
products_images = ProductImage.objects.filter(is_active=True, is_main=True, product__is_active=True)
products_images_popular = products_images.filter(product__category__id=5)
products_images_men = products_images.filter(product__category__id=6)
products_images_different = products_images.filter(product__category__id=7)
return render(request, 'landing/home.html', locals())
|
StarcoderdataPython
|
1618798
|
<reponame>BupyeongHealer/SAMSUNG_SAIDALAB_RLCustom
# Copyright (C) 2019 SAMSUNG SDS <<EMAIL>>
#
# This code is distributed under the terms and conditions from the MIT License (MIT).
#
# Authors : <NAME>, <NAME>, <NAME>, <NAME>
from core.common.agent import Agent
from core.common.util import *
import math
class PPOAgent(Agent):
def __init__(self, state_size, action_size, continuous, actor, critic,
discount_factor=0.99, loss_clipping=0.2, epochs=10, noise=1.0, entropy_loss=1e-3,
buffer_size=256, batch_size=64, **kwargs):
"""
Constructor for PPO with clipped loss
#Arguments
            state_size(integer): Size of the state vector
            action_size(integer): Size of the action space
continuous(bool): True if action space is continuous type
actor(Keras Model): network for actor
critic(Keras Model): network for critic
discount_factor(float): discount reward factor
loss_clipping(float): hyper parameter for loss clipping, in PPO paper, 0.2 is recommended.
epochs(integer): hyper parameter
noise(float): hyper parameter
entropy_loss : hyper parameter
buffer_size : hyper parameter
batch_size : hyper parameter
#Return
None
"""
super(PPOAgent, self).__init__(**kwargs)
self.action_size = action_size
        self.state_size = state_size
self.continuous = continuous
self.critic = critic
self.actor = actor
self.episode = 0
self.discount_factor = discount_factor
self.loss_clipping = loss_clipping # Only implemented clipping for the surrogate loss, paper said it was best
self.epochs = epochs
self.noise = noise # Exploration noise
self.entropy_loss = entropy_loss
self.buffer_size = buffer_size
self.batch_size = batch_size
self.dummy_action,self.dummy_value = np.zeros((1, action_size)), np.zeros((1, 1))
self.observation = None
self.reward = []
self.reward_over_time = []
self.gradient_steps = 0
self.batch = [[], [], [], []]
self.tmp_batch = [[], [], []]
def reset_env(self):
self.reward = []
def discounted_reward(self):
for j in range(len(self.reward) - 2, -1, -1):
self.reward[j] += self.reward[j + 1] * self.discount_factor
def forward(self, observation):
self.observation = observation
self.tmp_batch[0].append(observation)
if self.continuous:
            p = self.actor.predict([self.observation.reshape(1, self.state_size), self.dummy_value, self.dummy_action])
action = action_matrix = p[0] + np.random.normal(loc=0, scale=self.noise, size=p[0].shape)
self.tmp_batch[1].append(action_matrix)
self.tmp_batch[2].append(p)
return action, action_matrix, p
else:
            state = np.reshape(self.observation, [1, self.state_size])
p = self.actor.predict([state, self.dummy_value, self.dummy_action])
action = np.random.choice(self.action_size, p=np.nan_to_num(p[0]))
action_matrix = np.zeros(self.action_size)
action_matrix[action] = 1
self.tmp_batch[1].append(action_matrix)
self.tmp_batch[2].append(p)
return [action]
def backward(self, reward, terminal):
if self.train_mode is False:
return
self.reward.append(reward)
if terminal:
self.discounted_reward()
for i in range(len(self.tmp_batch[0])):
obs, action, pred = self.tmp_batch[0][i], self.tmp_batch[1][i], self.tmp_batch[2][i]
r = self.reward[i]
self.batch[0].append(obs)
self.batch[1].append(action)
self.batch[2].append(pred)
self.batch[3].append(r)
self.tmp_batch = [[], [], []]
self.reset_env()
if len(self.batch[0]) >= self.buffer_size:
obs, action, pred, reward = np.array(self.batch[0]), np.array(self.batch[1]), np.array(self.batch[2]), np.reshape(
np.array(self.batch[3]), (len(self.batch[3]), 1))
self.batch = [[], [], [], []]
pred = np.reshape(pred, (pred.shape[0], pred.shape[2]))
obs, action, pred, reward = obs[:self.buffer_size], action[:self.buffer_size], \
pred[:self.buffer_size], reward[:self.buffer_size]
pred_values = self.critic.predict(obs)
self.gradient_steps += 1
# Calculate Loss
advantage = reward - pred_values
old_prediction = pred
advantage = (advantage - advantage.mean()) / advantage.std()
actor_loss = self.actor.fit([obs, advantage, old_prediction], [action], batch_size=self.batch_size,
shuffle=True, epochs=self.epochs, verbose=False)
critic_loss = self.critic.fit([obs], [reward], batch_size=self.batch_size, shuffle=True, epochs=self.epochs,
verbose=False)
def compile(self, optimizer, metrics=[]):
"""
# Argument
optimizer (object) : [0] = actor optimizer, [1] = critic optimizer
metrics (Tensor) : [0] = Keras Tensor as an advantage , [1] = Keras Tensor as an old_prediction
# Return
None
"""
# Compile actor model
advantage = metrics[0]
old_prediction = metrics[1]
if self.continuous:
self.actor.compile(optimizer=optimizer[0],
loss=[self.proximal_policy_optimization_loss_continuous(
advantage=advantage, old_prediction=old_prediction)])
else:
self.actor.compile(optimizer=optimizer[0],
loss=[self.proximal_policy_optimization_loss(
advantage=advantage, old_prediction=old_prediction)])
self.actor.summary()
#Compile critic model
self.critic.compile(optimizer=optimizer[1], loss='mse')
self.compiled=True
def proximal_policy_optimization_loss(self, advantage, old_prediction):
def loss(y_true, y_pred):
prob = y_true * y_pred
old_prob = y_true * old_prediction
r = prob / (old_prob + 1e-10)
            # clipped surrogate objective plus an entropy bonus -(p * log p); the
            # inner minus sign is required, otherwise the term penalises entropy
            # instead of encouraging exploration
            return -K.mean(K.minimum(r * advantage,
                                     K.clip(r, min_value=1 - self.loss_clipping,
                                            max_value=1 + self.loss_clipping) * advantage)
                           + self.entropy_loss * -(prob * K.log(prob + 1e-10)))
return loss
def proximal_policy_optimization_loss_continuous(self, advantage, old_prediction):
def loss(y_true, y_pred):
var = K.square(self.noise)
pi = math.pi
denom = K.sqrt(2 * pi * var)
prob_num = K.exp(- K.square(y_true - y_pred) / (2 * var))
old_prob_num = K.exp(- K.square(y_true - old_prediction) / (2 * var))
prob = prob_num / denom
old_prob = old_prob_num / denom
r = prob / (old_prob + 1e-10)
return -K.mean(K.minimum(r * advantage,
K.clip(r, min_value=1 - self.loss_clipping,
max_value=1 + self.loss_clipping) * advantage))
return loss
def load_weights(self, file_path, filename):
self.actor.load_weights(file_path[0])
self.critic.load_weights(file_path[1])
def save_weights(self, filepath, filename=None, overwrite=False):
algorithm_critic_name = '_critic_'
algorithm_actor_name = '_actor_'
critic_filepath = filepath + os.path.sep + filename + algorithm_critic_name + yyyymmdd24hhmmss() + '.' + 'h5f'
actor_filepath = filepath + os.path.sep + filename + algorithm_actor_name + yyyymmdd24hhmmss() + '.' + 'h5f'
self.critic.save_weights(critic_filepath, overwrite)
print('{} file is saved as a critic model for evaluation'.format(critic_filepath))
self.actor.save_weights(actor_filepath, overwrite)
        print('{} file is saved as an actor model for evaluation'.format(actor_filepath))
return [actor_filepath, critic_filepath]
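
# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): build_actor, build_critic,
# advantage_input and old_prediction_input are assumed user-supplied helpers and
# tensors, since the agent expects pre-built Keras models whose extra inputs are
# wired into the custom PPO loss through compile(metrics=...).
#
#   actor, advantage_input, old_prediction_input = build_actor(state_size, action_size)
#   critic = build_critic(state_size)
#   agent = PPOAgent(state_size=state_size, action_size=action_size,
#                    continuous=False, actor=actor, critic=critic)
#   agent.compile(optimizer=[Adam(lr=1e-4), Adam(lr=1e-3)],
#                 metrics=[advantage_input, old_prediction_input])
#   action = agent.forward(observation)      # choose an action
#   agent.backward(reward, terminal=done)    # store transition, train when buffer fills
# ----------------------------------------------------------------------------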
|
StarcoderdataPython
|
1799631
|
<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RESTCall class to store the REST API call definitions
"""
from __future__ import absolute_import
import pprint
import json
import bigml.api
from bigml.constants import RENAMED_RESOURCES
from bigml.resourcehandler import get_resource_type
INDENT = 4 * " "
def sort_lists(dict_structure):
"""Sorts the lists in a dict_structure
"""
if dict_structure is not None and isinstance(dict_structure, dict):
for key, value in dict_structure.items():
if value is not None:
if isinstance(value, list):
dict_structure[key] = sorted(value)
elif isinstance(value, dict):
sort_lists(dict_structure[key])
return dict_structure
def resource_alias(resource_id, alias):
"""Returns the alias if found
"""
if isinstance(resource_id, basestring):
return alias.get(resource_id, '"%s"' % resource_id)
elif isinstance(resource_id, list):
alias_names = []
for resource_id_id in resource_id:
alias_names.append(
alias.get(resource_id_id, '"%s"' % resource_id_id))
return repr(alias_names)
def to_whizzml(args):
"""Transforming dict syntax to whizzml argument syntax
"""
whizzml_args = []
if isinstance(args, list):
return "[%s]" % " ".join([to_whizzml(arg) for arg in args])
if isinstance(args, dict):
for arg, value in args.items():
whizzml_args.append("%s %s" % (json.dumps(arg), to_whizzml(value)))
return "{%s}" % " ".join(whizzml_args)
return json.dumps(args)
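# Illustrative example (not from the original source): given the Python dict
#   {"tags": ["a", "b"], "objective_field": {"id": "000000"}}
# to_whizzml renders the WhizzML literal
#   {"tags" ["a" "b"] "objective_field" {"id" "000000"}}
# i.e. JSON-encoded atoms separated by spaces instead of commas and colons
# (key order follows the dict's iteration order).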
class RESTCall(object):
"""Object to store the REST call definition
"""
def __init__(self, action, origins=None, args=None,
resource_id=None, resource_type=None, suffix=None, name=None):
"""Constructor for the REST call definition
resource_id: ID for the generated resource
action: ["create" | "update" | "get"]
origins: list of resouce IDs for the origin resources
args: dict for the rest of arguments
"""
self.action = action
self.origins = [resource_id] if origins is None and \
action == "update" else origins
self.args = args or {}
input_data = self.args.get("input_data")
if input_data:
del self.args["input_data"]
self.input_data = input_data
self.resource_id = resource_id
self.suffix = suffix
if resource_id and not resource_type:
self.resource_type = bigml.api.get_resource_type(self.resource_id)
else:
self.resource_type = resource_type
self.name = name
def reify(self, language=None, alias=None):
"""REST call command line
language: computing language to write the output in
alias: list of aliases for the related resources (e.g
{"source/55c4de8d1fa89c2dc70012d0": "source1"}
will cause the references to
"source/55c4de8d1fa89c2dc70012d0" to be renamed
as "source1")
"""
out = ""
if not language:
out += "\n\n"
out += "resource ID: %s\n" % self.resource_id
out += "action: %s\n" % self.action
out += "origins: %s\n" % self.origins
out += "args: %s\n" % self.args
out += "input data: %s\n" % self.input_data
return out
else:
try:
reify_handler = getattr(self, "reify_%s" % language)
except AttributeError:
reify_handler = self.reify
return reify_handler(
alias=alias)
def reify_python(self, alias=None):
"""REST call command line in python. See ``reify`` method.
"""
resource_type = get_resource_type(self.resource_id)
resource_name = resource_alias(self.resource_id, alias)
resource_method_suffix = RENAMED_RESOURCES.get(
resource_type, resource_type)
origin_names = [resource_alias(resource_id, alias) for resource_id
in self.origins]
arguments = ", ".join(origin_names)
if self.suffix:
arguments = "%s%s" % (arguments, self.suffix)
if self.input_data:
arguments = "%s, \\\n%s%s" % ( \
arguments, INDENT,
pprint.pformat(self.input_data).replace("\n", "\n%s" % INDENT))
if self.args:
sort_lists(self.args)
arguments = "%s, \\\n%s%s" % (arguments, \
INDENT, \
pprint.pformat(self.args).replace( \
"\n", "\n%s" % INDENT))
out = "%s = api.%s_%s(%s)\napi.ok(%s)\n\n" % (
resource_name,
self.action,
resource_method_suffix,
arguments,
resource_name)
return out
def reify_whizzml(self, alias=None):
""" Whizzml command for the REST call
"""
name = self.name if self.name else resource_alias( \
self.resource_id, alias)
args = self.origin_attributes()
arguments = to_whizzml(args)
        for res_id, res_name in alias.items():
            arguments = arguments.replace('"%s"' % res_id, res_name)
command = "(%s-and-wait-%s %s)" % ( \
self.action, self.resource_type, arguments)
return command
def origin_attributes(self):
""" Add as arguments the origin attributes
"""
args = {}
args.update(self.args)
origin_types = []
if isinstance(self.origins, basestring):
origin_types.append(bigml.api.get_resource_type(self.origins))
else:
for origin in self.origins:
origin_types.append(bigml.api.get_resource_type(origin))
if self.resource_type == "source":
args.update({"remote": self.origins[0]})
return args
if self.resource_type == "dataset" and origin_types[0] == "source":
args.update({"source": self.origins[0]})
return args
if self.resource_type == "dataset" and origin_types[0] == "cluster":
args.update({"cluster": self.origins[0]})
return args
if (self.resource_type == "dataset" and len(origin_types) < 2 and
origin_types[0] == "dataset"):
args.update({"origin_dataset": self.origins[0]})
return args
if self.resource_type == "dataset":
args.update({"origin_datasets": self.origins})
return args
if self.resource_type == "model" and origin_types[0] == "cluster":
args.update({"cluster": self.origins[0]})
return args
if (self.resource_type == "model" and len(origin_types) < 2 and
origin_types[0] == "dataset"):
args.update({"dataset": self.origins[0]})
return args
if self.resource_type == "model":
args.update({"datasets": self.origins})
return args
if (self.resource_type == "ensemble" and len(origin_types) < 2 and
origin_types[0] == "dataset"):
args.update({"dataset": self.origins[0]})
return args
if self.resource_type == "ensemble":
args.update({"datasets": self.origins})
return args
if (self.resource_type == "cluster" and len(origin_types) < 2 and
origin_types[0] == "dataset"):
args.update({"dataset": self.origins[0]})
return args
if self.resource_type == "cluster":
args.update({"datasets": self.origins})
return args
if (self.resource_type == "anomaly" and len(origin_types) < 2 and
origin_types[0] == "dataset"):
args.update({"dataset": self.origins[0]})
return args
if self.resource_type == "anomaly":
args.update({"datasets": self.origins})
return args
if self.resource_type == "evaluation":
args.update({origin_types[0]: self.origins[0],
"dataset": self.origins[1]})
return args
if (self.resource_type == "correlation" and len(origin_types) < 2 and
origin_types[0] == "dataset"):
args.update({"dataset": self.origins[0]})
return args
if (self.resource_type == "statisticaltest" and
len(origin_types) < 2 and
origin_types[0] == "dataset"):
args.update({"dataset": self.origins[0]})
return args
if self.resource_type == "batchprediction":
args.update({origin_types[0]: self.origins[0],
"dataset": self.origins[1]})
return args
if self.resource_type == "batchcentroid":
args.update({origin_types[0]: self.origins[0],
"dataset": self.origins[1]})
return args
if self.resource_type == "batchanomalyscore":
args.update({origin_types[0]: self.origins[0],
"dataset": self.origins[1]})
return args
if self.resource_type == "prediction":
args.update({origin_types[0]: self.origins[0],
"input_data": self.input_data})
return args
if self.resource_type == "centroid":
args.update({origin_types[0]: self.origins[0],
"input_data": self.input_data})
return args
if self.resource_type == "anomalyscore":
args.update({origin_types[0]: self.origins[0],
"input_data": self.input_data})
return args
|
StarcoderdataPython
|
4833156
|
<gh_stars>0
# --------------------------------------------------------------------------
#
# MyCSPath
# mycspath.py
# author: <NAME>
#
# --------------------------------------------------------------------------
import os

from flask import Flask, request, make_response, redirect, url_for
from flask import render_template, session
from database import Database, Profiles, Paths
from CASClient import CASClient
app = Flask(__name__, template_folder='.')
# generated by os.urandom(24)
SECRET_KEY = os.urandom(24)
app.secret_key = SECRET_KEY
# connect to database
database = Database()
profiles = Profiles()
paths = Paths()
#db_tags = Tags()
#db_langs = Languages()
# TESTING
# database = Database()
# results = session.query(database)
# tags = []
# tags.append("design")
# tags.append("systems")
# filtered_results = database.filter_tags(tags)
# results = []
# prerequisites = []
# for result in filtered_results:
# results.append(result)
# for result in filtered_results:
# prerequisites.append(result.prerequisites)
# unordered_path = database.make_bundles(results, prerequisites)
# for bundle in unordered_path:
# print(bundle.course)
# print(bundle.prereqs)
@app.route('/')
@app.route('/index')
def landing():
html = render_template('index.html')
response = make_response(html)
return response
@app.route('/home', methods=['GET', 'POST'])
def home():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
#netid = "test"
# get list of tags from db
""" results = db_tags.setup()
tags = []
for result in results:
tags.append(result.tag)
tags.remove('applications')
tags.remove('systems')
tags.remove('theory')
tags.sort()
# create columns
num_tags = len(tags)
col_len = int(num_tags/3)
i = 0
tags1 = []
tags2 = []
tags3 = []
while i < num_tags:
j = 0
while j < col_len:
tags1.append(tags[i])
j+=1
i+=1
j = 0
while j < col_len:
tags2.append(tags[i])
j+=1
i+=1
j = 0
while j < (num_tags - (2*col_len)):
tags3.append(tags[i])
j+=1
i+=1
# get list of languages from db
results = db_langs.setup()
langs = []
for result in results:
langs.append(result.lang)
# create columns
num_langs = len(langs)
col_len = int(num_langs/2)
i = 0
langs1 = []
langs2 = []
while i < num_langs:
j = 0
while j < col_len:
langs1.append(langs[i])
j+=1
i+=1
j = 0
while j < (num_langs - col_len):
langs2.append(langs[i])
j+=1
i+=1 """
tags1 = ['3D modeling', 'GUI programming', 'NLP', 'NP-completeness', 'animation', 'architecture', 'art', 'artificial intelligence', 'assembly language', 'astronomy', 'astrophysics', 'bioengineering', 'biology', 'business', 'chemistry', 'compilers', 'computation', 'computer architecture', 'computer vision', 'cryptocurrencies and blockchains', 'cryptography', 'data science/analysis', 'data structures']
tags2 = ['database programming', 'deep learning', 'design', 'distributed systems', 'ecology', 'economics', 'electrical engineering', 'energy', 'environmental systems', 'finance', 'functional programming', 'general programming', 'geometry', 'graphics', 'healthcare', 'image processing', 'intellectual property', 'language', 'linear algebra', 'machine language', 'machine learning', 'mathematics', 'mechanical & aerospace engineering']
tags3 = ['music', 'nanofabrication', 'network programming', 'networking', 'neuroscience', 'neural networks', 'operating systems', 'optics', 'optimization', 'physics', 'politics', 'probability', 'processors', 'psychology', 'public policy', 'quantitative modeling', 'quantum computing', 'robotics', 'security', 'server design', 'startups', 'statistics', 'system design', 'translation', 'web programming']
langs1 = ['AJAX', 'AMPL', 'C', 'C++', 'ChucK', 'CSS', 'EES', 'Flask', 'Go', 'HTML', 'Java', 'JavaScript', 'Jinja2', 'Julia']
langs2 = ['Machine Language', 'Mathematica', 'MATLAB', 'Max/MSP', 'OCalm', 'OpenFst', 'PHP', 'Python', 'R', 'SQL', 'Stata', 'Verilog', 'WEKA', 'Wolfram Language']
html = render_template('home.html', netid=netid, tags1=tags1, tags2=tags2, tags3=tags3, langs1=langs1, langs2=langs2)
response = make_response(html)
return response
@app.route('/courseinfo')
def courseinfo():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
results = database.setup()
html = render_template('courseinfo.html', results=results)
response = make_response(html)
return response
@app.route('/results', methods=['GET', 'POST'])
def results():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
if request.method == 'POST':
langs = request.form.getlist('lang')
tags = request.form.getlist('tag')
if not (len(langs) or len(tags)):
results = database.get_all()
total = 0
msg = "Looks like you didn't select any filters, so here is our complete list of courses."
else:
results_tags = database.filter_tags(tags)
results_langs = database.filter_langs(langs)
results = database.merge_results(results_langs, results_tags)
msg = "The following courses matched at least one of your selections!"
html = render_template('results.html', results=results, msg=msg)
response = make_response(html)
return response
@app.route('/path', methods=['GET', 'POST'])
def path():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
if request.method == 'POST':
courses = request.form.getlist('courses')
prereqs = request.form.getlist('prereqs')
for course in courses:
print(course)
for pq in prereqs:
print(pq)
result_list = database.make_result_list(courses)
for result in result_list:
print(result.course)
bundles = database.make_bundles(result_list, prereqs)
for bundle in bundles:
print(bundle.course)
print(bundle.prereqs)
new_bundles = database.remove_duplicate_prereqs(bundles)
for bundle in new_bundles:
print(bundle.course)
print("------------")
print(bundle.prereqs)
print("\n")
more_bundles = database.remove_duplicate_courses(new_bundles)
for bundle in more_bundles:
print(bundle.course)
print("------------")
print(bundle.prereqs)
print("\n")
html = render_template('path.html', bundles=bundles)
response = make_response(html)
return response
@app.route('/about')
def about():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
html = render_template('about.html')
response = make_response(html)
return response
@app.route('/tutorial')
def tutorial():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
html = render_template('tutorial.html')
response = make_response(html)
return response
@app.route('/saved', methods=['GET', 'POST'])
def saved():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
#netid = 'test'
# add new path to db
if request.method == 'POST':
path = request.form.getlist('dragged')
title = request.form.get('title')
print(title)
for item in path:
print(item)
print("here")
paths.add_to_dict(netid, title, path)
# render saved paths on page
result = paths.get_row(netid)
current_dict = {}
if result is not None:
current_dict = result.paths
for title, path in current_dict.items():
print(title + ":")
print("------------")
for course in path:
print(course)
html = render_template('saved.html', current_dict=current_dict)
response = make_response(html)
return response
@app.route('/delete_path', methods=['GET', 'POST'])
def delete_path():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
#netid = 'test'
if request.method == 'POST':
# get title of path to remove
title = request.form.get('title')
print("title: " + title)
# get row in db matching current netid
row = paths.get_row(netid)
current_dict = row.paths
# remove
paths.remove_row(row)
print("row removed")
# update
removed = current_dict.pop(title, None)
print(removed)
paths.add_row(netid, current_dict)
return redirect('/saved')
@app.route('/profile')
def profile():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
#netid = "test"
if not profiles.check_exists(netid):
html = render_template('profile.html')
response = make_response(html)
return response
else:
profile = profiles.get_row(netid)
html = render_template('profile_saved.html', profile=profile)
response = make_response(html)
return response
@app.route('/profile_saved', methods=['GET', 'POST'])
def profile_saved():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
#netid = "test"
if request.method == 'POST':
btn_type = request.form.get('btn_type')
if btn_type == 'Save':
# get fields from edit form
name = request.form.get('Name')
print(name)
class_yr = request.form.get('Class Year')
degree = request.form.get('Degree')
concentration = request.form.get('Concentration')
goals = request.form.get('Goals')
# remove existing row
if profiles.check_exists(netid):
profile = profiles.get_row(netid)
profiles.remove_row(profile)
# add updated row
profiles.add_profile(netid, name, class_yr, degree, concentration, goals)
# get new row
profile = profiles.get_row(netid)
html = render_template('profile_saved.html', profile=profile)
response = make_response(html)
return response
@app.route('/edit_profile', methods=['GET', 'POST'])
def edit_profile():
casauth = CASClient()
netid = casauth.authenticate().rstrip()
#netid = "test"
if request.method == 'POST':
profile = profiles.get_row(netid)
html = render_template('edit_profile.html', profile=profile)
response = make_response(html)
return response
# from https://stackabuse.com/deploying-a-flask-application-to-heroku/
if __name__ == '__main__':
app.run()
|
StarcoderdataPython
|
1722213
|
"""Kick off for training A3C agent training"""
import argparse
import datetime
from stable_baselines.common.policies import MlpPolicy, FeedForwardPolicy
from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv
from stable_baselines import PPO2
from loveletter.env import LoveLetterEnv
from loveletter.arena import Arena
from loveletter.agents.random import AgentRandom
from loveletter.agents.tf_agent import TFAgent
from loveletter.agents.agent import Agent
from loveletter.trainers.a3c_model import ActorCritic
from loveletter.trainers.a3c_train import train
from loveletter.trainers.a3c_test import test
parser = argparse.ArgumentParser(description='RL for Love Letter')
parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
help='learning rate (default: 0.0001)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--num-processes', type=int, default=4, metavar='N',
help='how many training processes to use (default: 4)')
parser.add_argument('--num-steps', type=int, default=20, metavar='NS',
help='number of forward steps for update (default: 20)')
parser.add_argument('--total-steps', type=int, default=1e6, metavar='NS',
help='number of total steps (default: 1M)')
parser.add_argument('--save-name', metavar='FN', default='default_model',
help='path/prefix for the filename to save shared model\'s parameters')
parser.add_argument('--load-name', default=None, metavar='SN',
help='path/prefix for the filename to load shared model\'s parameters')
parser.add_argument('--log-dir', default="./tensorboard/", metavar='path',
help='path to the tensorboard log directory')
class CustomPolicy(FeedForwardPolicy):
def __init__(self, *args, **kwargs):
super(CustomPolicy, self).__init__(*args, **kwargs,
net_arch=[512, dict(pi=[256, 128],
vf=[256, 128])],
feature_extraction="mlp")
if __name__ == '__main__':
args = parser.parse_args()
    if args.load_name:
        # bind i at definition time (i=i) so each worker env gets its own seed
        env = SubprocVecEnv([lambda i=i: LoveLetterEnv(TFAgent(args.load_name, args.seed + i))
                             for i in range(args.num_processes)])
    else:
        env = SubprocVecEnv([lambda i=i: LoveLetterEnv(AgentRandom(args.seed + i))
                             for i in range(args.num_processes)])
model = PPO2(CustomPolicy,
env,
verbose=0,
tensorboard_log=args.log_dir,
learning_rate=args.lr,
n_steps=args.num_steps,
nminibatches=5)
if args.load_name:
model.load(args.load_name)
model.learn(total_timesteps=int(args.total_steps),
callback=None,
log_interval=1000,
tb_log_name='PPO2 %s' % datetime.datetime.now().strftime('%H-%M-%S'))
model.save(args.save_name)
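    # Example invocation (the script and model names below are placeholders, not
    # part of the original repository):
    #   python train_a3c.py --num-processes 8 --num-steps 128 \
    #       --total-steps 2000000 --save-name models/ppo2_loveletter \
    #       --log-dir ./tensorboard/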
|
StarcoderdataPython
|
37182
|
<gh_stars>1-10
import math
from pomagma.compiler.expressions import Expression_1
from pomagma.compiler.util import log_sum_exp, memoize_make, set_with
def assert_in(element, set_):
assert element in set_, (element, set_)
def assert_not_in(element, set_):
assert element not in set_, (element, set_)
def assert_subset(subset, set_):
assert subset <= set_, (subset, set_)
OBJECT_COUNT = 1e4 # optimize for this many obs
LOGIC_COST = OBJECT_COUNT / 64.0 # perform logic on 64-bit words
LOG_OBJECT_COUNT = math.log(OBJECT_COUNT)
UNKNOWN = Expression_1('UNKNOWN')
def add_costs(costs):
return (log_sum_exp(*(LOG_OBJECT_COUNT * c for c in costs)) /
LOG_OBJECT_COUNT)
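# Worked example (assuming log_sum_exp computes log(sum(exp(...)))): costs are
# measured in powers of OBJECT_COUNT, so combining two branches of cost 1.0 gives
#   add_costs([1.0, 1.0]) == log(2 * OBJECT_COUNT) / log(OBJECT_COUNT) ~= 1.075
# i.e. only slightly more than a single branch, not the arithmetic sum 2.0.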
class Plan(object):
__slots__ = ['_args', '_cost', '_rank']
def __init__(self, *args):
self._args = args
self._cost = None
self._rank = None
@property
def cost(self):
if self._cost is None:
self._cost = math.log(self.op_count()) / LOG_OBJECT_COUNT
return self._cost
@property
def rank(self):
if self._rank is None:
s = repr(self)
self._rank = self.cost, len(s), s
return self._rank
def __lt__(self, other):
return self.rank < other.rank
def permute_symbols(self, perm):
return self.__class__.make(*(
a.permute_symbols(perm)
for a in self._args
))
@memoize_make
class Iter(Plan):
__slots__ = ['_repr', 'var', 'body', 'tests', 'lets', 'stack']
def __init__(self, var, body):
Plan.__init__(self, var, body)
assert var.is_var(), var
assert isinstance(body, Plan), body
self._repr = None
self.var = var
self.body = body
self.tests = []
self.lets = {}
self.stack = set()
self.optimize()
def add_test(self, test):
assert isinstance(test, Test), 'add_test arg is not a Test'
self.tests.append(test.expr)
self.stack.add(test)
def add_let(self, let):
assert isinstance(let, Let), 'add_let arg is not a Let'
        assert let.var not in self.lets, 'add_let var is already in Iter.lets'
self.lets[let.var] = let.expr
self.stack.add(let)
def __repr__(self):
if self._repr is None:
# Optimized:
# tests = ['if {}'.format(t) for t in self.tests]
# lets = ['let {}'.format(l) for l in sorted(self.lets.iterkeys())]
# self._repr = 'for {0}: {1}'.format(
# ' '.join([str(self.var)] + tests + lets),
# self.body)
self._repr = 'for {}: {}'.format(self.var, self.body)
return self._repr
def validate(self, bound):
assert_not_in(self.var, bound)
bound = set_with(bound, self.var)
for test in self.tests:
assert_subset(test.vars, bound)
for var, expr in self.lets.iteritems():
assert_subset(expr.vars, bound)
assert_not_in(var, bound)
self.body.validate(bound)
def op_count(self, stack=None):
logic_cost = LOGIC_COST * (len(self.tests) + len(self.lets))
object_count = OBJECT_COUNT
for test_or_let in self.stack:
object_count *= test_or_let.prob()
let_cost = len(self.lets)
body_cost = self.body.op_count(stack=self.stack)
return logic_cost + object_count * (let_cost + body_cost)
def optimize(self):
node = self.body
new_lets = set()
while isinstance(node, Test) or isinstance(node, Let):
if isinstance(node, Let):
new_lets.add(node.var)
expr = node.expr
while expr.name == 'UNKNOWN':
expr = expr.args[0]
optimizable = (
self.var in expr.vars and
expr.vars.isdisjoint(new_lets) and
sum(1 for arg in expr.args if self.var == arg) == 1 and
sum(1 for arg in expr.args if self.var in arg.vars) == 1 and
(isinstance(node, Let) or expr.is_rel())
)
if optimizable:
if isinstance(node, Test):
self.add_test(node)
else:
self.add_let(node)
node = node.body
# TODO injective function inverse need not be iterated
@memoize_make
class IterInvInjective(Plan):
__slots__ = ['fun', 'value', 'var', 'body']
def __init__(self, fun, body):
Plan.__init__(self, fun, body)
assert fun.arity == 'InjectiveFunction'
self.fun = fun.name
self.value = fun.var
(self.var,) = fun.args
self.body = body
def __repr__(self):
return 'for {0} {1}: {2}'.format(self.fun, self.var, self.body)
def validate(self, bound):
assert_in(self.value, bound)
assert_not_in(self.var, bound)
self.body.validate(set_with(bound, self.var))
def op_count(self, stack=None):
return 4.0 + 0.5 * self.body.op_count() # amortized
@memoize_make
class IterInvBinary(Plan):
__slots__ = ['fun', 'value', 'var1', 'var2', 'body']
def __init__(self, fun, body):
Plan.__init__(self, fun, body)
assert fun.arity in ['BinaryFunction', 'SymmetricFunction']
self.fun = fun.name
self.value = fun.var
self.var1, self.var2 = fun.args
self.body = body
def __repr__(self):
return 'for {0} {1} {2}: {3}'.format(
self.fun, self.var1, self.var2, self.body)
def validate(self, bound):
assert_in(self.value, bound)
assert_not_in(self.var1, bound)
assert_not_in(self.var2, bound)
self.body.validate(set_with(bound, self.var1, self.var2))
def op_count(self, stack=None):
return 4.0 + 0.25 * OBJECT_COUNT * self.body.op_count() # amortized
@memoize_make
class IterInvBinaryRange(Plan):
__slots__ = ['fun', 'value', 'var1', 'var2', 'lhs_fixed', 'body']
def __init__(self, fun, fixed, body):
Plan.__init__(self, fun, fixed, body)
assert fun.arity in ['BinaryFunction', 'SymmetricFunction']
self.fun = fun.name
self.value = fun.var
self.var1, self.var2 = fun.args
assert self.var1 != self.var2
assert self.var1 == fixed or self.var2 == fixed
self.lhs_fixed = (fixed == self.var1)
self.body = body
def __repr__(self):
if self.lhs_fixed:
return 'for {0} ({1}) {2}: {3}'.format(
self.fun, self.var1, self.var2, self.body)
else:
return 'for {0} {1} ({2}): {3}'.format(
self.fun, self.var1, self.var2, self.body)
def validate(self, bound):
assert self.value in bound
if self.lhs_fixed:
assert_in(self.var1, bound)
assert_not_in(self.var2, bound)
self.body.validate(set_with(bound, self.var2))
else:
assert_in(self.var2, bound)
assert_not_in(self.var1, bound)
self.body.validate(set_with(bound, self.var1))
def op_count(self, stack=None):
return 4.0 + 0.5 * self.body.op_count() # amortized
@memoize_make
class Let(Plan):
__slots__ = ['var', 'expr', 'body']
def __init__(self, expr, body):
Plan.__init__(self, expr, body)
assert isinstance(body, Plan)
assert expr.is_fun()
self.var = expr.var
self.expr = expr
self.body = body
def __repr__(self):
return 'let {0}: {1}'.format(self.var, self.body)
def validate(self, bound):
assert_subset(self.expr.vars, bound)
assert_not_in(self.var, bound)
self.body.validate(set_with(bound, self.var))
__probs = {'NullaryFunction': 0.9}
def prob(self):
return self.__probs.get(self.expr.arity, 0.1)
def op_count(self, stack=None):
if stack and self in stack:
return self.body.op_count(stack=stack)
else:
return 1.0 + self.prob() * self.body.op_count(stack=stack)
@memoize_make
class Test(Plan):
__slots__ = ['expr', 'body']
def __init__(self, expr, body):
Plan.__init__(self, expr, body)
assert not expr.is_var()
assert isinstance(body, Plan)
self.expr = expr
self.body = body
def __repr__(self):
return 'if {0}: {1}'.format(self.expr, self.body)
def validate(self, bound):
assert_subset(self.expr.vars, bound)
self.body.validate(bound)
__probs = {'NLESS': 0.9}
def prob(self):
return self.__probs.get(self.expr.name, 0.1)
def op_count(self, stack=None):
if stack and self in stack:
return self.body.op_count(stack=stack)
else:
return 1.0 + self.prob() * self.body.op_count(stack=stack)
@memoize_make
class Ensure(Plan):
__slots__ = ['expr']
def __init__(self, expr):
Plan.__init__(self, expr)
assert expr.args, ('expr is not compound', expr)
self.expr = expr
def __repr__(self):
return 'ensure {0}'.format(self.expr)
def validate(self, bound):
assert_subset(self.expr.vars, bound)
def op_count(self, stack=None):
fun_count = 0
if self.expr.name == 'EQUATION':
for arg in self.expr.args:
if arg.is_fun():
fun_count += 1
return [1.0, 1.0 + 0.5 * 1.0, 2.0 + 0.75 * 1.0][fun_count]
|
StarcoderdataPython
|
4815384
|
import psycopg2
from app import app
from pgdatabase import PgDatabase
@app.route('/')
@app.route('/index')
def index():
return "This is the Index Page"
@app.route('/master')
def mastercheck():
try:
db = PgDatabase()
my_list = db.query("SHOW transaction_read_only")
my_string = ''.join(my_list)
if my_string=='off':
ret_string='MASTER'
else:
ret_string='READONLY'
db.close()
return ret_string
except Exception as e:
        print(e)
return []
@app.route('/slave')
def slavecheck():
try:
db = PgDatabase()
my_list = db.query("SELECT pg_is_in_recovery()::text")
my_string = ''.join(my_list)
if my_string=='true':
ret_string='SLAVE'
else:
ret_string='NOTSLAVE'
db.close()
return ret_string
except Exception as e:
        print(e)
return []
@app.route('/status')
def statuscheck():
masterstat = mastercheck()
slavestat = slavecheck()
    initialstat = 'UNKNOWN'
if masterstat == 'MASTER' and slavestat == 'NOTSLAVE':
return masterstat
elif masterstat == 'READONLY' and slavestat == 'SLAVE':
return slavestat
else:
return initialstat
|
StarcoderdataPython
|
1787778
|
<filename>thelma/tools/parsers/rackscanning.py
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
.. currentmodule:: thelma.entities.rack
General
.......
This parser deals with rack scanning output files. These file contain the tube
barcodes for each position of a 96-well tube rack in TXT format.
Composition of Source Files
...........................
* The first line contains the marker \'Date & time of Trace = \' plus
a time stamp in format %d %b %Y %I:%M:%S %p (for key see
`Timestamp Parsing Key`_). *Example:* ::
Date & time of Trace = 23 Aug 2012 01:42:01 PM
* The second line contains the marker \'Rack Base Name:\' plus the
8-digit rack barcode of the scanned rack (always starting with \'0\',
see also: :attr:`RACK_BARCODE_REGEXP`). *Example:* ::
Rack Base Name: 02498606
* The following list the rack positions and the scanned barcodes. Each line
contains one data pair, the values are separated by \';\'.
The first element of the line is the rack position label (with a two-digit
column number, see also :attr:`RACK_POSITION_REGEXP`). The second element
is either the tube barcode or the marker \'No TrakMate\' if the position
is empty. *Example:* ::
A01; 1034998087
A02; No TrakMate
Timestamp Parsing Key
.....................
According to `http://docs.python.org/library/datetime.html` (23rd Aug 2012):
%b - Locale's abbreviated month name.
    %d - Day of the month as a decimal number [01, 31].
%I - Hour (12-hour clock) as a decimal number [01,12].
%M - Minute as a decimal number [00,59].
%S - Second as a decimal number [00,61].
%p - Locale's equivalent of either AM or PM.
%Y - Year with century as a decimal number.
AAB
"""
from datetime import datetime
from thelma.tools.parsers.base import TxtFileParser
from thelma.utils import as_utc_time
__docformat__ = "reStructuredText en"
__all__ = ['RackScanningParser']
class RackScanningParser(TxtFileParser):
"""
Parses a rack scanning output file.
"""
NAME = 'Rack Scanning Output File Parser'
#: Marks the time stamp line.
TIMESTAMP_MARKER = 'Date & time of Trace ='
#: Marks the rack barcode line.
RACK_BARCODE_MARKER = 'Rack Base Name:'
#: The format of the timestamp.
TIMESTAMP_FORMAT = '%d %b %Y %I:%M:%S %p'
#: Placeholder that is used if there is no tube at a position.
NO_TUBE_PLACEHOLDER = 'No TrakMate'
#: The line break character used.
LINEBREAK_CHAR = '\r\n'
#: The character used to separate the values of a barcode position line.
SEPARATOR = ';'
def __init__(self, stream, parent=None):
TxtFileParser.__init__(self, stream, parent=parent)
#: The timestamp parsed from the file.
self.timestamp = None
#: The barcode of the rack to parse.
self.rack_barcode = None
#: The tube barcode (or None) for each label found (labels are
#: validated before storage).
self.position_map = None
def reset(self):
"""
Reset all parser values except for initialisation values.
"""
TxtFileParser.reset(self)
self.timestamp = None
self.rack_barcode = None
self.position_map = dict()
def run(self):
"""
Runs the parser.
"""
self.reset()
self.add_info('Start parsing ...')
self.has_run = True
self._split_into_lines()
if not self.has_errors(): self.__parse_timestamp()
if not self.has_errors(): self.__parse_rack_barcode()
if not self.has_errors(): self.__parse_position_data()
if not self.has_errors(): self.add_info('Parsing completed.')
def __parse_timestamp(self):
"""
Parses the timestamp.
"""
self.add_debug('Parse timestamp ...')
for line in self._lines:
if self.TIMESTAMP_MARKER in line:
datestr = line.split(self.TIMESTAMP_MARKER)[1].strip()
try:
self.timestamp = \
as_utc_time(datetime.strptime(datestr,
self.TIMESTAMP_FORMAT))
except ValueError as errmsg:
self.add_error(errmsg)
break
if self.timestamp is None:
            msg = 'Unable to find time stamp!'
self.add_error(msg)
def __parse_rack_barcode(self):
"""
Parses the rack barcode.
"""
self.add_debug('Parse rack barcode ...')
for line in self._lines:
if self.RACK_BARCODE_MARKER in line:
self.rack_barcode = line.split(self.RACK_BARCODE_MARKER)[1].\
strip()
break
if self.rack_barcode is None:
msg = 'Unable to find rack barcode!'
self.add_error(msg)
def __parse_position_data(self):
"""
Parses the position data.
"""
self.add_debug('Parse position data ...')
for i in range(len(self._lines)):
if self.has_errors(): break
line = self._lines[i]
if len(line) < 1: continue
if self.TIMESTAMP_MARKER in line: continue
if self.RACK_BARCODE_MARKER in line: continue
msg = 'Unexpected content in line %i: %s' % (i + 1, line)
if not self.SEPARATOR in line: self.add_error(msg)
tokens = line.split(self.SEPARATOR)
if not len(tokens) == 2: self.add_error(msg)
if self.has_errors(): continue
pos_label = tokens[0].strip()
            if pos_label in self.position_map:
msg = 'Duplicate position label "%s"' % (pos_label)
self.add_error(msg)
if self.has_errors(): continue
tube_barcode = tokens[1].strip()
if tube_barcode == self.NO_TUBE_PLACEHOLDER: tube_barcode = None
self.position_map[pos_label] = tube_barcode
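
# Hedged usage sketch (the file name is illustrative; has_errors() and the log
# methods come from the TxtFileParser base class):
#
#   with open('rack_scan_02498606.txt', 'rb') as stream:
#       parser = RackScanningParser(stream)
#       parser.run()
#       if not parser.has_errors():
#           print(parser.rack_barcode, parser.timestamp)
#           print(parser.position_map)  # e.g. {'A01': '1034998087', 'A02': None}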
|
StarcoderdataPython
|
1751872
|
import pytest
@pytest.mark.bare
def test_import():
from pytsammalex import clld
from pytsammalex import lexibank
assert clld and lexibank
|
StarcoderdataPython
|
141062
|
def rpn_eval(tokens):
def op(symbol, a, b):
return {
'+': lambda a, b: a + b,
'-': lambda a, b: a - b,
'*': lambda a, b: a * b,
'/': lambda a, b: a / b
}[symbol](a, b)
stack = []
for token in tokens:
if isinstance(token, float):
stack.append(token)
else:
            a = stack.pop()  # right-hand operand (pushed most recently)
            b = stack.pop()  # left-hand operand
            stack.append(
                op(token, b, a)
            )
return stack.pop()
"""
Reverse Polish Notation
Four-function calculator with input given in Reverse Polish Notation (RPN).
Input:
A list of values and operators encoded as floats and strings
Precondition:
all(
isinstance(token, float) or token in ('+', '-', '*', '/') for token in tokens
)
Example:
>>> rpn_eval([3.0, 5.0, '+', 2.0, '/'])
4.0
"""
|
StarcoderdataPython
|
3065
|
from setuptools import setup
setup(
name="greek-utils",
version="0.2",
description="various utilities for processing Ancient Greek",
license="MIT",
url="http://github.com/jtauber/greek-utils",
author="<NAME>",
author_email="<EMAIL>",
packages=["greekutils"],
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Text Processing",
"Topic :: Text Processing :: Linguistic",
"Topic :: Utilities",
],
)
|
StarcoderdataPython
|
1763450
|
<reponame>MayborodaPavel/testPlatform
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse_lazy
class User(AbstractUser):
username = models.CharField(max_length=150, blank=True, null=True)
email = models.EmailField(unique=True, max_length=255)
dob = models.DateField(blank=True, null=True)
info = models.TextField(blank=True, null=True)
avatar = models.ImageField(blank=True, null=True, upload_to='avatars')
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
def __str__(self):
return self.email
def get_absolute_url(self):
return reverse_lazy('accounts:profile')
|
StarcoderdataPython
|
3270311
|
<filename>src/kdmukai/specterext/bitcoinreserve/client.py
import json
import logging
import requests
from decimal import Decimal
from flask import current_app as app
from werkzeug.wrappers import auth
from kdmukai.specterext.bitcoinreserve.service import BitcoinReserveService
logger = logging.getLogger(__name__)
class BitcoinReserveApiException(Exception):
pass
def authenticated_request(
endpoint: str, method: str = "GET", json_payload: dict = {}
) -> dict:
logger.debug(f"{method} endpoint: {endpoint}")
api_token = BitcoinReserveService.get_api_credentials().get("api_token")
# Must explicitly set User-Agent; Swan firewall blocks all requests with "python".
auth_header = {
"User-Agent": "Specter Desktop",
"Authorization": "Token " + api_token,
}
    url = app.config.get("BITCOIN_RESERVE_API_URL") + endpoint
logger.debug(url)
logger.debug(auth_header)
try:
response = requests.request(
method=method,
url=url,
headers=auth_header,
json=json_payload,
)
if response.status_code != 200:
raise BitcoinReserveApiException(f"{response.status_code}: {response.text}")
print(json.dumps(response.json(), indent=4))
return response.json()
except Exception as e:
# TODO: tighten up expected Exceptions
logger.exception(e)
logger.error(
f"endpoint: {endpoint} | method: {method} | payload: {json.dumps(json_payload, indent=4)}"
)
logger.error(f"{response.status_code}: {response.text}")
raise e
"""
"User Balance": /user/balance/
EXAMPLE:
curl -X GET http://46.101.227.39/user/balance/
OUTPUT:
{
"balance_eur": "0.00000000"
}
"""
def get_fiat_balances():
return authenticated_request("/user/balance")
"""
"Create Quote": /user/order/quote/
EXAMPLE:
curl -d '{"fiat_currency":"EUR", "fiat_deliver_amount":"10000", "withdrawal_address":"bc1qjg53lww9jrm506dj0g0szmk4pxt6f55x8dncuv", "withdrawal_method":"ONCHAIN"}' -H "Content-Type: application/json" -X POST http://46.101.227.39/user/order/quote/
OUTPUT:
{
"quote_id": "9b787187-1fc2-475d-b495-0b9df5ed270e",
"bitcoin_receive_amount": 0.26516569,
"trade_fee_currency": "EUR",
"trade_fee_amount": 195.0,
"expiration_time_utc": 1641867612.480406
}
"""
def create_quote(
fiat_amount: Decimal, withdrawal_address: str, fiat_currency: str = "EUR"
):
return authenticated_request(
"/user/order/quote",
method="POST",
json_payload={
"fiat_currency": fiat_currency,
"fiat_deliver_amount": fiat_amount,
"withdrawal_address": withdrawal_address,
"withdrawal_method": "ONCHAIN",
},
)
"""
"Confirm Order": /user/order/confirm/
EXAMPLE:
curl -d '{"quote_id":"9b787187-1fc2-475d-b495-0b9df5ed270e"}' -H "Content-Type: application/json" -X POST http://46.101.227.39/user/order/confirm/
OUTPUT:
{
"order_id": "d9160a5a-e23f-4f76-9327-330e2afda736",
"order_status": "COMPLETE",
"bitcoin_receive_amount": 0.26516569,
"trade_fee_currency": "EUR",
"trade_fee_amount": 195.0,
"withdrawal_address": "bc1qjg53lww9jrm506dj0g0szmk4pxt6f55x8dncuv",
"withdrawal_status": "INITIATED",
"withdrawal_method": "ONCHAIN",
"withdrawal_fee": 0.0,
"withdrawal_eta": 1641953991.115419
}
"""
def confirm_order(quote_id: str):
return authenticated_request(
"/user/order/confirm", method="POST", json_payload={"quote_id": quote_id}
)
"""
"Order Status": /user/order/status/
EXAMPLE:
curl -d '{"order_id":"d9160a5a-e23f-4f76-9327-330e2afda736"}' -H "Content-Type: application/json" -X GET http://46.101.227.39/user/order/status/
OUTPUT:
{
"order_status": "COMPLETE",
"bitcoin_receive_amount": 0.26516569,
"quote_id": "9b787187-1fc2-475d-b495-0b9df5ed270e",
"trade_fee_currency": "EUR",
"trade_fee_amount": 195.0,
"withdrawals": {
"withdrawal_number": 0,
"withdrawal_status": "INITIATED",
"withdrawal_address": "bc1qjg53lww9jrm506dj0g0szmk4pxt6f55x8dncuv",
"withdrawal_method": "ONCHAIN",
"withdrawal_fee": 0.0,
"withdrawal_eta": 1641953991.115419,
"withdrawal_identifier": null
}
}
"""
def get_order_status(order_id: str):
return authenticated_request(
"/user/order/status", method="GET", json_payload={"order_id": order_id}
)
def get_transactions(page_num: int = 0) -> list:
"""
First entry is the summary data:
[
{
"total_transaction_count": 29,
"page": 0
},
{
"transaction_id": "1f88faf0-dfc4-410e-9163-7371f9aa9e30",
"transaction_status": "DONE",
"transaction_type": "WITHDRAWAL",
"transaction_time": "2022-01-18 05:28:35.068650",
"in_currency": null,
"in_amount": "None",
"out_currency": "SATS",
"out_amount": "28838.00000000"
},
{...},
]
"""
return authenticated_request(f"/api/user/transactions/{page_num}")
def get_transaction(transaction_id: str) -> dict:
"""
{
"transaction_type": "MARKET BUY",
"transaction_id": "31ffc3b6-9b9f-41db-ad6d-b636b69ae63d",
"transaction_status": "COMPLETE",
"sats_bought": "147810",
"fiat_spent": "50.00",
"fiat_currency": "EUR",
"withdrawals": {
"transaction_type": "WITHDRAWAL",
"transaction_id": "a9ab0eca-eb9a-4e6e-a692-8531356dd674",
"withdrawal_serial_number": 0,
"withdrawal_status": "DONE",
"withdrawal_address": "bc1qsst0m3pn9adnl68wuhd9h727eu09rnn0nqes2u",
"withdrawal_fee": "0",
"withdrawal_currency": "SATS",
"withdrawal_identifier": "ab4be723b1b11334fde4317c54fad91d583a1570958f78a486a3d4b4d32d7bc1"
}
}
"""
return authenticated_request(f"/api/user/transaction/{transaction_id}")
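
# Hedged end-to-end sketch of the buy flow documented above (the address and
# amount are placeholders; error handling and withdrawal polling are omitted):
#
#   quote = create_quote(fiat_amount="100.00",
#                        withdrawal_address="bc1q...",
#                        fiat_currency="EUR")
#   order = confirm_order(quote["quote_id"])
#   status = get_order_status(order["order_id"])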
|
StarcoderdataPython
|
85571
|
<filename>python/testData/paramInfo/NoArgsException.py<gh_stars>0
def function(param, param1):
pass
def result():
pass
function(result<arg1>(), result())
|
StarcoderdataPython
|
36911
|
import logging
from parsl.monitoring.handler import DatabaseHandler
from parsl.monitoring.handler import RemoteHandler
from parsl.utils import RepresentationMixin
class NullHandler(logging.Handler):
"""Setup default logging to /dev/null since this is library."""
def emit(self, record):
pass
class MonitoringStore(RepresentationMixin):
def __init__(self,
host=None,
port=None,
logging_server_host='localhost',
logging_server_port=9595):
"""
Parameters
----------
host : str
The hostname for running the visualization interface.
port : int
The port for the visualization interface.
logging_server_host : str
The hostname for the logging server.
logging_server_port : int
The port for the logging server.
"""
self.host = host
self.port = port
self.logging_server_host = logging_server_host
self.logging_server_port = logging_server_port
class Database(MonitoringStore, RepresentationMixin):
def __init__(self,
connection_string=None, **kwargs):
""" Initializes a monitoring configuration class.
Parameters
----------
connection_string : str, optional
Database connection string that defines how to connect to the database. If not set, DFK init will use a sqlite3
database inside the rundir.
"""
super().__init__(**kwargs)
self.connection_string = connection_string
class VisualizationServer(RepresentationMixin):
def __init__(self,
host='http://localhost',
port=8899):
"""
Parameters
----------
host : str
The hostname for running the visualization interface.
port : int
The port for the visualization interface
"""
self.host = host
self.port = port
class Monitoring(RepresentationMixin):
""" This is a config class for monitoring. """
def __init__(self,
store=None,
visualization_server=None,
monitoring_interval=15,
workflow_name=None,
version='1.0.0'):
""" Initializes a monitoring configuration class.
Parameters
----------
monitoring_interval : float, optional
The amount of time in seconds to sleep in between resource monitoring logs per task.
workflow_name : str, optional
Name to record as the workflow base name, defaults to the name of the parsl script file if left as None.
version : str, optional
Optional workflow identification to distinguish between workflows with the same name, not used internally only for display to user.
        Example
        -------
        .. code-block:: python

            import parsl
            from parsl.config import Config
            from parsl.executors.threads import ThreadPoolExecutor
            # Monitoring, Database and VisualizationServer are the classes
            # defined in this module.

            config = Config(
                executors=[ThreadPoolExecutor()],
                monitoring_config=Monitoring(
                    store=Database(
                        connection_string='sqlite:///monitoring.db'
                    ),
                    visualization_server=VisualizationServer(
                        host='http://localhost',
                        port=9999
                    )
                )
            )
            parsl.load(config)
"""
self.store = store
self.visualization_server = visualization_server
self.version = version
self.monitoring_interval = monitoring_interval
self.workflow_name = workflow_name
# for now just set this to none but can be used to present the dashboard location to user
self.dashboard_link = None
def get_parsl_logger(
logger_name='parsl_monitor_logger',
is_logging_server=False,
monitoring_config=None,
**kwargs):
"""
Parameters
----------
logger_name : str, optional
Name of the logger to use. Prevents adding repeat handlers or incorrect handlers
is_logging_server : Bool, optional
Used internally to determine which handler to return when using local db logging
monitoring_config : MonitoringConfig, optional
Pass in a logger class object to use for generating loggers.
Returns
-------
logging.logger object
Raises
------
OptionalModuleMissing
"""
logger = logging.getLogger(logger_name)
if monitoring_config is None:
logger.addHandler(NullHandler())
return logger
if monitoring_config.store is None:
raise ValueError('No MonitoringStore defined')
if is_logging_server:
# add a handler that will take logs being received on the server and log them to the store
handler = DatabaseHandler(monitoring_config.store.connection_string)
# use the specific name generated by the server or the monitor wrapper
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
else:
# add a handler that will pass logs to the logging server
handler = RemoteHandler(monitoring_config.store.logging_server_host, monitoring_config.store.logging_server_port)
# use the specific name generated by the server or the monitor wrapper
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
return logger
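
# Hedged usage sketch: a worker-side logger that forwards records to the logging
# server address configured in the store (the values shown mirror the defaults
# declared above and are not additional API):
#
#   monitoring = Monitoring(store=MonitoringStore(logging_server_host='localhost',
#                                                 logging_server_port=9595))
#   logger = get_parsl_logger(logger_name='parsl_monitor_logger',
#                             is_logging_server=False,
#                             monitoring_config=monitoring)
#   logger.info('task started')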
|
StarcoderdataPython
|
3384543
|
<gh_stars>0
from django.shortcuts import render
from .models import Coins
from rest_framework import viewsets
from rest_framework import permissions, generics
from .serializers import CoinsSerializer
# Create your views here.
class CoinViewSet(viewsets.ModelViewSet):
queryset = Coins.objects.all()
serializer_class = CoinsSerializer
class UpdateCoin(generics.UpdateAPIView):
queryset = Coins.objects.all()
serializer_class = CoinsSerializer
class GetCoins(generics.ListAPIView):
serializer_class = CoinsSerializer
def get_queryset(self):
username = self.kwargs['pk']
return Coins.objects.filter(username=username)
|
StarcoderdataPython
|
3213194
|
<gh_stars>1-10
import os
import json
import logging
import numpy
from osgeo import gdal
from ground_surveyor import gsconfig
def pick_best_pile_layer(pile_md_filename,
selection_options):
pile_md = json.load(open(pile_md_filename))
best_i = -1
best_value = 0
target_field = selection_options.get('order_field',
'normalized_sharpness')
cc_threshold = selection_options.get('cross_correlation_threshold',
None)
small_cc_threshold = selection_options.get(
'small_cross_correlation_threshold',
None)
zero_threshold = selection_options.get('zero_threshold',
None)
for i in range(len(pile_md['sharpness'])):
if cc_threshold is not None:
cc_raw = pile_md['cross_correlation_raw'][i]
if cc_raw < cc_threshold:
continue
if small_cc_threshold is not None:
small_cc = pile_md['cross_correlation_small'][i]
if small_cc < small_cc_threshold:
continue
if zero_threshold is not None and pile_md['n_pixel_at_zero_intensity'][i] > zero_threshold:
continue
        if target_field == 'normalized_sharpness':
            target_value = pile_md['sharpness'][i] \
                / pile_md['intensity_median'][i]
        else:
            # the metadata fields are per-layer lists, so index the current layer
            target_value = pile_md[target_field][i]
if target_value > best_value:
best_value = target_value
best_i = i
# If nothing met the threshold, try again without the threshold.
if best_i == -1 and cc_threshold is not None:
for i in range(len(pile_md['sharpness'])):
if zero_threshold is not None and pile_md['n_pixel_at_zero_intensity'][i] > zero_threshold:
continue
            if target_field == 'normalized_sharpness':
                target_value = pile_md['sharpness'][i] \
                    / pile_md['intensity_median'][i]
            else:
                # same per-layer indexing as above
                target_value = pile_md[target_field][i]
if target_value > best_value:
best_value = target_value
best_i = i
logging.debug('Picked input metatile %d for pile %s with %s value of %s.',
best_i,
os.path.basename(pile_md_filename)[:15],
target_field, best_value)
return best_i
def get_pile_layer(pile_md_filename, i_file):
raw_filename = pile_md_filename.replace('_datacube_metadata.json',
'_raw.tif')
raw_ds = gdal.Open(raw_filename)
return raw_ds.GetRasterBand(i_file+1).ReadAsArray()
def merge_pile_into_mosaic(mosaic_ds,
pile_md_filename,
selected_i,
selected_img,
processing_options):
pile_parts = os.path.basename(pile_md_filename).split('_')[0:3]
assert pile_parts[0] == 'uf'
uf_i = int(pile_parts[1])
uf_j = int(pile_parts[2])
if processing_options.get('normalize_intensity',False):
pile_md = json.load(open(pile_md_filename))
selected_img = selected_img * 1000.0 \
/ pile_md['intensity_median'][selected_i]
mosaic_ds.GetRasterBand(1).WriteArray(
selected_img, uf_i * 256, uf_j * 256)
alpha_band = mosaic_ds.GetRasterBand(mosaic_ds.RasterCount)
if alpha_band.GetColorInterpretation() == gdal.GCI_AlphaBand:
if alpha_band.DataType == gdal.GDT_UInt16:
opaque = 65535
else:
opaque = 255
alpha_band.WriteArray(
numpy.ones(selected_img.shape) * opaque,
uf_i * 256, uf_j * 256)
def make_metatile(pile_directory):
mosaic_filename = os.path.join(pile_directory,'mosaic.tif')
mosaic_ds = gdal.GetDriverByName('GTiff').Create(
mosaic_filename, 4096, 4096, 1, gdal.GDT_UInt16)
# TODO: Try to add georeferencing...
return mosaic_filename, mosaic_ds
def mosaic_metatile(pile_directory,
selection_options,
processing_options={}):
mosaic_filename, mosaic_ds = make_metatile(pile_directory)
counter = 0
for filename in os.listdir(pile_directory):
if (not filename.startswith('uf_')) or (not filename.endswith('_metadata.json')):
continue
pile_md_filename = os.path.join(pile_directory, filename)
i_file = pick_best_pile_layer(pile_md_filename, selection_options)
if i_file >= 0:
selected_img = get_pile_layer(pile_md_filename, i_file)
merge_pile_into_mosaic(mosaic_ds, pile_md_filename,
i_file, selected_img,
processing_options)
counter += 1
logging.info('%d piles contributed to making %s.',
counter, mosaic_filename)
return mosaic_filename
|
StarcoderdataPython
|
3316182
|
import math
import numpy as np
from .gazebo_env import GazeboEnv
import logging
logger = logging.getLogger("gymfc")
class AttitudeFlightControlEnv(GazeboEnv):
def compute_reward(self):
""" Compute the reward """
return -np.clip(np.sum(np.abs(self.error))/(self.omega_bounds[1]*3), 0, 1)
def sample_target(self):
""" Sample a random angular velocity """
return self.np_random.uniform(self.omega_bounds[0], self.omega_bounds[1], size=3)
class GyroErrorFeedbackEnv(AttitudeFlightControlEnv):
def __init__(self, world="attitude-iris.world",
omega_bounds = [-math.pi, math.pi],
max_sim_time = 1.,
motor_count = 4,
memory_size=1,):
self.omega_bounds = omega_bounds
self.max_sim_time = max_sim_time
self.memory_size = memory_size
self.motor_count = motor_count
self.observation_history = []
super(GyroErrorFeedbackEnv, self).__init__(motor_count = motor_count, world=world)
self.omega_target = self.sample_target()
def step(self, action):
action = np.clip(action, self.action_space.low, self.action_space.high)
# Step the sim
self.obs = self.step_sim(action)
self.error = self.omega_target - self.obs.angular_velocity_rpy
self.observation_history.append(np.concatenate([self.error]))
state = self.state()
done = self.sim_time >= self.max_sim_time
reward = self.compute_reward()
info = {"sim_time": self.sim_time, "sp": self.omega_target, "current_rpy": self.omega_actual}
return state, reward, done, info
############
class StabilizeFlightControlEnv(GazeboEnv):
def compute_reward(self):
""" Compute the reward """
return -np.clip(np.sum(np.abs(self.error))/(self.omega_bounds[1]*3), 0, 1)
def sample_target(self):
""" Sample a random angular velocity """
return self.np_random.uniform(self.omega_bounds[0], self.omega_bounds[1], size=3)
class StabilizeEnv(StabilizeFlightControlEnv):
def __init__(self, world="attitude-iris.world",
omega_bounds=[-math.pi, math.pi],
max_sim_time=1.,
motor_count=4,
memory_size=1, ):
self.omega_bounds = omega_bounds
self.max_sim_time = max_sim_time
self.memory_size = memory_size
self.motor_count = motor_count
self.observation_history = []
super(StabilizeEnv, self).__init__(motor_count=motor_count, world=world)
self.omega_target = self.sample_target()
def step(self, action):
action = np.clip(action, self.action_space.low, self.action_space.high)
# Step the sim
self.obs = self.step_sim(action)
self.error = self.omega_target - self.obs.angular_velocity_rpy
self.observation_history.append(np.concatenate([self.error]))
state = self.state()
done = self.sim_time >= self.max_sim_time
reward = self.compute_reward()
info = {"sim_time": self.sim_time, "sp": self.omega_target, "current_rpy": self.omega_actual}
return state, reward, done, info
def state(self):
""" Get the current state """
# The newest will be at the end of the array
memory = np.array(self.observation_history[-self.memory_size:])
return np.pad(memory.ravel(),
( (3 * self.memory_size) - memory.size, 0),
'constant', constant_values=(0))
def reset(self):
self.observation_history = []
return super(StabilizeEnv, self).reset()
#############
class GyroErrorESCVelocityFeedbackEnv(GazeboEnv):
def __init__(self, world="attitude-iris.world",
omega_bounds =[-math.pi, math.pi],
max_sim_time = 1.,
motor_count = 4,
memory_size=1,):
self.omega_bounds = omega_bounds
self.max_sim_time = max_sim_time
self.memory_size = memory_size
self.motor_count = motor_count
self.observation_history = []
super(GyroErrorESCVelocityFeedbackEnv, self).__init__(motor_count = motor_count, world=world)
self.omega_target = self.sample_target()
def step(self, action):
action = np.clip(action, self.action_space.low, self.action_space.high)
# Step the sim
self.obs = self.step_sim(action)
self.error = self.omega_target - self.obs.angular_velocity_rpy
self.observation_history.append(np.concatenate([self.error, self.obs.motor_velocity]))
state = self.state()
done = self.sim_time >= self.max_sim_time
reward = self.compute_reward()
info = {"sim_time": self.sim_time, "sp": self.omega_target, "current_rpy": self.omega_actual}
return state, reward, done, info
def compute_reward(self):
""" Compute the reward """
return -np.clip(np.sum(np.abs(self.error))/(self.omega_bounds[1]*3), 0, 1)
def sample_target(self):
""" Sample a random angular velocity """
return self.np_random.uniform(self.omega_bounds[0], self.omega_bounds[1], size=3)
def state(self):
""" Get the current state """
# The newest will be at the end of the array
memory = np.array(self.observation_history[-self.memory_size:])
return np.pad(memory.ravel(),
(( (3+self.motor_count) * self.memory_size) - memory.size, 0),
'constant', constant_values=(0))
def reset(self):
self.observation_history = []
return super(GyroErrorESCVelocityFeedbackEnv, self).reset()
class GyroErrorESCVelocityFeedbackContinuousEnv(GyroErrorESCVelocityFeedbackEnv):
def __init__(self, command_time_off=[], command_time_on=[], **kwargs):
self.command_time_off = command_time_off
self.command_time_on = command_time_on
self.command_off_time = None
super(GyroErrorESCVelocityFeedbackContinuousEnv, self).__init__(**kwargs)
def step(self, action):
""" Sample a random angular velocity """
ret = super(GyroErrorESCVelocityFeedbackContinuousEnv, self).step(action)
# Update the target angular velocity
if not self.command_off_time:
self.command_off_time = self.np_random.uniform(*self.command_time_on)
elif self.sim_time >= self.command_off_time: # Issue new command
# Commands are executed as pulses, always returning to center
if (self.omega_target == np.zeros(3)).all():
self.omega_target = self.sample_target()
self.command_off_time = self.sim_time + self.np_random.uniform(*self.command_time_on)
else:
self.omega_target = np.zeros(3)
self.command_off_time = self.sim_time + self.np_random.uniform(*self.command_time_off)
return ret
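# Minimal usage sketch (illustrative only): how one of these environments could be
# driven by a random policy. Constructing GazeboEnv subclasses assumes a running
# Gazebo/ArduPilot setup; the loop below is hypothetical and not part of this module.
#
# env = GyroErrorESCVelocityFeedbackEnv(max_sim_time=1., memory_size=1)
# state = env.reset()
# done = False
# while not done:
#     action = env.action_space.sample()            # random motor commands
#     state, reward, done, info = env.step(action)  # error + ESC feedback state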
|
StarcoderdataPython
|
136510
|
import logging, os
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras.layers import BatchNormalization, Conv2D, UpSampling2D, MaxPooling2D, Dropout
from tensorflow.keras.optimizers import SGD, Adam, Adagrad
from tensorflow.keras import backend as K
import numpy as np
from numpy.random import seed
from datetime import datetime
from datetime import timedelta
import pickle
import os
import os.path
import math
import argparse
def accuracy05(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>0.5,y_pred>0.5), tf.float32))
tn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>0.5),tf.math.logical_not(y_pred>0.5)), tf.float32))
return (tp+tn)/tf.cast(tf.size(y_true), tf.float32)
def precision05(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>0.5,y_pred>0.5), tf.float32))
total_pred = tf.reduce_sum(tf.cast(y_pred>0.5, tf.float32))
return tp/(total_pred+K.epsilon())
def recall05(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>0.5,y_pred>0.5), tf.float32))
total_true = tf.reduce_sum(tf.cast(y_true>0.5, tf.float32))
return tp/(total_true+K.epsilon())
def accuracy1(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>1,y_pred>1), tf.float32))
tn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>1),tf.math.logical_not(y_pred>1)), tf.float32))
return (tp+tn)/tf.cast(tf.size(y_true), tf.float32)
def precision1(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>1,y_pred>1), tf.float32))
#fp = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>1),y_pred>1), tf.float64))
total_pred = tf.reduce_sum(tf.cast(y_pred>1, tf.float32))
#if tf.math.less(total_pred, tf.constant([1.])):
# return 0.
return tp/(total_pred+K.epsilon())
def recall1(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>1,y_pred>1), tf.float32))
#fn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_pred>1),y_true>1), tf.float64))
total_true = tf.reduce_sum(tf.cast(y_true>1, tf.float32))
#if tf.math.less(total_true, tf.constant([1.])):
# return 0.
return tp/(total_true+K.epsilon())
def accuracy5(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>5,y_pred>5), tf.float32))
tn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>5),tf.math.logical_not(y_pred>5)), tf.float32))
return (tp+tn)/tf.cast(tf.size(y_true), tf.float32)
def precision5(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>5,y_pred>5), tf.float32))
#fp = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>5),y_pred>5), tf.float64))
total_pred = tf.reduce_sum(tf.cast(y_pred>5, tf.float32))
#if tf.math.less(total_pred, tf.constant([1.])):
# return 0.
return tp/(total_pred+K.epsilon())
def recall5(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>5,y_pred>5), tf.float32))
#fn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_pred>5),y_true>5), tf.float64))
total_true = tf.reduce_sum(tf.cast(y_true>5, tf.float32))
#if tf.math.less(total_true, tf.constant([1.])):
# return 0.
return tp/(total_true+K.epsilon())
def accuracy10(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>10,y_pred>10), tf.float32))
tn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>10),tf.math.logical_not(y_pred>10)), tf.float32))
return (tp+tn)/tf.cast(tf.size(y_true), tf.float32)
def precision10(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>10,y_pred>10), tf.float32))
#fp = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_true>10),y_pred>10), tf.float64))
total_pred = tf.reduce_sum(tf.cast(y_pred>10, tf.float32))
#if tf.math.less(total_pred, tf.constant([1.])):
# return 0.
return tp/(total_pred+K.epsilon())
def recall10(y_true, y_pred):
tp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_true>10,y_pred>10), tf.float32))
#fn = tf.reduce_sum(tf.cast(tf.math.logical_and(tf.math.logical_not(y_pred>10),y_true>10), tf.float64))
total_true = tf.reduce_sum(tf.cast(y_true>10, tf.float32))
#if tf.math.less(total_true, tf.constant([1])):
# return 0.
return tp/(total_true+K.epsilon())
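# Quick sanity-check sketch for the thresholded metrics above (assumes eager
# TensorFlow 2.x; the values are chosen only for illustration):
#
# y_true = tf.constant([[0.0, 2.0], [6.0, 12.0]])
# y_pred = tf.constant([[0.0, 0.5], [7.0, 11.0]])
# print(accuracy1(y_true, y_pred))   # fraction of cells on the correct side of the >1 threshold
# print(precision5(y_true, y_pred), recall5(y_true, y_pred))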
def get_unet():
concat_axis = 3
inputs = layers.Input(shape=(512, 512, 3))
feats = 8#16
bn0 = BatchNormalization(axis=3)(inputs)
conv1 = layers.Conv2D(feats, (3, 3), activation='relu', padding='same', name='conv1_1')(bn0)
bn2 = BatchNormalization(axis=3)(conv1)
pool1 = layers.MaxPooling2D(pool_size=(2, 2))(bn2) #256
conv2 = layers.Conv2D(2*feats, (3, 3), activation='relu', padding='same')(pool1)
bn4 = BatchNormalization(axis=3)(conv2)
pool2 = layers.MaxPooling2D(pool_size=(2, 2))(bn4) #128
conv3 = layers.Conv2D(4*feats, (3, 3), activation='relu', padding='same')(pool2)
bn6 = BatchNormalization(axis=3)(conv3)
pool3 = layers.MaxPooling2D(pool_size=(2, 2))(bn6) #64
conv4 = layers.Conv2D(8*feats, (3, 3), activation='relu', padding='same')(pool3)
bn8 = BatchNormalization(axis=3)(conv4)
pool4 = layers.MaxPooling2D(pool_size=(2, 2))(bn8) #32
conv5 = layers.Conv2D(16*feats, (3, 3), activation='relu', padding='same')(pool4)
bn10 = BatchNormalization(axis=3)(conv5)
pool5 = layers.MaxPooling2D(pool_size=(2, 2))(bn10) #16
conv6 = layers.Conv2D(32*feats, (3, 3), activation='relu', padding='same')(pool5)
bn11 = BatchNormalization(axis=3)(conv6)
up_conv6 = layers.UpSampling2D(size=(2, 2))(bn11) #32
up7 = layers.concatenate([up_conv6, conv5], axis=concat_axis)
conv7 = layers.Conv2D(16*feats, (3, 3), activation='relu', padding='same')(up7)
bn13 = BatchNormalization(axis=3)(conv7)
up_conv5 = layers.UpSampling2D(size=(2, 2))(bn13) #64
up6 = layers.concatenate([up_conv5, conv4], axis=concat_axis)
conv6 = layers.Conv2D(8*feats, (3, 3), activation='relu', padding='same')(up6)
bn15 = BatchNormalization(axis=3)(conv6)
up_conv6 = layers.UpSampling2D(size=(2, 2))(bn15) #128
up7 = layers.concatenate([up_conv6, conv3], axis=concat_axis)
conv7 = layers.Conv2D(4*feats, (3, 3), activation='relu', padding='same')(up7)
bn13 = BatchNormalization(axis=3)(conv7)
# Rectify last convolution layer to constraint output to positive precipitation values.
conv8 = layers.Conv2D(1, (1, 1), activation='relu')(bn13)
model = models.Model(inputs=inputs, outputs=conv8)
return model
def get_band_data(loc, dates, b, mean=None, std=None):
y = np.concatenate([np.load(f"Y_{loc}_{d}.npy") for d in dates], axis=0)
y = np.clip(y,0,30)
x11 = np.concatenate([np.load(f"X_B11_{loc}_{d}.npy") for d in dates], axis=0)
x16 = np.concatenate([np.load(f"X_B16_{loc}_{d}.npy") for d in dates], axis=0)
xi = np.concatenate([np.load(f"X_B{b}_{loc}_{d}.npy") for d in dates], axis=0)
if mean is None:
mean = [x11.mean(),x16.mean(),xi.mean()]
std = [x11.std(),x16.std(),xi.std()]
x11 = (x11-mean[0])/std[0]
x16 = (x16-mean[1])/std[1]
xi = (xi-mean[2])/std[2]
x = np.stack((x11,x16,xi), axis=3)
x11 = None
x16 = None
xi = None
return x, y[:,:,:,None], mean, std
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Himawari-GPM Band comparison')
parser.add_argument('-b1', '--band1', help='Band 1 in list', type=int, required=True)
parser.add_argument('-b2', '--band2', help='Band 2 in list', type=int, required=True)
parser.add_argument('-b3', '--band3', help='Band 3 in list', type=int, required=True)
parser.add_argument('-loc', '--location', help='Geographic location', type=str, required=True)
parser.add_argument('-val', '--validation', help='Month used for validation', type=int, required=True)
parser.add_argument('-s', '--seed', help='Random seed', type=int, required=False, default=1)
args = parser.parse_args()
seed(args.seed)
if os.path.isfile(f'model_3months_200epochs_8chann_v{args.validation}_{args.location}_s{args.seed}_b{args.band1}_{args.band2}_{args.band3}.h5'):
exit()
tf.random.set_seed(args.seed)
dates = ["201811","201812","201901","201902"]
x_train, y_train, mean, std = get_band_data(args.location, [x for i, x in enumerate(dates) if i!=args.validation], args.band3)
x_test, y_test, _, _ = get_band_data(args.location, [x for i, x in enumerate(dates) if i==args.validation], args.band3, mean, std)
print(x_train.shape, y_train.shape)
print("MSE train", np.mean(np.square(y_train)))
print("MSE test", np.mean(np.square(y_test)))
model = get_unet()
print(model.summary())
opt = Adagrad(lr=0.0001)
model.compile(loss='mse', metrics=[accuracy05,precision05,recall05,accuracy1,precision1,recall1,accuracy5,precision5,recall5,accuracy10,precision10,recall10], optimizer=opt)
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), shuffle=True, epochs=200, verbose=1)
with open(f'history_3months_200epochs_8chann_v{args.validation}_{args.location}_s{args.seed}_b{args.band1}_{args.band2}_{args.band3}.pkl', 'wb') as f:
pickle.dump(history.history, f)
model.save(f'model_3months_200epochs_8chann_v{args.validation}_{args.location}_s{args.seed}_b{args.band1}_{args.band2}_{args.band3}.h5')
|
StarcoderdataPython
|
191302
|
<reponame>pulina/tastypie-queryset-client
from decimal import Decimal
from datetime import datetime
#from django.conf import settings
#settings.DEBUG = True
from testcases import (
TestServerTestCase,
get_client
)
from django.core.management import call_command
from .utils import id_generator
def _getDateTimeField(klass):
call_command('loaddata', 'small_data.json')
for i in klass.client.inbox_message.objects.all():
klass.assertTrue(isinstance(i.ctime, datetime))
def _setDateTimeField(klass):
ctime = datetime.now()
utime = datetime.now()
message = klass.client.message(
subject=id_generator(), body=id_generator(), ctime=ctime, utime=utime)
message.save()
klass.assertTrue(isinstance(message.ctime, datetime))
klass.assertTrue(isinstance(message.utime, datetime))
# klass.assertTrue(message.ctime == ctime) # lag
# klass.assertTrue(message.utime == utime) # lag
message_ = klass.client.message.objects.get(id=message.id)
klass.assertTrue(message.id == message_.id)
def _DecimalField(klass):
decimal_test = Decimal('0.2')
# Test save
strict = klass.client.strict(decimal_test=decimal_test)
strict.save()
# Tests
klass.assertTrue(isinstance(strict.decimal_test, Decimal))
klass.assertTrue(strict.decimal_test == decimal_test)
# Get
strict_ = klass.client.strict.objects.get(id=strict.id)
klass.assertTrue(strict.id == strict_.id)
# Search
strict_ = klass.client.strict.objects.filter(decimal_test=decimal_test)
klass.assertTrue(strict_.count() == 1)
def _FloatField(klass):
float_test = float(0.2)
# Test save
strict = klass.client.strict(float_test=float_test)
strict.save()
# Tests
klass.assertTrue(isinstance(strict.float_test, float))
klass.assertTrue(strict.float_test == float_test)
# Get
strict_ = klass.client.strict.objects.get(id=strict.id)
klass.assertTrue(strict.id == strict_.id)
# Search
strict_ = klass.client.strict.objects.filter(float_test=float_test)
klass.assertTrue(strict_.count() == 1)
def _IntegerField(klass):
integer_test = 10
# Test save
strict = klass.client.strict(integer_test=integer_test)
strict.save()
# Tests
klass.assertTrue(isinstance(strict.integer_test, int))
klass.assertTrue(strict.integer_test == integer_test)
# Get
strict_ = klass.client.strict.objects.get(id=strict.id)
klass.assertTrue(strict.id == strict_.id)
# Search
strict_ = klass.client.strict.objects.filter(integer_test=integer_test)
klass.assertTrue(strict_.count() == 1)
def _BooleanField(klass):
pass
def _GenericIPAddressField(klass):
pass
class StrictTestCase(TestServerTestCase):
def setUp(self):
self.start_test_server()
self.client = get_client()
def tearDown(self):
self.stop_test_server()
def test_getDateTimeField(self):
_getDateTimeField(self)
def test_setDateTimeField(self):
_setDateTimeField(self)
def test_DecimalField(self):
_DecimalField(self)
def test_FloatField(self):
_FloatField(self)
def test_IntegerField(self):
_IntegerField(self)
def test_BooleanField(self):
_BooleanField(self)
def test_GenericIPAddressField(self):
_GenericIPAddressField(self)
class NotStrictTestCase(TestServerTestCase):
def setUp(self):
self.start_test_server()
self.client = get_client(strict_field=False)
def tearDown(self):
self.stop_test_server()
# TODO: Json serialize error
# def test_getDateTimeField(self):
# _getDateTimeField(self)
# TODO: Json serialize error
# def test_setDateTimeField(self):
# _setDateTimeField(self)
# TODO: Str response
# def test_DecimalField(self):
# _DecimalField(self)
def test_FloatField(self):
_FloatField(self)
def test_IntegerField(self):
_IntegerField(self)
def test_BooleanField(self):
_BooleanField(self)
def test_GenericIPAddressField(self):
_GenericIPAddressField(self)
|
StarcoderdataPython
|
67763
|
<gh_stars>0
import sys
from pathlib import Path
path = str(Path(__file__).parents[1].resolve())
sys.path.append(path)
import argparse
import random
import numpy as np
import librosa
import rospy
from std_msgs.msg import UInt8MultiArray
from imperio.sonorus.audio.utils import audio_int2float
from imperio.robot.hr.lip_control import PhonemesPublisher
from imperio.robot.hr.lip_control.utils import drop_random, shift_time
class PhonemesPublisherOnVoice(object):
def __init__(self, shift=0, duration=0.02, seed=None, drop_th=0.8):
self.shift = shift
self.duration = duration
self.seed = seed
self.drop_th = drop_th
self.random = random.Random(seed)
rospy.Subscriber("/hr/control/audio/stream", UInt8MultiArray, self.callback)
self.pub = PhonemesPublisher(
default_viseme_params=dict(magnitude=0.99, rampin=0.01, rampout=0.01,),
)
def callback(self, msg):
phonemes = self.pub.random(self.duration, chunk=self.duration)
phonemes = drop_random(phonemes, seed=self.seed, drop_th=self.drop_th)
phonemes = shift_time(phonemes, shift=self.shift)
# if phonemes:
self.pub.publish(phonemes)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--shift",
type=float,
default=0,
help="Time shift in seconds after the audio message is received. Default is 0 secs.",
)
parser.add_argument(
"-d",
"--duration",
type=float,
default=0.02,
help="Duration in seconds for which phonemes will be played for each audio message received. Default is 0.02 secs.",
)
args = parser.parse_args()
rospy.init_node("phoneme_publisher")
PhonemesPublisherOnVoice(shift=args.shift, duration=args.duration)
rospy.spin()
|
StarcoderdataPython
|
82082
|
<filename>samples/fn_functions.py
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2017 allancth
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from datetime import datetime
def add(a: int, b: int) -> int:
return a + b
def multiply(a, b):
return a * b
def subtract(a, b):
return b - a
def divide(a, b):
return a / b
def greet(name: str="World") -> str:
print("Hello {0}!".format(name))
def fib(n: int=0) -> int:
if n == 0:
return 0
if n == 1:
return 1
return fib(n - 1) + fib(n - 2)
result = add(1, 2)
print(result)
greet("Alice")
greet()
for i in range(10):
print(fib(i))
def get_date():
return datetime.now().day, datetime.now().month, datetime.now().year
t = get_date()
print(t)
print("{0}-{1}-{2}".format(t[2], t[1], t[0]))
d, m, y = get_date()
print("{0}-{1}-{2}".format(y, m, d))
|
StarcoderdataPython
|
3386682
|
<gh_stars>1-10
import tensorflow as tf
class TextClassifierCNNModel(object):
def __init__(self,
seq_length=600,
num_classes=10,
vocab_size=5000,
embedding_dim=64,
num_filters=256,
kernel_size=5,
learning_rate=1e-3):
self.name = 'text_classify_cnn'
# 1. Define input & output.
self.input = tf.placeholder(tf.int32, [None, seq_length],
name='input')
self.output = tf.placeholder(tf.float32,
[None, num_classes],
name='output')
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# 2. Define embedding.
with tf.device('/cpu:0'):
embedding = tf.get_variable('embedding', [vocab_size,
embedding_dim])
embedding_inputs = tf.nn.embedding_lookup(embedding, self.input)
# 3. Define CNN.
with tf.name_scope("cnn"):
conv_layer = tf.layers.conv1d(embedding_inputs,
num_filters,
kernel_size, name='conv')
max_pooling_layer = tf.reduce_max(conv_layer, reduction_indices=[1],
name='mp')
# 4. Define FCN.
with tf.name_scope("fcn"):
fc_1 = tf.layers.dense(max_pooling_layer, 128,
name='fc_1')
fc_1 = tf.contrib.layers.dropout(fc_1, self.keep_prob)
fc_1 = tf.nn.relu(fc_1)
self.net = tf.layers.dense(fc_1, num_classes,
name='fc_2')
self.predict = tf.argmax(tf.nn.softmax(self.net), 1)
# 5. Define optimization.
with tf.name_scope("optimize"):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits=self.net, labels=self.output)
self.loss = tf.reduce_mean(cross_entropy)
self.optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(self.loss)
# 6. Define accuracy.
with tf.name_scope("accuracy"):
correct_predict = tf.equal(tf.argmax(self.output, 1), self.predict)
self.accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))
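# Illustrative training-step sketch (TF1 graph/session style, matching the
# placeholders above; x_batch and y_batch are hypothetical numpy arrays):
#
# model = TextClassifierCNNModel(seq_length=600, num_classes=10)
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     feed = {model.input: x_batch, model.output: y_batch, model.keep_prob: 0.5}
#     _, loss, acc = sess.run([model.optimizer, model.loss, model.accuracy], feed_dict=feed)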
|
StarcoderdataPython
|
3294610
|
from datetime import timedelta
import arrow
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.files.base import ContentFile
from django.utils import timezone
from model_bakery import baker
from devilry.apps.core.deliverystore import MemoryDeliveryStore
from devilry.apps.core.models import Assignment
from devilry.apps.core.models import AssignmentGroup
from devilry.apps.core.models import Deadline
from devilry.apps.core.models import Delivery
from devilry.apps.core.models import FileMeta
from devilry.apps.core.models import Period
from devilry.apps.core.models import RelatedExaminer
from devilry.apps.core.models import RelatedStudent
from devilry.apps.core.models import StaticFeedback
from devilry.apps.core.models import StaticFeedbackFileAttachment
from devilry.apps.core.models import Subject
from devilry.devilry_comment.models import CommentFile
class ReloadableDbBuilderInterface(object):
def update(self, **attributes):
raise NotImplementedError()
def reload_from_db(self):
raise NotImplementedError()
class UserBuilder(ReloadableDbBuilderInterface):
"""
The old user builder class.
Use :class:`.UserBuilder2` for new tests.
"""
def __init__(self, username, full_name=None, email=None, is_superuser=False):
        email = email or '<EMAIL>'.format(username)
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
username = ''
self.user = get_user_model().objects.create_user(
username=username,
email=email,
is_superuser=is_superuser,
password='<PASSWORD>',
fullname=full_name or '')
def update(self, **attributes):
for attrname, value in attributes.items():
setattr(self.user, attrname, value)
self.user.save()
self.reload_from_db()
def reload_from_db(self):
self.user = get_user_model().objects.get(id=self.user.id)
class UserBuilder2(ReloadableDbBuilderInterface):
"""
A user builder much more suitable for :class:`devilry.devilry_account.model.User`
than :class:`.UserBuilder`.
    Use this instead of :class:`.UserBuilder` for new tests.
"""
def __init__(self, **kwargs):
self.user = baker.make_recipe('devilry.devilry_account.user', **kwargs)
self.user.save()
def update(self, **attributes):
for attrname, value in attributes.items():
setattr(self.user, attrname, value)
self.user.save()
self.reload_from_db()
def reload_from_db(self):
self.user = get_user_model().objects.get(id=self.user.id)
def add_emails(self, *emails):
for email in emails:
self.user.useremail_set.create(email=email, use_for_notifications=False)
return self
def add_primary_email(self, email, use_for_notifications=True):
self.user.useremail_set.create(email=email, use_for_notifications=use_for_notifications,
is_primary=True)
return self
def add_usernames(self, *usernames):
for username in usernames:
self.user.username_set.create(username=username, is_primary=False)
return self
def add_notification_emails(self, *emails):
for email in emails:
self.user.useremail_set.create(email=email, use_for_notifications=True)
return self
def add_primary_username(self, username):
self.user.username_set.create(username=username, is_primary=True)
return self
class CoreBuilderBase(ReloadableDbBuilderInterface):
object_attribute_name = None
def _save(self):
getattr(self, self.object_attribute_name).save()
def get_object(self):
return getattr(self, self.object_attribute_name)
def __set_object_value(self, attribute, value):
setattr(self.get_object(), attribute, value)
def update(self, **attributes):
for attribute, value in attributes.items():
self.__set_object_value(attribute, value)
self._save()
self.reload_from_db()
def reload_from_db(self):
obj = getattr(self, self.object_attribute_name)
setattr(self, self.object_attribute_name, obj.__class__.objects.get(pk=obj.pk))
@classmethod
def make(cls, **kwargs):
"""
        Creates a usable object of this builder's model.
Use this when you just need an object with no special
meaning.
"""
raise NotImplementedError()
class BaseNodeBuilderBase(CoreBuilderBase):
modelcls = None
#: Used to generate unique names.
sequencenumber = 0
def __init__(self, short_name=None, long_name=None, **kwargs):
if not short_name:
short_name = '{}{}'.format(self.object_attribute_name,
self.sequencenumber)
full_kwargs = {
'short_name': short_name,
'long_name': long_name or short_name
}
full_kwargs.update(kwargs)
setattr(self, self.object_attribute_name, self.modelcls.objects.create(**full_kwargs))
BaseNodeBuilderBase.sequencenumber += 1
def add_admins(self, *users):
for user in users:
obj = getattr(self, self.object_attribute_name)
obj.admins.add(user)
return self
class FileMetaBuilder(CoreBuilderBase):
object_attribute_name = 'filemeta'
def __init__(self, delivery, filename, data):
self.filemeta = FileMeta.objects.create(delivery=delivery, filename=filename, size=0)
f = FileMeta.deliverystore.write_open(self.filemeta)
f.write(data)
f.close()
self.filemeta.size = len(data)
self.filemeta.save()
class StaticFeedbackFileAttachmentBuilder(CoreBuilderBase):
object_attribute_name = 'fileattachment'
def __init__(self, staticfeedback, filename='test.txt', filedata='Testdata'):
self.fileattachment = StaticFeedbackFileAttachment(staticfeedback=staticfeedback, filename=filename)
self.fileattachment.file.save(filename, ContentFile(filedata))
class StaticFeedbackBuilder(CoreBuilderBase):
object_attribute_name = 'feedback'
def __init__(self, **kwargs):
self.feedback = StaticFeedback.objects.create(**kwargs)
def add_fileattachment(self, **kwargs):
kwargs['staticfeedback'] = self.feedback
return StaticFeedbackFileAttachmentBuilder(**kwargs)
class DeliveryBuilder(CoreBuilderBase):
object_attribute_name = 'delivery'
@classmethod
def set_memory_deliverystore(cls):
FileMeta.deliverystore = MemoryDeliveryStore()
def __init__(self, **kwargs):
if 'time_of_delivery' not in kwargs:
kwargs['time_of_delivery'] = timezone.now()
self.delivery = Delivery(**kwargs)
if 'number' not in kwargs:
self.delivery.set_number()
self.delivery.save()
def _save(self):
self.delivery.save()
def add_filemeta(self, **kwargs):
kwargs['delivery'] = self.delivery
return FileMetaBuilder(**kwargs)
def add_feedback(self, **kwargs):
kwargs['delivery'] = self.delivery
return StaticFeedbackBuilder(**kwargs)
def add_passed_feedback(self, **kwargs):
kwargs['points'] = 1
kwargs['grade'] = 'Passed'
kwargs['is_passing_grade'] = True
kwargs['delivery'] = self.delivery
return StaticFeedbackBuilder(**kwargs)
def add_failed_feedback(self, **kwargs):
kwargs['points'] = 0
kwargs['grade'] = 'Failed'
kwargs['is_passing_grade'] = False
kwargs['delivery'] = self.delivery
return StaticFeedbackBuilder(**kwargs)
def add_passed_A_feedback(self, **kwargs):
kwargs['points'] = 100
kwargs['grade'] = 'A'
kwargs['is_passing_grade'] = True
kwargs['delivery'] = self.delivery
return StaticFeedbackBuilder(**kwargs)
def add_failed_F_feedback(self, **kwargs):
kwargs['points'] = 0
kwargs['grade'] = 'F'
kwargs['is_passing_grade'] = False
kwargs['delivery'] = self.delivery
return StaticFeedbackBuilder(**kwargs)
class DeadlineBuilder(CoreBuilderBase):
object_attribute_name = 'deadline'
def __init__(self, **kwargs):
self.deadline = Deadline.objects.create(**kwargs)
def add_delivery(self, **kwargs):
kwargs['deadline'] = self.deadline
kwargs['successful'] = kwargs.get('successful', True)
return DeliveryBuilder(**kwargs)
def add_delivery_after_deadline(self, timedeltaobject, **kwargs):
if 'time_of_delivery' in kwargs:
raise ValueError(
'add_delivery_after_deadline does not accept ``time_of_delivery`` as kwarg, it sets it automatically.')
kwargs['time_of_delivery'] = self.deadline.deadline + timedeltaobject
return self.add_delivery(**kwargs)
def add_delivery_before_deadline(self, timedeltaobject, **kwargs):
if 'time_of_delivery' in kwargs:
raise ValueError(
'add_delivery_before_deadline does not accept ``time_of_delivery`` as kwarg, it sets it automatically.')
kwargs['time_of_delivery'] = self.deadline.deadline - timedeltaobject
return self.add_delivery(**kwargs)
def add_delivery_x_hours_after_deadline(self, hours, **kwargs):
return self.add_delivery_after_deadline(timedelta(hours=hours), **kwargs)
def add_delivery_x_hours_before_deadline(self, hours, **kwargs):
return self.add_delivery_before_deadline(timedelta(hours=hours), **kwargs)
class CommentFileBuilder(CoreBuilderBase):
object_attribute_name = 'comment_file'
def __init__(self, **kwargs):
fileobject = ContentFile(kwargs['data'], kwargs['filename'])
del (kwargs['data'])
kwargs['filesize'] = fileobject.size
self.comment_file = CommentFile.objects.create(**kwargs)
self.comment_file.file = fileobject
self.comment_file.save()
class GroupCommentBuilder(CoreBuilderBase):
object_attribute_name = 'groupcomment'
@classmethod
def quickadd_ducku_duck1010_active_assignment1_group_feedbackset_groupcomment(cls, studentuser=None, examiner=None,
comment=None):
students = []
if studentuser:
students.append(studentuser)
return FeedbackSetBuilder \
.quickadd_ducku_duck1010_active_assignment1_group_feedbackset(studentuser=studentuser, examiner=examiner) \
.add_groupcomment(
user=studentuser,
user_role='student',
instant_publish=True,
visible_for_students=True,
text=comment if comment is not None else 'Lorem ipsum I dont know it from memory bla bla bla..',
published_datetime=arrow.get(timezone.now()).replace(weeks=-4, days=-3, hours=-10).datetime)
def __init__(self, **kwargs):
kwargs['comment_type'] = 'groupcomment'
self.groupcomment = baker.make('devilry_group.GroupComment', **kwargs)
def add_file(self, **kwargs):
kwargs['comment'] = self.groupcomment
return CommentFileBuilder(**kwargs)
def add_files(self, files):
retval = []
for fileobject in files:
retval.append(self.add_file(**fileobject))
@classmethod
def make(cls, **kwargs):
feedbacksetbuilder_kwargs = {}
for key in list(kwargs.keys()):
if key.startswith('feedback_set__'):
feedbacksetbuilder_kwargs[key[len('feedback_set__'):]] = kwargs.pop(key)
groupbuilder = FeedbackSetBuilder.make(**feedbacksetbuilder_kwargs)
return cls(feedback_set=groupbuilder.feedback_set, **kwargs)
class FeedbackSetBuilder(CoreBuilderBase):
object_attribute_name = 'feedback_set'
@classmethod
def quickadd_ducku_duck1010_active_assignment1_group_feedbackset(cls, studentuser=None, examiner=None):
students = []
if studentuser:
students.append(studentuser)
return AssignmentGroupBuilder \
.quickadd_ducku_duck1010_active_assignment1_group(studentuser=studentuser) \
.add_feedback_set(points=10,
published_by=examiner,
created_by=examiner,
deadline_datetime=arrow.get(timezone.now()).replace(weeks=-4).datetime)
def __init__(self, **kwargs):
self.feedback_set = baker.make('devilry_group.FeedbackSet', **kwargs)
def add_groupcomment(self, files=[], **kwargs):
kwargs['feedback_set'] = self.feedback_set
groupcomment = GroupCommentBuilder(**kwargs)
groupcomment.add_files(files)
return groupcomment.groupcomment
@classmethod
def make(cls, **kwargs):
groupbuilder_kwargs = {}
for key in list(kwargs.keys()):
if key.startswith('group__'):
groupbuilder_kwargs[key[len('group__'):]] = kwargs.pop(key)
groupbuilder = AssignmentGroupBuilder.make(**groupbuilder_kwargs)
return cls(group=groupbuilder.group, **kwargs)
class AssignmentGroupBuilder(CoreBuilderBase):
object_attribute_name = 'group'
@classmethod
def quickadd_ducku_duck1010_active_assignment1_group(cls, studentuser=None):
students = []
if studentuser:
students.append(studentuser)
return AssignmentBuilder \
.quickadd_ducku_duck1010_active_assignment1() \
.add_group(students=students)
def __init__(self, students=[], candidates=[], examiners=[], relatedstudents=[], **kwargs):
self.group = AssignmentGroup.objects.create(**kwargs)
self.add_students(*students)
self.add_candidates(*candidates)
self.add_examiners(*examiners)
self.add_candidates_from_relatedstudents(*relatedstudents)
def add_candidates_from_relatedstudents(self, *relatedstudents):
for relatedstudent in relatedstudents:
self.group.candidates.create(relatedstudent=relatedstudent,
student_id=relatedstudent.user_id)
def add_students(self, *users):
for user in users:
period = self.group.period
relatedstudent = RelatedStudent.objects.get_or_create(user=user,
period=period)[0]
self.group.candidates.create(relatedstudent=relatedstudent)
return self
def add_candidates(self, *candidates):
for candidate in candidates:
self.group.candidates.add(candidate)
return self
def add_examiners(self, *users):
for user in users:
period = self.group.period
relatedexaminer = RelatedExaminer.objects.get_or_create(user=user, period=period)[0]
self.group.examiners.create(relatedexaminer=relatedexaminer)
return self
def add_deadline(self, **kwargs):
kwargs['assignment_group'] = self.group
return DeadlineBuilder(**kwargs)
def add_deadline_in_x_weeks(self, weeks, **kwargs):
if 'deadline' in kwargs:
raise ValueError('add_deadline_in_x_weeks does not accept ``deadline`` as kwarg, it sets it automatically.')
kwargs['deadline'] = arrow.get(timezone.now()).replace(weeks=+weeks).datetime
return self.add_deadline(**kwargs)
def add_deadline_x_weeks_ago(self, weeks, **kwargs):
if 'deadline' in kwargs:
raise ValueError(
'add_deadline_x_weeks_ago does not accept ``deadline`` as kwarg, it sets it automatically.')
kwargs['deadline'] = arrow.get(timezone.now()).replace(weeks=-weeks).datetime
return self.add_deadline(**kwargs)
def add_feedback_set(self, **kwargs):
kwargs['group'] = self.group
return FeedbackSetBuilder(**kwargs)
@classmethod
def make(cls, **kwargs):
assignmentbuilder_kwargs = {}
for key in list(kwargs.keys()):
if key.startswith('assignment__'):
assignmentbuilder_kwargs[key[len('assignment__'):]] = kwargs.pop(key)
assignmentbuilder = AssignmentBuilder.make(**assignmentbuilder_kwargs)
return cls(parentnode=assignmentbuilder.assignment, **kwargs)
class AssignmentBuilder(BaseNodeBuilderBase):
object_attribute_name = 'assignment'
modelcls = Assignment
@classmethod
def quickadd_ducku_duck1010_active_assignment1(cls):
return PeriodBuilder.quickadd_ducku_duck1010_active() \
.add_assignment('assignment1')
def __init__(self, *args, **kwargs):
if not 'publishing_time' in kwargs:
kwargs['publishing_time'] = timezone.now()
super(AssignmentBuilder, self).__init__(*args, **kwargs)
def add_group(self, *args, **kwargs):
kwargs['parentnode'] = self.assignment
return AssignmentGroupBuilder(*args, **kwargs)
@classmethod
def make(cls, **kwargs):
if 'publishing_time' in kwargs:
return PeriodBuilder.make().add_assignment(**kwargs)
else:
return PeriodBuilder.make().add_assignment_in_x_weeks(weeks=1, **kwargs)
class PeriodBuilder(BaseNodeBuilderBase):
object_attribute_name = 'period'
modelcls = Period
def __init__(self, *args, **kwargs):
relatedstudents = kwargs.pop('relatedstudents', None)
relatedexaminers = kwargs.pop('relatedexaminers', None)
super(PeriodBuilder, self).__init__(*args, **kwargs)
if relatedstudents:
self.add_relatedstudents(*relatedstudents)
if relatedexaminers:
self.add_relatedexaminers(*relatedexaminers)
@classmethod
def quickadd_ducku_duck1010_active(cls):
return SubjectBuilder.quickadd_ducku_duck1010() \
.add_6month_active_period()
def add_assignment(self, *args, **kwargs):
kwargs['parentnode'] = self.period
if 'first_deadline' not in kwargs:
kwargs['first_deadline'] = timezone.now()
return AssignmentBuilder(*args, **kwargs)
def add_assignment_x_weeks_ago(self, weeks, **kwargs):
kwargs['publishing_time'] = arrow.get(timezone.now()).replace(weeks=-weeks).datetime
return self.add_assignment(**kwargs)
def add_assignment_in_x_weeks(self, weeks, **kwargs):
kwargs['publishing_time'] = arrow.get(timezone.now()).replace(weeks=+weeks).datetime
return self.add_assignment(**kwargs)
def add_relatedstudents(self, *users):
relatedstudents = []
for user in users:
if isinstance(user, RelatedStudent):
relatedstudent = user
else:
relatedstudent = RelatedStudent(
user=user)
relatedstudent.period = self.period
relatedstudents.append(relatedstudent)
RelatedStudent.objects.bulk_create(relatedstudents)
return self
def add_relatedexaminers(self, *users):
relatedexaminers = []
for user in users:
if isinstance(user, RelatedExaminer):
relatedexaminer = user
else:
relatedexaminer = RelatedExaminer(
user=user)
relatedexaminer.period = self.period
relatedexaminers.append(relatedexaminer)
RelatedExaminer.objects.bulk_create(relatedexaminers)
return self
@classmethod
def make(cls, **kwargs):
return SubjectBuilder.make().add_period(**kwargs)
class SubjectBuilder(BaseNodeBuilderBase):
object_attribute_name = 'subject'
modelcls = Subject
@classmethod
def quickadd_ducku_duck1010(cls, **kwargs):
return SubjectBuilder('duck1010', **kwargs)
def add_period(self, *args, **kwargs):
kwargs['parentnode'] = self.subject
if 'start_time' not in kwargs:
kwargs['start_time'] = arrow.get(timezone.now()).replace(days=-(30 * 3)).datetime
if 'end_time' not in kwargs:
kwargs['end_time'] = arrow.get(timezone.now()).replace(days=30 * 3).datetime
return PeriodBuilder(*args, **kwargs)
def add_6month_active_period(self, **kwargs):
kwargs['parentnode'] = self.subject
if 'start_time' in kwargs or 'end_time' in kwargs:
raise ValueError(
'add_6month_active_period does not accept ``start_time`` or ``end_time`` as kwargs, it sets them automatically.')
kwargs['start_time'] = arrow.get(timezone.now()).replace(days=-(30 * 3)).datetime
kwargs['end_time'] = arrow.get(timezone.now()).replace(days=30 * 3).datetime
if not 'short_name' in kwargs:
kwargs['short_name'] = 'active'
return self.add_period(**kwargs)
def add_6month_lastyear_period(self, **kwargs):
kwargs['parentnode'] = self.subject
if 'start_time' in kwargs or 'end_time' in kwargs:
raise ValueError(
'add_6month_lastyear_period does not accept ``start_time`` or ``end_time`` as kwargs, it sets them automatically.')
kwargs['start_time'] = arrow.get(timezone.now()).replace(days=-(365 + 30 * 3)).datetime
kwargs['end_time'] = arrow.get(timezone.now()).replace(days=-(365 - 30 * 3)).datetime
if not 'short_name' in kwargs:
kwargs['short_name'] = 'lastyear'
return self.add_period(**kwargs)
def add_6month_nextyear_period(self, **kwargs):
kwargs['parentnode'] = self.subject
if 'start_time' in kwargs or 'end_time' in kwargs:
raise ValueError(
'add_6month_nextyear_period does not accept ``start_time`` or ``end_time`` as kwargs, it sets them automatically.')
kwargs['start_time'] = arrow.get(timezone.now()).replace(days=365 - 30 * 3).datetime
kwargs['end_time'] = arrow.get(timezone.now()).replace(days=365 + 30 * 3).datetime
if not 'short_name' in kwargs:
kwargs['short_name'] = 'nextyear'
return self.add_period(**kwargs)
@classmethod
def make(cls, **kwargs):
return SubjectBuilder(**kwargs)
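# Illustrative builder-chain sketch (hypothetical test code using the classes above):
#
# deadline_builder = AssignmentBuilder.quickadd_ducku_duck1010_active_assignment1() \
#     .add_group().add_deadline_in_x_weeks(weeks=1)
# delivery = deadline_builder.add_delivery_x_hours_before_deadline(hours=2).delivery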
|
StarcoderdataPython
|
3295258
|
<filename>db_tools/cli.py
import click
from datetime import datetime
from pathlib import Path
from captif_db_config import Config
from db_tools import __version__
from db_tools.tools import (
generate_duplicate_database,
dump_database,
restore_database,
)
@click.version_option(__version__, prog_name="db-tools")
@click.group()
def cli():
pass
@click.command(name="dump")
@click.option("--config-file", "config_file", required=True)
@click.option("--backup-name", "backup_name", default="",
help="text to be appended to the database name when generating a filename")
@click.argument("database", required=True)
@click.argument("tables", nargs=-1)
@click.argument("backup-path", required=True)
def cli_dump_database(config_file, backup_name, database, tables, backup_path):
"""Dump database to a sql file.
"""
config = Config(config_file)
if backup_name == "":
backup_name = datetime.now().strftime("%Y%m%d_%H%M")
sql_file = Path(backup_path).joinpath(f"{database}_{backup_name}.sql")
dump_database(config, database, sql_file, tables)
@click.command(name="restore")
@click.option("--config-file", "config_file", required=True)
@click.argument('database', required=True)
@click.argument('path', required=True)
def cli_restore_database(config_file, database, path):
"""Restore database from backup file. The name of the new database must be provided.
"""
config = Config(config_file)
restore_database(config, database, path)
@click.command(name="duplicate")
@click.option("--config-file", "config_file", required=True)
@click.argument('database', required=True)
@click.argument("tables", nargs=-1)
def cli_duplicate_database(config_file, database, tables):
"""Generate a duplicate copy of a database with '_copy' appended to the name.
"""
config = Config(config_file)
generate_duplicate_database(config, database, tables)
cli.add_command(cli_dump_database)
cli.add_command(cli_restore_database)
cli.add_command(cli_duplicate_database)
if __name__ == '__main__':
cli()
|
StarcoderdataPython
|
149286
|
<gh_stars>0
# MAKE_ENDS
def make_ends(nums):
return [nums[0], nums[0]] if len(nums)<2 else [nums[0], nums[len(nums)-1]]
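# Illustrative examples (not part of the original exercise):
# make_ends([1, 2, 3]) -> [1, 3]
# make_ends([7])       -> [7, 7]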
|
StarcoderdataPython
|
4834876
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: ifields_methods.py
# Purpose: store custom methods for wrapper class of IFields Interface
# Licence: MIT License
#-------------------------------------------------------------------------------
"""Store custom methods for wrapper class of IFields Interface.
name := repr(zos_obj).split()[0].split('.')[-1].lower() + '_methods.py'
"""
from __future__ import print_function
from __future__ import division
from win32com.client import CastTo as _CastTo, constants as _constants
from pyzos.zosutils import wrapped_zos_object as _wrapped_zos_object
# Overridden methods
# ------------------
# Extra methods
# -------------
|
StarcoderdataPython
|
3266131
|
#FLM: Remove empty glyphs
# Removes all empty glyphs from a font
font = fl.font
glyphs = font.glyphs
namesToKeep = [ '.notdef', 'NULL', 'CR', 'space' ]
# Find all the empty glyphs
for glyph in reversed(glyphs):
if not glyph.nodes and not glyph.components:
if not glyph.name in namesToKeep:
del glyphs[font.FindGlyph(glyph.name)]
fl.UpdateFont()
|
StarcoderdataPython
|
1674815
|
import smart_imports
smart_imports.all()
class GeneralTest(utils_testcase.TestCase):
def setUp(self):
super(GeneralTest, self).setUp()
game_logic.create_test_map()
self.account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(self.account.id)
self.hero = self.storage.accounts_to_heroes[self.account.id]
self.action_idl = self.hero.actions.current_action
self.bundle_id = self.action_idl.bundle_id
def test_TEXTGEN_TYPE(self):
for action_class in list(prototypes.ACTION_TYPES.values()):
self.assertTrue('TEXTGEN_TYPE' in action_class.__dict__)
def test_percents_consistency(self):
        # just test that the quest eventually ends
while not self.action_idl.leader:
self.storage.process_turn()
game_turn.increment()
self.assertEqual(self.storage.tests_get_last_action().percents, self.hero.last_action_percents)
def test_action_default_serialization(self):
default_action = helpers.TestAction(hero=self.hero,
bundle_id=self.bundle_id,
state=helpers.TestAction.STATE.UNINITIALIZED)
self.assertEqual(default_action.serialize(), {'bundle_id': self.bundle_id,
'state': helpers.TestAction.STATE.UNINITIALIZED,
'percents': 0.0,
'description': None,
'type': helpers.TestAction.TYPE.value,
'visited_places': [],
'created_at_turn': game_turn.number()})
deserialized_action = helpers.TestAction.deserialize(default_action.serialize())
deserialized_action.hero = self.hero
self.assertEqual(default_action, deserialized_action)
def test_action_full_serialization(self):
mob = mobs_storage.mobs.create_mob_for_hero(self.hero)
account_2 = self.accounts_factory.create_account()
self.storage.load_account_data(account_2.id)
hero_2 = self.storage.accounts_to_heroes[account_2.id]
meta_action = meta_actions.ArenaPvP1x1.create(self.storage, self.hero, hero_2)
default_action = helpers.TestAction(hero=self.hero,
bundle_id=self.bundle_id,
state=helpers.TestAction.STATE.UNINITIALIZED,
created_at_turn=666,
context=helpers.TestAction.CONTEXT_MANAGER(),
description='description',
place_id=2,
mob=mob,
data={'xxx': 'yyy'},
break_at=0.75,
percents_barier=77,
extra_probability=0.6,
mob_context=helpers.TestAction.CONTEXT_MANAGER(),
textgen_id='textgen_id',
info_link='/bla-bla',
meta_action=meta_action,
replane_required=True,
visited_places={13},
path=navigation_path.Path(cells=[(1, 2)]))
self.assertEqual(default_action.serialize(), {'bundle_id': self.bundle_id,
'state': helpers.TestAction.STATE.UNINITIALIZED,
'context': helpers.TestAction.CONTEXT_MANAGER().serialize(),
'mob_context': helpers.TestAction.CONTEXT_MANAGER().serialize(),
'mob': mob.serialize(),
'textgen_id': 'textgen_id',
'extra_probability': 0.6,
'percents_barier': 77,
'percents': 0.0,
'description': 'description',
'type': helpers.TestAction.TYPE.value,
'created_at_turn': 666,
'place_id': 2,
'data': {'xxx': 'yyy'},
'info_link': '/bla-bla',
'break_at': 0.75,
'meta_action': meta_action.serialize(),
'replane_required': True,
'visited_places': [13],
'path': navigation_path.Path(cells=[(1, 2)]).serialize()})
deserialized_action = helpers.TestAction.deserialize(default_action.serialize())
deserialized_action.hero = self.hero
self.assertEqual(default_action, deserialized_action)
|
StarcoderdataPython
|
3220056
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
This module contains utilities used by other dialog modules.
"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "25/10/2017"
import os
import types
from silx.gui import qt
def samefile(path1, path2):
"""Portable :func:`os.path.samepath` function.
:param str path1: A path to a file
:param str path2: Another path to a file
:rtype: bool
"""
if path1 == path2:
return True
if path1 == "":
return False
if path2 == "":
return False
return os.path.samefile(path1, path2)
def findClosestSubPath(hdf5Object, path):
"""Find the closest existing path from the hdf5Object using a subset of the
provided path.
Returns None if no path found. It is possible if the path is a relative
path.
:param h5py.Node hdf5Object: An HDF5 node
:param str path: A path
:rtype: str
"""
if path in ["", "/"]:
return "/"
names = path.split("/")
if path[0] == "/":
names.pop(0)
for i in range(len(names)):
n = len(names) - i
path2 = "/".join(names[0:n])
if path2 == "":
return ""
if path2 in hdf5Object:
return path2
if path[0] == "/":
return "/"
return None
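# Illustrative example (hypothetical file layout): if hdf5Object contains the group
# "foo" but not "foo/bar", then findClosestSubPath(hdf5Object, "/foo/bar/baz")
# returns "foo"; an entirely unknown absolute path falls back to "/".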
def patchToConsumeReturnKey(widget):
"""
Monkey-patch a widget to consume the return key instead of propagating it
to the dialog.
"""
assert(not hasattr(widget, "_oldKeyPressEvent"))
def keyPressEvent(self, event):
k = event.key()
result = self._oldKeyPressEvent(event)
if k in [qt.Qt.Key_Return, qt.Qt.Key_Enter]:
event.accept()
return result
widget._oldKeyPressEvent = widget.keyPressEvent
widget.keyPressEvent = types.MethodType(keyPressEvent, widget)
|
StarcoderdataPython
|
134110
|
import xlwt # library for working with Excel files; install it with: pip install xlwt
import requests
from lxml import etree
# the two imports above are third-party libraries that must be installed
# currently only the shop name, product name and price are taken from the listing page; the review count cannot be fetched yet
def get_lsf_info_from_jd():
"""京东螺狮粉部分信息"""
# 这是请求头信息
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
}
    # JD search URL for luosifen
url = 'https://search.jd.com/Search?keyword=%E8%9E%BA%E7%8B%AE%E7%B2%89&qrst=1&stock=1&page=100&s=1&click=0'
    # send the request, similar to typing the URL above into a browser
res = requests.get(url, headers=headers)
    # res holds the returned page data; data_html is the parsed HTML
data_html = etree.HTML(res.content)
divs = data_html.xpath('//div[contains(@class, "gl-i-wrap")]')
data_list = []
    # the parsing and string stitching below is convoluted; there should be a better way
for d in divs:
data_dict = {}
        print('Price: ', d.xpath('./div[2]/strong/i')[0].text)
        print('Shop: ', d.xpath('./div[5]/span/a/@title')[0] if d.xpath('./div[5]/span/a/@title') else 'no info')
        # store the price and shop in a dict so they can be written to Excel later
data_dict['price'] = d.xpath('./div[2]/strong/i')[0].text
        data_dict['shop'] = d.xpath('./div[5]/span/a/@title')[0] if d.xpath('./div[5]/span/a/@title') else 'no info'
if d.xpath('./div[3]/a/@title') != ['']:
            print('Product: ', d.xpath('./div[3]/a/@title')[0])
            # store the product name in the dict for the later Excel export
data_dict['name'] = d.xpath('./div[3]/a/@title')[0]
else:
goods_1 = d.xpath('./div[3]/a/em/text()')
goods_2 = d.xpath('./div[3]/a/em/font/text()')
if len(goods_1) > len(goods_2):
a = ''
for i in range(len(goods_2)):
a += goods_1[i]
a += goods_2[i]
a += goods_1[-1]
else:
a = ''
for i in range(len(goods_2)):
a += goods_1[i]
a += goods_2[i]
            print('Product: ', a)
            # store the product name in the dict for the later Excel export
data_dict['name'] = a
print('-' * 50)
        # append each product to the list; it ends up holding every product so all rows can be written to Excel in one pass
        data_list.append(data_dict)
    # write the collected data into an Excel file
    # create an xls workbook object, i.e. a new Excel file
    wb = xlwt.Workbook()
    # add a worksheet (the sheet tab at the bottom of Excel); several can be created
    sh1 = wb.add_sheet('JD products')
    # data is written by position: the first argument is the row, the second the column, the third the value to write
    sh1.write(0, 0, 'Product name') # write 'Product name' into row 0, column 0; rows and columns are 0-based
    sh1.write(0, 1, 'Shop name')
    sh1.write(0, 2, 'Price')
    # loop over the list and write every product into the sheet
    for index, data in enumerate(data_list):
        # row 0 already holds the header, so data rows start at row 1; index starts at 0, hence index + 1
sh1.write(index + 1, 0, data['name'])
sh1.write(index + 1, 1, data['shop'])
sh1.write(index + 1, 2, data['price'])
    # finally save the file; the argument is the Excel filename to generate, created in the current directory
wb.save('test_w.xls')
# get_lsf_info_from_jd()
# fetch data from Taobao
def get_lsf_info_from_tb():
# url = 'https://detail.tmall.com/item.htm?spm=a230r.1.14.1.1a782a9d4L50l6&id=598614273525&ns=1&abbucket=20'
url = 'https://s.taobao.com/search?q=%E8%9E%BA%E7%8B%AE%E7%B2%89&imgfile=&commend=all&ssid=s5-e&search_type=item&sourceId=tb.index&spm=a21bo.2017.201856-taobao-item.1&ie=utf8&initiative_id=tbindexz_20170306&bcoffset=0&ntoffset=6&p4ppushleft=1%2C48&sort=sale-desc'
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
}
res = requests.get(url, headers=headers)
# print(res.content)
data_html = etree.HTML(res.content)
divs = data_html.xpath('//div[contains(@class, "item J_MouserOnverReq")]')
# divs = data_html.xpath('//*[@id="mainsrp-itemlist"]/div/div/div[1]/div[1]/div[2]/div[2]')
# divs = data_html.xpath('//*[@id="mainsrp-itemlist"]/div/div/div[1]/div[1]/div[2]/div[2]')
print(divs)
for i in divs:
a =1
# divs = data_html.xpath('//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()')
# prices = data_html.xpath('//*[@id="J_PromoPrice"]/dd/div/span/text()')
# old_prices = data_html.xpath('//*[@id="J_StrPriceModBox"]/dd/span/text()')
# count = data_html.xpath('//*[@id="J_DetailMeta"]/div[1]/div[1]/div/ul/li[1]/div/span[2]/text()')
# comment_count = data_html.xpath('//*[@id="J_ItemRates"]/div/span[2]/text()')
# print(divs)
# print(prices)
# print(old_prices)
# print(count)
# print(comment_count)
get_lsf_info_from_tb()
|
StarcoderdataPython
|
1693687
|
from setuptools import setup
setup(
name='autodsp',
version='0.0.1',
description='Code to reproduce the 2021 WASPAA paper titled AUTO-DSP: LEARNING TO OPTIMIZE ACOUSTIC ECHO CANCELLERS.',
author='<NAME>, <NAME>, <NAME>',
author_email='<EMAIL>',
url='https://github.com/jmcasebeer/autodsp',
packages=['autodsp'],
license='University of Illinois Open Source License',
install_requires=[
'matplotlib==3.4.3',
'numpy==1.21.2',
'pandas==1.3.3',
'scipy==1.7.1',
'tqdm==4.62.3',
'wandb==0.12.4',
]
)
|
StarcoderdataPython
|
3201208
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from enum import Enum
from six import text_type
class TextEnum(text_type, Enum):
def __repr__(self):
return self._value_ # pylint:disable=no-member
def __str__(self):
return str(self.value) # pylint:disable=no-member
|
StarcoderdataPython
|
121992
|
"""Test module for the user profile endpoint"""
import os
import pytest
from unittest.mock import Mock
from tempfile import NamedTemporaryFile
from django.urls import resolve, reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from rest_framework.test import APIClient
import cloudinary.uploader
from PIL import Image
from src.apps.core.utilities.messages import ERRORS, MESSAGES
from tests.fixtures.user_profile import (NEW_PROFILE, INVALID_PROFILE,
MOCK_RESPONSE)
PROFILE_URL = reverse('user:profile')
PROFILE_PHOTO_URL = reverse('user:photo')
api_client = APIClient()
@pytest.mark.django_db
class TestUserProfileView:
"""Class to test the user profile views"""
def test_profile_url_succeeds(self):
"""Test the paths"""
assert resolve(PROFILE_URL).view_name == 'user:profile'
def test_getting_a_logged_in_user_profile_succeeds(self, auth_header,
client):
"""Test getting logged in users profile"""
response = client.get(PROFILE_URL, **auth_header)
resp_data = response.data
data = resp_data['data']
assert response.status_code == 200
assert resp_data['status'] == 'success'
assert data['first_name'] is None
assert data['last_name'] is None
assert data['middle_name'] is None
assert data['gender'] is None
assert data['phone'] is None
assert data['seat_preference'] is None
assert data['dob'] is None
def test_getting_a_logged_in_user_profile_without_auth_fails(self, client):
"""Test getting logged in users profile"""
response = client.get(PROFILE_URL)
resp_data = response.data
assert response.status_code == 401
assert resp_data['status'] == 'error'
assert resp_data['user_message'] == ERRORS['AUTH_01']
def test_updating_user_profile_succeeds(self, client, auth_header):
"""Test that logged in user can update profile"""
response = client.patch(PROFILE_URL,
content_type='application/json',
data=NEW_PROFILE,
**auth_header)
resp_data = response.data
data = resp_data['data']
assert response.status_code == 200
assert resp_data['status'] == 'success'
assert data['first_name'] == NEW_PROFILE['first_name']
assert data['last_name'] == NEW_PROFILE['last_name']
assert data['middle_name'] == NEW_PROFILE['middle_name']
assert data['gender'] == NEW_PROFILE['gender']
assert data['phone'] == NEW_PROFILE['phone']
assert data['seat_preference'] == NEW_PROFILE['seat_preference']
assert data['dob'] == NEW_PROFILE['dob']
def test_updating_user_profile_with_invalid_data_fails(
self, client, auth_header):
"""Test that logged in user cannot update profile with invalid data"""
response = client.patch(PROFILE_URL,
content_type='application/json',
data=INVALID_PROFILE,
**auth_header)
resp_data = response.data
data = resp_data['errors']
assert response.status_code == 400
assert resp_data['status'] == 'error'
assert data['middle_name'][0] == ERRORS['USR_07']
assert data['phone'][0] == ERRORS['USR_07']
def test_updating_a_logged_in_user_profile_without_auth_fails(
self, client):
"""Test getting logged in users profile"""
response = client.patch(PROFILE_URL)
resp_data = response.data
assert response.status_code == 401
assert resp_data['status'] == 'error'
assert resp_data['user_message'] == ERRORS['AUTH_01']
def test_user_can_upload_profile_photo_succeeds(self, client, auth_header,
create_image):
"""Test that a user can upload profile photograph."""
# Mock the cloudinary SDK
cloudinary.uploader.upload = Mock(return_value=MOCK_RESPONSE)
cloudinary.uploader.destroy = Mock()
# set up form data
profile_photo = create_image(None, 'photo_pic.png')
photo_file = SimpleUploadedFile('test_photo.png',
profile_photo.getvalue(),
content_type='image/png')
data = dict(photo=photo_file)
response = api_client.put(PROFILE_PHOTO_URL,
data=data,
format="multipart",
**auth_header)
resp_data = response.data
assert response.status_code == 200
assert resp_data['status'] == 'success'
assert resp_data['user_message'] == MESSAGES['PHOTO_UPLOAD']
assert resp_data['data'] == []
assert cloudinary.uploader.upload.called
assert cloudinary.uploader.upload.call_count == 1
assert cloudinary.uploader.destroy.called
assert cloudinary.uploader.destroy.call_count == 1
def test_user_cannot_upload_profile_photo_of_invalid_type_fails(
self, client, auth_header, create_image):
"""Test that a user cannot upload profile photograph invalild file
type."""
# set up form data
profile_photo = create_image(None, 'photo_pic.png')
photo_file = SimpleUploadedFile('test_photo.png',
profile_photo.getvalue(),
content_type='text/plain')
data = dict(photo=photo_file)
response = api_client.put(PROFILE_PHOTO_URL,
data=data,
format="multipart",
**auth_header)
resp_data = response.data
data = resp_data['errors']
assert response.status_code == 400
assert resp_data['status'] == 'error'
assert data['photo'][0] == ERRORS['USR_11']
def test_user_cannot_upload_profile_photo_with_invalid_file_fails(
self, client, auth_header):
"""Test that a user cannot upload profile photograph without file"""
response = api_client.put(PROFILE_PHOTO_URL,
data={},
format="multipart",
**auth_header)
resp_data = response.data
data = resp_data['errors']
assert response.status_code == 400
assert resp_data['status'] == 'error'
assert data['photo'][0] == ERRORS['USR_04']
def test_uploading_profile_photo_with_file_exceeding_max_size_fails(
self, client, auth_header, create_image):
"""Test uploading profile photograph exceeding max 2mb"""
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
filename = os.path.join(base_dir,
'fixtures/media/high_resoulution.jpg')
image = Image.open(filename)
picture = NamedTemporaryFile()
image.save(picture, format="JPEG")
picture.seek(0)
response = api_client.put(PROFILE_PHOTO_URL,
data={'photo': picture},
format="multipart",
**auth_header)
resp_data = response.data
data = resp_data['errors']
assert response.status_code == 400
assert resp_data['status'] == 'error'
assert data['photo'][0] == ERRORS['USR_10']
def test_the_exception_handler_succeeds(self, client, auth_header,
create_image):
"""Raise exception on upload"""
# Mock the cloudinary SDK
        cloudinary.uploader.upload = Mock(side_effect=Exception('Test'))
cloudinary.uploader.destroy = Mock()
# set up form data
profile_photo = create_image(None, 'photo_pic.png')
photo_file = SimpleUploadedFile('test_photo.png',
profile_photo.getvalue(),
content_type='image/png')
data = dict(photo=photo_file)
response = api_client.put(PROFILE_PHOTO_URL,
data=data,
format="multipart",
**auth_header)
resp_data = response.data
assert response.status_code == 200
assert resp_data['status'] == 'success'
assert resp_data['user_message'] == MESSAGES['PHOTO_UPLOAD']
assert resp_data['data'] == []
assert cloudinary.uploader.upload.called
assert cloudinary.uploader.upload.call_count == 1
|
StarcoderdataPython
|
1665639
|
<gh_stars>1-10
import pytest
from lamberthub.universal_solvers import izzo
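
# Consistency note (added; the helper names come from the test body below):
# the minimum time of flight reported by _compute_T_min should equal the
# time-of-flight equation evaluated at the x where that minimum is attained.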
@pytest.mark.parametrize("M", [1, 2, 3])
def test_minimum_time_of_flight_convergence(M):
ll = -1
x_T_min_expected, T_min_expected = izzo._compute_T_min(
ll, M, maxiter=10, atol=1e-8, rtol=1e-10
)
y = izzo._compute_y(x_T_min_expected, ll)
T_min = izzo._tof_equation_y(x_T_min_expected, y, 0.0, ll, M)
assert T_min_expected == T_min
|
StarcoderdataPython
|
194371
|
#!/usr/bin/env python
# generate a jsonl version of a small slice of a dataset that can be fed to megatron-lm preprocessor
import sys
from datasets import load_dataset
dataset_name = "stas/openwebtext-10k"
# subset to jsonlines
n_samples = 1000
ds = load_dataset(dataset_name, split='train')
ds_small = ds.select(range(n_samples))
path = f"openwebtext-{n_samples}.jsonl"
ds_small.to_json(path, orient="records", lines=True)
|
StarcoderdataPython
|
1606690
|
<reponame>Django-Lessons/lesson-31-python-logging<gh_stars>1-10
class Disk():
def __init__(self):
pass
def free(self):
return 0
def total(self):
return 0
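
# Minimal usage sketch (illustrative only; the logging wiring below is an
# assumption, not taken from the lesson code itself):
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)
    disk = Disk()
    logging.info("disk free=%s total=%s", disk.free(), disk.total())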
|
StarcoderdataPython
|
1765809
|
# Multiples of 3 and 5
# Find the sum of all the multiples of 3 or 5 below a given limit
def multiplesOf3and5(num):
sums = sum(n for n in range(num) if n%3 == 0 or n%5 == 0)
return sums
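
# Quick check (not part of the original): below 10 the qualifying numbers are
# 3, 5, 6 and 9 (sum 23); the classic Project Euler #1 answer for 1000 is 233168.
if __name__ == "__main__":
    print(multiplesOf3and5(10))    # 23
    print(multiplesOf3and5(1000))  # 233168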
|
StarcoderdataPython
|
126900
|
<gh_stars>0
# -*- coding: utf-8 -*-
from optparse import make_option
import sys
import traceback
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.core.management.color import no_style
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
from django_sqlalchemy.core import engines
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default='default', help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity'))
interactive = options.get('interactive')
show_traceback = options.get('traceback')
self.style = no_style()
db = options.get('database')
engine = engines[db]
for app_path in settings.INSTALLED_APPS:
try:
app_mod = import_module(app_path + '.models')
except ImportError:
continue
base_klass = getattr(app_mod, 'Base', None)
if not base_klass:
continue
if verbosity >= 1:
print "SQLALchemy: Installing tables from %s..." % (app_path)
base_klass.metadata.create_all(engine)
## Install custom SQL for the app (but only if this
## is a model we've just created)
#if verbosity >= 1:
# print "Installing custom SQL ..."
#for app_name, model_list in manifest.items():
# for model in model_list:
# if model in created_models:
# custom_sql = custom_sql_for_model(model, self.style, connection)
# if custom_sql:
# if verbosity >= 2:
# print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
# try:
# for sql in custom_sql:
# cursor.execute(sql)
# except Exception, e:
# sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
# (app_name, model._meta.object_name, e))
# if show_traceback:
# traceback.print_exc()
# transaction.rollback_unless_managed(using=db)
# else:
# transaction.commit_unless_managed(using=db)
# else:
# if verbosity >= 3:
# print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)
#if verbosity >= 1:
# print "Installing indexes ..."
## Install SQL indices for all newly created models
#for app_name, model_list in manifest.items():
# for model in model_list:
# if model in created_models:
# index_sql = connection.creation.sql_indexes_for_model(model, self.style)
# if index_sql:
# if verbosity >= 2:
# print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
# try:
# for sql in index_sql:
# cursor.execute(sql)
# except Exception, e:
# sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
# (app_name, model._meta.object_name, e))
# transaction.rollback_unless_managed(using=db)
# else:
# transaction.commit_unless_managed(using=db)
## Load initial_data fixtures (unless that has been disabled)
#if load_initial_data:
# from django.core.management import call_command
# call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
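
# Example invocation (a sketch; the actual management command name depends on
# the filename this module is installed under, which is not shown here):
#   python manage.py <command_name> --database=default --noinput
# Each app in INSTALLED_APPS that exposes a declarative `Base` in its models
# module has its tables created via Base.metadata.create_all(engine).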
|
StarcoderdataPython
|
4812483
|
<filename>whampy/tests/test_load_SkySurvey.py
import pytest
import numpy as np
import astropy.units as u
from ..skySurvey import SkySurvey
from ..skySurvey import directory
# Set up the random number generator.
np.random.seed(1234)
# When running tests locally: filename = "/Users/dk/Data/WHAM/wham-ss-DR1-v161116-170912.fits"
def test_remote_load():
"""
Ensure survey loads from default remote link
"""
survey = SkySurvey(mode = "remote")
assert survey["VELOCITY"].unit == u.km/u.s
survey = SkySurvey()
def test_idlsav_load():
import os.path
"""
Ensure IDL Save File loading also works
"""
filename = os.path.join(directory, "data/wham-ss-DR1-v161116-170912.sav")
survey_idl = SkySurvey(filename = filename)
assert survey["DATA"].unit == u.R * u.s / u.km
def test_idlsav_load_varerror():
import os.path
"""
Ensure IDL Save File loading fails if WHAM data not there
"""
filename = os.path.join(directory, "data/wham-ss-DR1-v161116-170912.sav")
try:
        survey_idl = SkySurvey(filename = filename, idl_var = "test_fail")
except TypeError:
assert True
else:
assert False
# def test_idlsav_load_nointen():
# import os.path
# """
# Ensure IDL Save File loading also works for developer versions that
# don't have a "INTEN" field
# Note: This test is not yet implemented - need a test IDL Save File
# """
# cur_directory = os.path.dirname(__file__)
# filename = os.path.join(cur_directory, "test_data/test_no_inten.sav")
# survey_idl = SkySurvey(filename = filename)
# assert survey["INTEN"].unit == u.R
def test_section_circle():
from astropy.coordinates import Angle
"""
Ensure survey section extraction works
"""
l = Angle(np.random.random(1) * 360.*u.deg).wrap_at("180d")
b = Angle((np.random.random(1) * 180.*u.deg) - 90*u.deg)
center = [l.value,b.value]
radius = np.random.random()*30.
circle = survey.sky_section(center, radius)
assert circle["DATA"].unit == u.R / u.km * u.s
def test_section_circle_coord_radius_number():
from astropy.coordinates import Angle
from astropy.coordinates import SkyCoord
"""
Ensure survey section extraction works
"""
l = Angle(np.random.random(1) * 360.*u.deg).wrap_at("180d")
b = Angle((np.random.random(1) * 180.*u.deg) - 90*u.deg)
center = SkyCoord(l = l, b = b, frame = 'galactic')
radius = np.random.random()*30.
if radius < 5:
radius = 5
circle = survey.sky_section(center, radius)
assert circle["DATA"].unit == u.R / u.km * u.s
def test_section_rect_coord():
from astropy.coordinates import Angle
from astropy.coordinates import SkyCoord
"""
Ensure survey section extraction works
"""
l = Angle(np.random.random(2) * 360.*u.deg).wrap_at("180d")
b = Angle((np.random.random(2) * 180.*u.deg) - 90*u.deg)
bounds = SkyCoord(l = l, b = b, frame = 'galactic')
rect = survey.sky_section(bounds)
assert rect["DATA"].unit == u.R / u.km * u.s
def test_section_rect():
from astropy.coordinates import Angle
from astropy.coordinates import SkyCoord
"""
Ensure survey section extraction works
"""
l = Angle(np.random.random(2) * 360.*u.deg).wrap_at("180d")
b = Angle((np.random.random(2) * 180.*u.deg) - 90*u.deg)
bounds = [l.min().value, l.max().value, b.min().value, b.max().value]
rect = survey.sky_section(bounds)
assert rect["DATA"].unit == u.R / u.km * u.s
def test_section_no_wrap():
from astropy.coordinates import Angle
from astropy.coordinates import SkyCoord
"""
Ensure survey section extraction works
"""
l = Angle(np.random.random(2) * 360.*u.deg).wrap_at("360d")
b = Angle((np.random.random(2) * 180.*u.deg) - 90*u.deg)
bounds = [l.min().value, l.max().value, b.min().value, b.max().value]
rect = survey.sky_section(bounds, wrap_at_180 = False)
assert rect["DATA"].unit == u.R / u.km * u.s
def test_bounds_error():
try:
survey.sky_section(np.random.random(5))
except TypeError:
assert True
else:
assert False
def test_no_radius():
try:
survey.sky_section(np.random.random(1), radius = None)
except TypeError:
assert True
else:
assert False
def test_no_radius_coord():
from astropy.coordinates import SkyCoord
try:
survey.sky_section(SkyCoord(l = np.random.random(1) * u.deg, b = np.random.random(1) * u.deg,
frame = 'galactic'),
radius = None)
except TypeError:
assert True
else:
assert False
def test_no_radius_len2():
try:
survey.sky_section(np.random.random(2),
radius = None)
except TypeError:
assert True
else:
assert False
|
StarcoderdataPython
|