max_stars_repo_path (string, 3-269) | max_stars_repo_name (string, 4-119) | max_stars_count (int64, 0-191k) | id (string, 1-7) | content (string, 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
grocery/products/migrations/0006_auto_20210426_1320.py | DeepakDk04/bigbasketClone | 0 | 12786851 | <reponame>DeepakDk04/bigbasketClone<filename>grocery/products/migrations/0006_auto_20210426_1320.py
# Generated by Django 3.1.7 on 2021-04-26 07:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0005_auto_20210426_1318'),
]
operations = [
migrations.AlterField(
model_name='category',
name='description',
field=models.CharField(default='', max_length=200),
),
migrations.AlterField(
model_name='product',
name='price',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='product',
name='stockCount',
field=models.IntegerField(default=0),
),
]
| 1.65625 | 2 |
main.py | Praveendwivedi/Recento | 0 | 12786852 | import discord
import os
import requests,json
import tweepy
consumer_key=os.getenv('C_K')
consumer_secret=os.getenv('C_S')
access_token=os.getenv('A_T')
access_token_secret=os.getenv('A_S')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
r = requests.get('https://api.github.com/user', auth=('user', 'pass'))
print(r.json())
print(r.status_code)
client = discord.Client()
@client.event
async def on_ready():
print('hello, I\'m {0.user}'.format(client))
@client.event
async def on_message(msg):
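# Greet the author, then search recent English tweets matching the message text and
# relay those that are not retweets back into the channel.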
if msg.author==client.user:
return
if msg:
print(msg)
await msg.channel.send('hey {}'.format(msg.author.name))
tweets = api.search(msg.content,lang='en',result_type='recent',include_entities="mashable")
for tweet in tweets:
if not tweet.text.startswith('RT'):
await msg.channel.send(tweet.user.screen_name+' : \n'+tweet.text)
client.run(os.getenv('TOKEN'))
| 2.796875 | 3 |
back-end/RawFishSheep/app_warehouse/migrations/0002_auto_20190508_2022.py | Coldarra/RawFishSheep | 0 | 12786853 | # Generated by Django 2.0.7 on 2019-05-08 12:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app_warehouse', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='cargoin',
name='reason',
field=models.CharField(default='default', max_length=20, verbose_name='入库原因'),
),
migrations.AddField(
model_name='cargoin',
name='shelflife',
field=models.IntegerField(blank=True, default=72, null=True, verbose_name='保质期'),
),
migrations.AddField(
model_name='cargoout',
name='reason',
field=models.CharField(default='default', max_length=20, verbose_name='出库原因'),
),
migrations.AlterField(
model_name='cargoin',
name='goods',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='cargoin_by_goods', to='app_goods.Goods'),
),
migrations.AlterField(
model_name='cargoin',
name='staletime',
field=models.DateTimeField(blank=True, null=True, verbose_name='过期时间'),
),
migrations.AlterField(
model_name='cargoin',
name='warehouse',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='cargoin_by_warehouse', to='app_warehouse.Warehouse'),
),
migrations.AlterField(
model_name='cargoout',
name='goods',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='cargoout_by_goods', to='app_goods.Goods'),
),
migrations.AlterField(
model_name='cargoout',
name='order',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='cargoout_by_order', to='app_order.Order'),
),
migrations.AlterField(
model_name='cargoout',
name='warehouse',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='cargoout_by_warehouse', to='app_warehouse.Warehouse'),
),
]
| 1.609375 | 2 |
dock/cccp/__init__.py | Plane-walker/Okeanos | 6 | 12786854 | from .cccp import *
| 1.09375 | 1 |
_pkg_KuFunc/mod_ViewerShortcuts.py | tianlunjiang/_NukeStudio_v2 | 6 | 12786855 | <reponame>tianlunjiang/_NukeStudio_v2
'''
Based on QuickCreate by <NAME>
'''
#------------------------------------------------------------------------------
#-Module Import
#------------------------------------------------------------------------------
import platform
import os
from Qt import QtWidgets, QtGui, QtCore
import nuke, nukescripts
#------------------------------------------------------------------------------
#-Header
#------------------------------------------------------------------------------
__VERSION__ = '1.0'
__OS__ = platform.system()
__AUTHOR__ = "<NAME>"
__WEBSITE__ = "jiangovfx.com"
__COPYRIGHT__ = "copyright (c) %s - %s" % (__AUTHOR__, __WEBSITE__)
__TITLE__ = "ViewerShortcuts v%s" % __VERSION__
def _version_():
ver='''
version 1.0
- List of convenient viewer shortcuts
- Minor edit to fit kupipeline
'''
return ver
# ------------------------------------------------------------------------------
# Main Functions
# ------------------------------------------------------------------------------
#Base Gridwarp Struct.
#Dear Foundry... could setValue() support?
gridWarpBaseStruct = '''
1 5 5 4 1 0
{default }
{
{ {2 _x0 _y0} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x1 _y0} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x2 _y0} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x3 _y0} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x4 _y0} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x0 _y1} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x1 _y1} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x2 _y1} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x3 _y1} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x4 _y1} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x0 _y2} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x1 _y2} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x2 _y2} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x3 _y2} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x4 _y2} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x0 _y3} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x1 _y3} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x2 _y3} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x3 _y3} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x4 _y3} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x0 _y4} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x1 _y4} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x2 _y4} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x3 _y4} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
{ {2 _x4 _y4} { {2 0 _ty0} {2 0 -_ty0} {2 _tx0 0} {2 -_tx0 0} } }
}
'''
def CreateOnSelection(_kind):
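# Convert the viewer's colour-sample bbox into pixel-space corners on the input image,
# then create and pre-fill the requested node type (Crop, Transform, GridWarp, Tracker,
# CornerPin, Radial, Keylight, Constant, ...) so that it matches the sampled region.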
#If the viewer is connected to a node we will use input 0 for ref. Else we just use the viewer itself.
if nuke.activeViewer().node().input(0):
myNode = nuke.activeViewer().node().input(0)
if not nuke.selectedNodes(): #Trying to be smart by assuming that you don't want to add a node to nothing.
myNode.setSelected(1)
else:
myNode = nuke.activeViewer().node()
bboxinfo = nuke.activeViewer().node()['colour_sample_bbox'].value() #Get the position info from the colour sample bbox
aspect = float(myNode.width()*myNode.pixelAspect())/float(myNode.height()) #Calculate the aspect (thanks <NAME> for notifying and <NAME> for the correction!)
cornerA = [(bboxinfo[0]*0.5+0.5)*myNode.width(),(((bboxinfo[1]*0.5)+(0.5/aspect))*aspect)*myNode.height()] #Get the bottom left corner
cornerB = [(bboxinfo[2]*0.5+0.5)*myNode.width(),(((bboxinfo[3]*0.5)+(0.5/aspect))*aspect)*myNode.height()] #Get the top right corner
area_WH = [cornerB[0]-cornerA[0],cornerB[1]-cornerA[1]] #Get the width and height of the bbox
area_Mid = [cornerA[0]+(area_WH[0]/2),cornerA[1]+(area_WH[1]/2)] #Get the center of the bbox
if _kind == 'Crop': #-----Crop Node-----
newNode = nuke.Node("Crop")
newNode['box'].setValue([cornerA[0],cornerA[1],cornerB[0],cornerB[1]])
elif _kind == 'ROI': #-----ROI-----
nuke.activeViewer().node()["roi"].setValue(bboxinfo)
elif _kind == 'Transform': #-----Transform Node-----
newNode = nuke.Node("Transform")
newNode['center'].setValue([area_Mid[0],area_Mid[1]])
elif _kind == 'GridWarp': #-----GridWarp Node-----
newNode = nuke.Node("GridWarp3")
gridwarpLayout = gridWarpBaseStruct
for x in range(0,5): #Remap placeholder values to x and y coordinates split up to 5 subdivisions
gridwarpLayout=gridwarpLayout.replace("_x%s"%x,"%.0f" % (cornerA[0]+((area_WH[0]/4)*x)))
gridwarpLayout=gridwarpLayout.replace("_y%s"%x,"%.0f" % (cornerA[1]+((area_WH[1]/4)*x)))
gridwarpLayout=gridwarpLayout.replace("_tx0","%.3f" % (area_WH[0]/12)) #Remap tangent's
gridwarpLayout=gridwarpLayout.replace("_ty0","%.3f" % (area_WH[1]/12)) #Remap tangent's
newNode['source_grid_col'].fromScript(gridwarpLayout) #Set Source Grid
newNode['destination_grid_col'].fromScript(gridwarpLayout) #Set Destination Grid
elif _kind == 'Text':
newNode = nuke.Node("Text2")
newNode['box'].setValue([cornerA[0],cornerA[1],cornerB[0],cornerB[1]])
elif _kind == 'Radial':
newNode = nuke.Node("Radial")
newNode['area'].setValue([cornerA[0],cornerA[1],cornerB[0],cornerB[1]])
elif _kind == 'Keylight':
newNode = nuke.Node("OFXuk.co.thefoundry.keylight.keylight_v201")
ColorR = myNode.sample(1,area_Mid[0],area_Mid[1],area_WH[0],area_WH[1])
ColorG = myNode.sample(2,area_Mid[0],area_Mid[1],area_WH[0],area_WH[1])
ColorB = myNode.sample(3,area_Mid[0],area_Mid[1],area_WH[0],area_WH[1])
newNode['screenColour'].setValue([ColorR,ColorG,ColorB])
elif _kind == 'Tracker':
#If we already have a tracker selected then append tracks to the existing tracker node.
if myNode.Class()=="Tracker4":
newNode = myNode
nuke.show(newNode)
else: #Create a new tracker node
newNode = nuke.Node("Tracker4")
numColumns = 31
colTrackX = 2
colTrackY = 3
colRelTrack = 12
trackIdx = int(newNode["tracks"].toScript().split(" ")[3])
newNode['add_track'].execute()
newNode.knob("tracks").setValue(area_Mid[0],numColumns*trackIdx + colTrackX)
newNode.knob("tracks").setValue(area_Mid[1],numColumns*trackIdx + colTrackY)
newNode.knob("tracks").setValue(-area_WH[0]/2,numColumns*trackIdx + colRelTrack)
newNode.knob("tracks").setValue(-area_WH[1]/2,numColumns*trackIdx + colRelTrack+1)
newNode.knob("tracks").setValue(area_WH[0]/2,numColumns*trackIdx + colRelTrack+2)
newNode.knob("tracks").setValue(area_WH[1]/2,numColumns*trackIdx + colRelTrack+3)
elif _kind == 'CornerpinFrom':
newNode = nuke.Node("CornerPin2D")
newNode['from1'].setValue([cornerA[0],cornerA[1]])
newNode['from2'].setValue([cornerB[0],cornerA[1]])
newNode['from3'].setValue([cornerB[0],cornerB[1]])
newNode['from4'].setValue([cornerA[0],cornerB[1]])
elif _kind == 'CornerpinTo':
newNode = nuke.Node("CornerPin2D")
newNode['to1'].setValue([cornerA[0],cornerA[1]])
newNode['to2'].setValue([cornerB[0],cornerA[1]])
newNode['to3'].setValue([cornerB[0],cornerB[1]])
newNode['to4'].setValue([cornerA[0],cornerB[1]])
elif _kind == 'CornerpinFromTo':
newNode = nuke.Node("CornerPin2D")
newNode['to1'].setValue([cornerA[0],cornerA[1]])
newNode['to2'].setValue([cornerB[0],cornerA[1]])
newNode['to3'].setValue([cornerB[0],cornerB[1]])
newNode['to4'].setValue([cornerA[0],cornerB[1]])
newNode['from1'].setValue([cornerA[0],cornerA[1]])
newNode['from2'].setValue([cornerB[0],cornerA[1]])
newNode['from3'].setValue([cornerB[0],cornerB[1]])
newNode['from4'].setValue([cornerA[0],cornerB[1]])
elif _kind == 'Constant':
newNode = nuke.Node("Constant", inpanel=False)
ColorR = myNode.sample(1,area_Mid[0],area_Mid[1],area_WH[0],area_WH[1])
ColorG = myNode.sample(2,area_Mid[0],area_Mid[1],area_WH[0],area_WH[1])
ColorB = myNode.sample(3,area_Mid[0],area_Mid[1],area_WH[0],area_WH[1])
newNode['color'].setValue([ColorR,ColorG,ColorB,1])
| 1.59375 | 2 |
amp/broker/exchange.py | rgozi/amp | 0 | 12786856 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021-10-21
# @Author : iamwm
from amp.broker.store import Store
class Exchange:
"""
exchange of broker
"""
def __init__(self, name: str) -> None:
self.name = name
self.topic_manager = Store(Topic)
def bind_topic(self, topic_name: str):
self.topic_manager.get(topic_name)
EManager = Store(Exchange)
class Topic:
"""
topic of exchange
"""
def __init__(self, name: str) -> None:
self.name = name
| 2.359375 | 2 |
ImageCaptioning/model.py | darkmatter18/Caption-AI | 1 | 12786857 | # Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torchvision.models as models
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#device = torch.device("cpu")
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1, drop=0.5):
super(DecoderRNN, self).__init__()
# Set the hidden size for init_hidden
self.hidden_size = hidden_size
self.num_layers = num_layers
# Set the device
self.device = device
# Embedded layer
self.embed = nn.Embedding(vocab_size, embed_size)
# LSTM layer
self.lstm = nn.LSTM(input_size=embed_size,
hidden_size=hidden_size,
num_layers=num_layers,
batch_first= True,
dropout = drop)
# Dropout Layer
self.drop = nn.Dropout(p=drop)
# Fully Connected layer
self.fc = nn.Linear(hidden_size, vocab_size)
def init_hidden(self, batch_size):
return (torch.zeros(self.num_layers, batch_size, self.hidden_size, device = device),
torch.zeros(self.num_layers, batch_size, self.hidden_size, device = device))
def forward(self, features, hidden):
# LSTM
lstm_out, hidden = self.lstm(features, hidden)
# Functional component
out = self.fc(lstm_out)
out = out.squeeze(1)
out = out.argmax(dim=1)
features = self.embed(out.unsqueeze(0))
# # Embedding the captions
# embedded = self.embed(captions)
# # print(embedded.shape)
# # print(features.unsqueeze(1).shape)
# # print(embedded.shape)
# embedded = torch.cat((features.unsqueeze(1), embedded), dim=1)
# # LSTM
# lstm_out, hidden = self.lstm(features, hidden)
# # Functional component
# out = self.fc(lstm_out)
return out, features, hidden
def sample(self, inputs, states=None, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
# Initialize the hidden state
hidden = self.init_hidden(inputs.shape[0])# features is of shape (batch_size, embed_size)
out_list = list()
word_len = 0
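# Greedy decoding: run the LSTM one step at a time, take the argmax word id, feed its
# embedding back in as the next input, and stop once id 1 (presumably the <end> token)
# is produced or max_len words have been generated.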
with torch.no_grad():
while word_len < max_len:
lstm_out, hidden = self.lstm(inputs, hidden)
out = self.fc(lstm_out)
#print(out.shape)
out = out.squeeze(1)
out = out.argmax(dim=1)
out_list.append(out.item())
inputs = self.embed(out.unsqueeze(0))
word_len += 1
if out == 1:
break
return out_list
| 2.15625 | 2 |
tests/__init__.py | grikbi/coreapi-async-worker | 0 | 12786858 | """The unit test code for f8a-server-backbone."""
| 0.910156 | 1 |
PU_Bayesian_classifiers/PSTAN.py | chengning-zhang/Bayesian-Classifers-for-PU_learning | 4 | 12786859 | import numpy as np
from collections import Counter
from sklearn.utils.validation import check_array, check_is_fitted
# NOTE: the Bayes_net_PU base class is assumed to be provided by the surrounding package.
class PSTAN(Bayes_net_PU):
name = "PSTAN"
def __init__(self, alpha = 1,starting_node = 0):
self.starting_node = starting_node
self.alpha = alpha
def Findparent(self, M):
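# Prim-style greedy construction of a maximum-weight spanning tree over the features:
# repeatedly attach the out-of-tree node with the largest weight in M to its closest
# in-tree node, recording that in-tree node as its parent (edges point away from the root).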
M = M.copy() # to avoid change global M
np.fill_diagonal(M,0)
p = int(M.shape[0])
V = range(p) # set of all nodes
st = self.starting_node
Vnew = [st] # vertices that have already found their parent. Initiate it with the starting node; TAN chooses this one arbitrarily
parent = {st:None} # use a dict to show nodes' interdepedency
while set(Vnew) != set(V): # while there are still nodes whose parents are unknown.
index_i = [] # after the for loop, has same length as Vnew: for each in-tree node, the closest node not yet in Vnew.
max_inf = [] # corresponding distance
for i in range(len(Vnew)): # can be parallelized
vnew = Vnew[i]
ListToSorted = [e for e in M[:,vnew]] # does not need int(e)
index = sorted(range(len(ListToSorted)),key = lambda k: ListToSorted[k],reverse = True)
index_i.append([ele for ele in index if ele not in Vnew][0])
max_inf.append(M[index_i[-1],vnew])
index1 = sorted(range(len(max_inf)),key = lambda k: max_inf[k],reverse = True)[0] ## relative position, Vnew[v1,v2] index_i[v4,v5] max_inf[s1,s2] index1 is the position in those 3 list
Vnew.append(index_i[index1]) # add in that node
parent[index_i[index1]] = Vnew[index1] # add direction, it has to be that the new added node is child, otherwise some nodes has 2 parents which is wrong.
return parent
def fit(self,X_L, X_u, pri, M, case_control = True): # this is based on training data !!!
X_L = check_array(X_L)
X_u = check_array(X_u)
if X_L.shape[1] != X_u.shape[1]:
raise ValueError('labeled data and unlabeled data have different number of features ')
n_L,p = X_L.shape
# n_u,p = X_u.shape
if case_control:
X_U_or_UL = X_u
else:
X_U_or_UL = np.concatenate((X_L,X_u),axis = 0)
#
n_U_or_UL = X_U_or_UL.shape[0]
parent = self.Findparent(M)
# part 1: proba that can be estimated from labeled examples. 1 P(xij|1,xkl), 2 p(x_root|1) = N_L(x_root)/N_L, P(xij|1,xkl) = N_L(xi=j,xk=l)/N_L(xkl)
# part 2: learn from U, N_U(xij,xkl), and N_U(xkl)
# part 3: p(xij|0,xkl),p(x_root|0) from previous list
#
List_prob_1 = {} # 1 P(xij|1,xkl), 2 p(x_root|1)
List_count_1 = {} # N_L(xij,xpal) and N_L(xij)
#
List_count_U_or_UL = {} # N_U(xij,xkl) and N_U(xij)
#
List_prob_0 = {} # p(xij|0,xkl),p(x_root|0)
K = {}
# for root node
root_i = self.starting_node
x_i_L = X_L[:,root_i]
x_i_L_counter = Counter(x_i_L)
x_i_U_or_UL = X_U_or_UL[:,root_i]
x_i_U_or_UL_counter = Counter(x_i_U_or_UL)
x_i_values = list(set(x_i_L_counter.keys()).union(x_i_U_or_UL_counter.keys()))
K[root_i] = len(list(x_i_values))
# part 1
x_i_L_prob = {key: (x_i_L_counter[key]+self.alpha)/(K[root_i]*self.alpha + n_L ) for key in x_i_values}
List_prob_1[root_i] = x_i_L_prob
List_count_1[root_i] = x_i_L_counter
# part 2
List_count_U_or_UL[root_i] = x_i_U_or_UL_counter
# part 3
x_i_0_prob = {key: max([0,x_i_U_or_UL_counter[key] - x_i_L_prob[key] * pri * n_U_or_UL]) for key in x_i_values} # N_U(xi =j) - N_u*p(xij, y =1) = N_U(xij,y=0) numerator, can be negative, make it >=0
x_i_0_prob = {key:(self.alpha + value)/ (K[root_i]*self.alpha + n_U_or_UL * (1-pri) ) for key,value in x_i_0_prob.items()} # add pseudo-count and divide by denominator
x_i_0_prob = {key: value/(sum(np.array(list(x_i_0_prob.values())))) for key,value in x_i_0_prob.items() } # normalize prob sum to 1, however, due to computation problem, it is not sum to 1
List_prob_0[root_i] = x_i_0_prob
#
for i in [e for e in range(0,p) if e != root_i]:
x_i_values = list(set(X_L[:,i]).union(X_U_or_UL[:,i]))
x_i_parent_Value = list(set(X_L[:,parent[i]]).union(X_U_or_UL[:,parent[i] ] ) )
K[i] = len(x_i_values)
# part 1, P(xij|1,xkl) = N_L(xi=j,xk=l)/N_L(xkl)
List_count_1[i] = {v2: {v1:X_L[(X_L[:,i] == v1) & (X_L[:,parent[i]] == v2)].shape[0] for v1 in x_i_values} for v2 in x_i_parent_Value} # {pva1: {'1': , '2':, '3': }, pval2:{}}
List_prob_1[i] = {v2: {v1:(X_L[(X_L[:,i] == v1) & (X_L[:,parent[i]] == v2)].shape[0] + self.alpha)/ (X_L[(X_L[:,parent[i]] == v2)].shape[0] + self.alpha*K[i]) for v1 in x_i_values} for v2 in x_i_parent_Value}
# part 2
List_count_U_or_UL[i] = {v2: {v1:X_U_or_UL[(X_U_or_UL[:,i] == v1) & (X_U_or_UL[:,parent[i]] == v2)].shape[0] for v1 in x_i_values} for v2 in x_i_parent_Value}
# part 3
x_i_0_prob = {v2: {v1: List_count_U_or_UL[i][v2][v1] - List_prob_1[i][v2][v1]*pri* sum(list(List_count_U_or_UL[i][v2].values())) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1: max([0,x_i_0_prob[v2][v1] ]) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1:(x_i_0_prob[v2][v1] + self.alpha)/(self.alpha*K[i] + (1-pri)*sum(list(List_count_U_or_UL[i][v2].values())) ) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1:x_i_0_prob[v2][v1]/sum(list(x_i_0_prob[v2].values())) for v1 in x_i_values} for v2 in x_i_parent_Value} # normalize
List_prob_0[i] = x_i_0_prob
self.case_control_ = case_control
self.is_fitted_ = True
self.parent_ = parent
self.n_features_, self.K_, self.List_count_1_,self.List_prob_1_, self.List_count_U_, self.List_prob_0_, self.prevalence_ = p, K, List_count_1,List_prob_1,List_count_U_or_UL,List_prob_0, pri
return self
def predict_proba(self,X):
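# Score each instance along the learned tree: start from the class prior, multiply in
# P(x_root | y) and P(x_i | y, x_parent(i)) for every other feature, then normalise the
# positive and negative products into a posterior probability of the positive class.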
check_is_fitted(self)
X = check_array(X)
Prob_1 = []
root_i = self.starting_node
for ins in X:
P1 = self.prevalence_
P0 = 1 - P1
# root_i
P1 = P1 * (self.List_prob_1_[root_i][ins[root_i]])
P0 = P0 * (self.List_prob_0_[root_i][ins[root_i]])
for i in [e for e in range(0,self.n_features_) if e != root_i]:
pValue = ins[self.parent_[i]]
P1 = P1 * (self.List_prob_1_[i][pValue][ins[i]])
P0 = P0 * (self.List_prob_0_[i][pValue][ins[i]])
P = P1 + P0
P1 = P1/P; P0 = P0/P
Prob_1.append(P1)
#
Prob_1 = np.array(Prob_1)
return Prob_1
| 2.65625 | 3 |
learn/tests/tests_services/tests_signal.py | Aigrefin/py3learn | 0 | 12786860 | <gh_stars>0
import smtplib
from unittest.mock import call, Mock, MagicMock, patch
from django.contrib.auth.models import User
from django.test import TestCase
from learn.models import Translation, Dictionary
from learn.services import signals
class SignalsTests(TestCase):
def test_shouldSendMailContaining_DictionaryLanguage_KnownWord_WordToLearn(self):
# Given
false_send_mail = Mock()
false_objects = MagicMock()
false_objects.filter.return_value = [User(email='<EMAIL>',is_staff=False,is_superuser=False)]
false_users_db = MagicMock()
false_users_db.objects = false_objects
dictionary = Dictionary(language='Vietnamese')
translation = Translation(known_word='Bien mangé, plein', word_to_learn='No', dictionary=dictionary)
# When
signals.send_mail_on_new_word(Translation, instance=translation, send=false_send_mail,
user_objects=false_users_db)
# Then
args_list = false_send_mail.call_args_list[0][0]
self.assertEqual(args_list[0], 'New word : Bien mangé, plein')
self.assertEqual(args_list[1], 'Hi !\n\n'
'A new word has been added to the Vietnamese dictionary.\n\n'
'Known word : Bien mangé, plein'
'\nWord to learn : No\n\n'
'Seen you soon !')
self.assertEqual(args_list[2], ['<EMAIL>'])
@patch("logging.getLogger")
def test_shouldLogAndNotFail_whenCannotLoginToSMTP(self, get_logger_mock):
# Given
false_logger = Mock()
get_logger_mock.return_value = false_logger
false_send_mail = Mock()
false_send_mail.side_effect = smtplib.SMTPAuthenticationError(534, b'5.7.14 <https://accounts.google.com/signin/continue> Please log in via your web browser and\n5.7.14 then try again.\n5.7.14 Learn more at\n5.7.14 https://support.google.com/mail/answer/78754 - gsmtp');
false_objects = MagicMock()
false_objects.filter.return_value = []
false_users_db = MagicMock()
false_users_db.objects = false_objects
# When
signals.send_mail_on_new_word(Translation, instance=Mock(), send=false_send_mail,
user_objects=false_users_db)
# Then
self.assertEqual(get_logger_mock.call_args_list[0], call('learn.services.signals'))
self.assertEqual(false_logger.exception.call_args_list[0], call('smtp login failed'))
| 2.515625 | 3 |
Greedy/045. Jump Game II.py | beckswu/Leetcode | 138 | 12786861 |
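# Greedy solutions to "Jump Game II": track the edge of the region reachable with the
# current number of jumps and the farthest index reachable with one more jump; a jump
# is counted each time the scan crosses the current edge.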
class Solution:
def jump(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
if n<2: return 0
step, reach, next = 0, 0, 0
for i, v in enumerate(nums):
if i == reach:
reach = max(next, i+v)
step += 1
if reach >= n-1: break
next = nums[reach] + reach
else:
next = max(next, i+v)
return step
class Solution:
def jump(self, nums):
n = len(nums)
step, end, next = 0, 0, 0
for i, v in enumerate(nums[:-1]):
next = max(next, i + v)
if i == end:
step += 1
end = next
return step
class Solution:
# @param {integer[]} nums
# @return {integer}
def jump(self, nums):
n, start, end, step = len(nums), 0, 0, 0
while end < n - 1:
step += 1
maxend = end + 1
for i in range(start, end + 1):
if i + nums[i] >= n - 1:
return step
maxend = max(maxend, i + nums[i])
start, end = end + 1, maxend
return step
class Solution:
# @param {integer[]} nums
# @return {integer}
def jump(self, nums):
n, cur_max, next_max, steps = len(nums), 0, 0, 0
for i in range(n):
if i>cur_max:
steps+=1
cur_max=next_max
if cur_max>=n:break
next_max=max(next_max,nums[i]+i)
return steps
class Solution:
def jump(self, nums: List[int]) -> int:
if len(nums) <= 1: return 0
l, r = 0, nums[0]
times = 1
while r < len(nums) - 1:
times += 1
nxt = max(i + nums[i] for i in range(l, r + 1))
l, r = r, nxt
return times
| 3.390625 | 3 |
tests/sampling/test_sampler.py | leopoldavezac/BayesianMMM | 18 | 12786862 | from _pytest.mark import param
import pytest
import numpy as np
from bayesian_mmm.sampling.stan_model_generator import StanModelGenerator
from bayesian_mmm.sampling.sampler import Sampler
from bayesian_mmm.sampling.stan_model_wrapper import StanModelWrapper
MAX_LAG = 4
SPENDS = np.array([[10, 20], [0, 8], [1, 30], [5, 40]])
LAGGED_SPENDS = np.array([
[[10, 0, 0, 0], [20, 0, 0, 0]],
[[ 0, 10, 0, 0], [ 8, 20, 0, 0]],
[[ 1, 0, 10, 0], [30, 8, 20, 0]],
[[ 5, 1, 0, 10], [40, 30, 8, 20]]
])
CTRL_VARS = np.array([
[2, 4],
[5, 2],
[6, 4],
[7, 2]
])
REVENUE = np.array([1, 2, 3, 4])
N = 4
NUM_MEDIA = 2
NUM_CTRL = 2
STAN_MODEL = StanModelWrapper
@pytest.mark.parametrize(
"ctrl_vars", [CTRL_VARS, None]
)
def test_create_sampler_input(ctrl_vars):
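# The Stan input dict should contain the ctrl-variable entries (num_ctrl, X_ctrl) only
# when ctrl_vars is actually provided; otherwise only the media/response inputs remain.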
if type(ctrl_vars) == np.ndarray:
expected_args = {
"N":N,
"Y":REVENUE,
"max_lag":MAX_LAG,
"num_media":NUM_MEDIA,
"X_media":LAGGED_SPENDS,
"num_ctrl":NUM_CTRL,
"X_ctrl":CTRL_VARS
}
else:
expected_args = {
"N":N,
"Y":REVENUE,
"max_lag":MAX_LAG,
"num_media":NUM_MEDIA,
"X_media":LAGGED_SPENDS
}
sampler = Sampler(STAN_MODEL, MAX_LAG)
sampler.create_stan_input(
SPENDS, ctrl_vars, REVENUE
)
obtained_args = sampler._Sampler__args
expected_args_keys = list(expected_args.keys())
expected_args_keys.sort()
obtained_args_keys = list(obtained_args.keys())
obtained_args_keys.sort()
assert obtained_args_keys == expected_args_keys
for key, val in expected_args.items():
if type(val) == np.ndarray:
assert (val == obtained_args[key]).all()
else:
assert val == obtained_args[key]
# slow to run (stan compilation + sampling)
@pytest.mark.parametrize(
"carryover_transfo_nm,diminushing_returns_transfo_nm,with_ctrl_vars",
[
("adstock","hill",True),
("adstock","hill",False),
("adstock","reach",True),
("adstock","reach",False),
("geo_decay","hill",True),
("geo_decay","hill",False),
("geo_decay","reach",True),
("geo_decay","reach",False)
]
)
def test_run_sampling(
carryover_transfo_nm,
diminushing_returns_transfo_nm,
with_ctrl_vars
):
CARRYOVER_TRANSFO_NM_TO_PARAM_NM = {
"geo_decay":["retain_rate"],
"adstock":["retain_rate", "delay"]
}
DIMINUSHING_RETURNS_TRANSFO_NM_TO_PARAM_NM = {
"hill":["ec", "slope"],
"reach":["half_saturation"]
}
WITH_CTRL_VARS_TO_PARAM_NM = {
True:["gamma_ctrl"],
False:[]
}
stan_model_generator = StanModelGenerator(
carryover_transfo_nm,
diminushing_returns_transfo_nm,
with_ctrl_vars
)
stan_model_generator.create_model()
stan_model = stan_model_generator.get_model()
sampler = Sampler(stan_model, MAX_LAG)
if with_ctrl_vars:
ctrl_vars = CTRL_VARS
else:
ctrl_vars = None
sampler.create_stan_input(
SPENDS,
ctrl_vars,
REVENUE
)
obtained_results = sampler.run_sampling(100, 3)
expected_param_nms = (
CARRYOVER_TRANSFO_NM_TO_PARAM_NM[carryover_transfo_nm]
+ DIMINUSHING_RETURNS_TRANSFO_NM_TO_PARAM_NM[diminushing_returns_transfo_nm]
+ WITH_CTRL_VARS_TO_PARAM_NM[with_ctrl_vars]
+ ["beta_medias", "tau"]
)
expected_param_nms.sort()
obtained_params_nms = list(obtained_results.keys())
obtained_params_nms.sort()
assert expected_param_nms == obtained_params_nms
for param_nm, values in obtained_results.items():
if param_nm != "tau":
assert values.shape == (100,2)
else:
assert values.shape == (100,)
| 1.953125 | 2 |
examples/eventTester.py | tgolsson/appJar | 666 | 12786863 | <filename>examples/eventTester.py
import sys
sys.path.append("../")
from appJar import gui
def press(btn):
print("default:", btn)
if btn == "writing":
app.setTextArea("t1", "some writing")
elif btn == "writing2":
app.setTextArea("t2", "some writing")
elif btn == "get":
print(app.getTextArea("t1"))
elif btn == "get2":
print(app.getTextArea("t2"))
elif btn == "log":
app.logTextArea("t1")
elif btn == "log2":
app.logTextArea("t2")
elif btn == "check":
print(app.textAreaChanged("t1"))
elif btn == "check2":
print(app.textAreaChanged("t2"))
def sub(btn): print("submit ", btn)
def chng(btn):
print("change ", btn)
if btn in ["t1", "t2"]: print(app.getTextArea(btn))
app=gui("Event Tester")
app.addLabel("l1", "click me", 0, 0)
app.setLabelChangeFunction("l1", press)
app.addLabel("l2", "click me", 0, 1)
app.setLabelSubmitFunction("l2", press)
app.addEntry("e1", 1, 0, 2)
app.setEntrySubmitFunction("e1", sub)
app.setEntryChangeFunction("e1", chng)
app.addTextArea("t1", 2, 0)
app.setTextAreaSubmitFunction("t1", sub)
app.setTextAreaChangeFunction("t1", chng)
app.addScrolledTextArea("t2", 2, 1)
app.setTextAreaSubmitFunction("t2", sub)
app.setTextAreaChangeFunction("t2", chng)
app.addButton("writing", press, 3, 0)
app.addButton("writing2", press, 3, 1)
app.addButton("get", press, 4, 0)
app.addButton("get2", press, 4, 1)
app.addButton("log", press, 5, 0)
app.addButton("log2", press, 5, 1)
app.addButton("check", press, 6, 0)
app.addButton("check2", press, 6, 1)
app.go()
| 2.71875 | 3 |
doxieautomator/doxie.py | ninapavlich/doxie-automator | 4 | 12786864 | import os
import sys
import time
import json
import cStringIO
import logging
import requests
from requests.auth import HTTPBasicAuth
from PIL import Image
from base import SingleInstance
import settings
class DoxieAutomator(SingleInstance):
scanner_online = False
DELETE_ON_CORRUPTED = True #If true, delete a file that has an IO error. This happens when the file on the doxie is corrupted.
LOCK_PATH = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), "DoxieAutomator-lock")
_observers = []
def initialize(self):
self.log(u"Looking for Doxie on %s"%(settings.DOXIE_SERVER))
self._observers = []
self.loop()
def loop(self):
files = self._get_latest_images()
status = self._prepare_and_store_images(files)
def bind_to(self, callback):
self._observers.append(callback)
def _get_all_scans_url(self):
return u'%s/scans.json'%(settings.DOXIE_SERVER)
def _get_latest_images(self):
try:
if settings.DOXIE_USERNAME and settings.DOXIE_PASSWORD:
r = requests.get(self._get_all_scans_url(), auth=(settings.DOXIE_USERNAME, settings.DOXIE_PASSWORD))
else:
r = requests.get(self._get_all_scans_url())
except requests.exceptions.Timeout:
# Maybe set up for a retry, or continue in a retry loop
self.log(u'Timeout trying to connect to %s'%(self._get_all_scans_url()))
return []
except requests.exceptions.TooManyRedirects:
# Tell the user their URL was bad and try a different one
self.log(u'Too many redirect when trying to connect to %s'%(self._get_all_scans_url()))
return []
except requests.exceptions.RequestException as e:
# catastrophic error. bail.
self.log(u'Error when trying to connect to %s: %s'%(self._get_all_scans_url(), str(e)))
return []
try:
scans_json = json.loads( r.text )
if self.scanner_online == False:
self.log(u"Doxie online")
self.scanner_online = True
if len(scans_json) > 0:
self.log(u"Detected %s new scans"%(len(scans_json)))
except ValueError, e:
scans_json = None
if self.scanner_online == True:
self.log("Doxie offline")
self.scanner_online = False
if scans_json:
return [ u'%s/scans%s'%(settings.DOXIE_SERVER, scan["name"]) for scan in scans_json]
return []
def _prepare_and_store_images(self, files):
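# For each new scan: build a timestamped filename, download the image, store it locally
# as a PDF, delete the original from the Doxie (on success, or when DELETE_ON_CORRUPTED
# is set), and notify any registered observer callbacks with the local path.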
counter = 1
for file in files:
filename = self._process_filename(file, 'pdf', counter, len(files))
image = self._retrieve_image(file)
retrieve_successful = False
try:
retrieve_successful, local_filename = self._store_file(filename, image)
except IOError as e:
self.log(u"I/O error({0}) on {1}: {2}".format(e.errno, filename, e.strerror))
if retrieve_successful == True or DoxieAutomator.DELETE_ON_CORRUPTED:
self._delete_original(file)
else:
self.log(u"Skipping deleting file %s since retrieval was not successful"%(filename))
if retrieve_successful:
for callback in self._observers:
callback(local_filename)
counter += 1
def _retrieve_image(self, url):
self.log('Retrieving %s from Doxie'%(url))
if settings.DOXIE_USERNAME and settings.DOXIE_PASSWORD:
r = requests.get(url, auth=(settings.DOXIE_USERNAME, settings.DOXIE_PASSWORD), stream=True)
else:
r = requests.get(url, stream=True)
r.raw.decode_content = True
#If the image file on Doxie is .PDF, PIL doesn't process it properly.
#A workaround is to dump the http request content into a cStringIO,
#the advantage is that it can be reused without having to make
#another request to Doxie. This way if errors are thrown we can try
#again with a different method.
csi = cStringIO.StringIO()
csi.write(r.raw.read())
csi.seek(0)#rewind
try:
im = Image.open(csi)
except IOError:
self.log('%s not a .jpg file. Probably a .pdf file.'%(url))
csi.seek(0)#rewind
return csi
return im
def _process_filename(self, filename, filetype, counter, total):
timestr = time.strftime("%Y-%m-%d_%H-%M-%S")
if total > 1:
return u'%s-%s.%s'%(timestr, counter, filetype)
return u'%s.%s'%(timestr, filetype)
def _store_file(self, filename, image):
timestr = time.strftime("%Y-%m-%d")
doxie_file_folder = u'%s/%s'%(settings.DOXIE_FOLDER, timestr)
if not os.path.exists(doxie_file_folder):
os.makedirs(doxie_file_folder)
image_path = u'%s/%s'%(doxie_file_folder, filename)
self.log('Saving new scan to %s'%(image_path))
# At this point image is either a PIL.Image, or just a raw
# IO object
try:
image.convert('RGB').save(image_path, "PDF", Quality = 100)
except AttributeError:
image.seek(0)#rewind
with open(image_path,'w') as destination:
destination.write(image.read())
return (True, image_path)
def _delete_original(self, original):
self.log('Clearing %s from Doxie.'%(original))
r = requests.delete(original)
| 2.46875 | 2 |
isi_mip/core/migrations/0009_auto_20180221_1017.py | ISI-MIP/isimip | 4 | 12786865 | <filename>isi_mip/core/migrations/0009_auto_20180221_1017.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-21 09:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20180215_1138'),
]
operations = [
migrations.AlterField(
model_name='confirmdatapublication',
name='body',
field=models.TextField(default='Dear {model_contact_person},\n\nwe have completed the basic quality check for the {simulation_round} simulation data you uploaded recently. You can find the quality-checked data, which is now visible to all ISIMIP participants, in the relevant directory on the DKRZ server:\n/work/bb0820/ISIMIP/ISIMIP2b/OutputData/{sector}/{impact_model}/… The data cover the experiments:\n\n{custom_text}\n\nWe are now ready to copy this data to the ISIMIP node of the ESGF server at esg.pik-potsdam.de, as stated in the terms of use by following the instructions on the data confirmation page:\n\n{data_confirm_page}\n\nBest wishes from the ISIMIP management team.\n\n', help_text='You can use the following tokens in the email template: {model_contact_person}, {simulation_round}, {sector}. {impact_model}'),
),
migrations.AlterField(
model_name='confirmdatapublication',
name='subject',
field=models.CharField(default='[ISIMIP] Data confirmation request', help_text='Invitation subject', max_length=500),
),
]
| 1.625 | 2 |
src/model/pretrained_wordvec.py | Sakchhi/yelp_analysis | 2 | 12786866 | <filename>src/model/pretrained_wordvec.py<gh_stars>1-10
import config, run_config
import os
import pickle
import csv
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import spacy
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
nlp = spacy.load('en')
def get_vectors(row):
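# Parse the review text with spaCy and average the per-token word vectors into a single
# fixed-length document embedding, returned as a pandas Series.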
doc = nlp(row)
full_w2v = np.array([w.vector for w in doc])
return pd.Series(full_w2v.mean(axis=0))
def get_predictions(train_data, train_labels, test_data):
log_reg_model = LogisticRegression()
log_reg_model.fit(train_data, train_labels)
predictions = log_reg_model.predict(test_data)
pickle_file_name = os.path.join(config.MODEL_DIR, 'classifier_model/{}_logreg_pretrained_spacy96_v{}.pickle'.format(
run_config.model_date_to_read, run_config.model_version_to_read))
pickle.dump(log_reg_model, open(pickle_file_name, 'wb'))
return predictions
if __name__ == '__main__':
df_raw = pd.read_csv(os.path.join(config.CLEANED_REVIEWS_ROOT,
"20200125_yelp_restaurant_reviews_cleaned_gr1000_10k_v1.3.csv")) # .format(
# run_config.model_date_to_read, run_config.model_version_to_read)))
df_raw.full_text_cleaned_text.fillna('', inplace=True)
print(df_raw.columns.tolist())
df_feature = pd.DataFrame(df_raw.full_text_cleaned_text.values, columns=['text'])
df_feature['label_rating'] = df_raw.stars_x.apply(lambda r: int(r > 3))
print(df_feature.label_rating.value_counts())
df_wordvec = df_raw.full_text_cleaned_text.apply(lambda r: get_vectors(r))
print(df_wordvec.shape)
X_train, X_val, y_train, y_val = train_test_split(df_wordvec, df_feature.label_rating, test_size=0.2,
random_state=42)
y_pred = get_predictions(X_train, y_train, X_val)
accuracy_score = metrics.accuracy_score(y_val, y_pred)
print(accuracy_score)
cm = metrics.confusion_matrix(y_val, y_pred)
tp, fn, fp, tn = cm[0][0], cm[0][1], cm[1][0], cm[1][1]
fpr = fp / (fp + tn)
print("FPR = {}".format(fpr))
print("TPR = {}".format(tp / (tp + fn)))
f1 = metrics.f1_score(y_val, y_pred)
print("F1 Score = {}".format(f1))
columns = ['Run', 'Accuracy', 'FPR', 'F1 Score', 'Preprocessing', 'Feature', 'Model', 'Notes']
preprocessing_notes = "NO STEMMER, wordninja, Custom stopwords"
feature_notes = "Pretrained Spacy Word2Vec -- 96 avg"
model_notes = "Logistic Regression"
misc_notes = ""
fields = [run_config.model_version_to_write, accuracy_score, fpr, f1,
preprocessing_notes, feature_notes, model_notes, misc_notes]
with open(os.path.join(config.LOGS_DIR, r'results_summary.csv'), 'a', newline='') as f:
writer = csv.writer(f)
writer.writerow(fields)
df_predictions = pd.DataFrame({"Predictions": y_pred, "Labels": y_val}, index=df_raw.loc[X_val.index]['review_id'])
df_predictions.to_excel(os.path.join(config.OUTPUTS_DIR,
'{}_LogisticRegression_pretrained_spacy96_v{}.xlsx'.format(
run_config.model_date_to_write,
run_config.model_version_to_write)))
| 2.625 | 3 |
third_party/NW Script Tools/Miscellaneous Tools/NW Go To Frame.py | n1ckfg/C4DToolbox | 9 | 12786867 | <filename>third_party/NW Script Tools/Miscellaneous Tools/NW Go To Frame.py
import c4d # reference Cinema4D's existing library of code, called a "module"
def main(): # Define the main function of the script
# - - - - - - COPY THE FOLLOWING INTO YOUR SCRIPT, ABOVE THE MAIN FUNCTION - - - - - - -
def gotoframe(GoToFrame): # Define a function called "gotoframe", which will act as a container for the script to be implemented in other scripts.
FPS = doc[c4d.DOCUMENT_FPS] # Define FPS to look at the current document's fps setting
Time = c4d.BaseTime(GoToFrame,FPS) # Define Time as the target frame location, based on the requested frame number and the fps setting of the document
doc.SetTime(Time) # Move the playhead to the newly referenced location in the timeline
c4d.EventAdd() # Refresh the scene to update the change
# - - - - - - COPY THE ABOVE INTO YOUR SCRIPT, ABOVE THE MAIN FUNCTION - - - - - - - -
# Once you have this copied into your script, you can simply call up the function and insert your value in the parentheses.
# Example: gotoframe(20) will move your playhead to frame 20 if the above is in place. You don't have to copy/paste the whole thing each time you want to use it that way.
if __name__=='__main__': # These two lines close out the main function. This is usually what will be used to end your script.
main()
| 3.28125 | 3 |
bmsapp/__init__.py | alanmitchell/bmon | 11 | 12786868 | <reponame>alanmitchell/bmon
from . import logging_setup # causes logging setup code to run.
| 0.984375 | 1 |
sqlint/parser/__init__.py | moriaki3193/sqlint | 1 | 12786869 | <filename>sqlint/parser/__init__.py
# -*- coding: utf-8 -*-
from sqlint.parser import config
from sqlint.parser import parser
from sqlint.parser import pattern
from sqlint.parser import token
__all__ = [
'config',
'parser',
'pattern',
'token'
]
| 1.742188 | 2 |
mdcorpus/examples/parse_character.py | sosuke-k/cornel-movie-dialogs-corpus-storm | 1 | 12786870 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from storm.locals import *
from mdcorpus.orm import *
from mdcorpus.parser import *
parser = Parser()
f = open("sample_movie_characters_metadata.txt")
line = f.readline()
while line:
print "=== parse this line ==="
print line
list = parser.movie_characters_metadata(line)
character = MovieCharactersMetadata(list[0], list[1], list[-2], list[-1])
print character.name.encode("utf-8") + " (" + character.gender() + ") appears in '" + list[-3] + "'"
print ""
line = f.readline()
f.close()
| 2.859375 | 3 |
one_fm/patches/v1_0/change_title_in_grd.py | askmetoo/One-FM | 16 | 12786871 | from __future__ import unicode_literals
import frappe
from frappe.desk.doctype.notification_log.notification_log import enqueue_create_notification,\
get_title, get_title_html
def execute():
set_title_wp_as_civil_id()
set_title_mi_as_civil_id()
set_title_moi_as_civil_id()
set_title_fp_as_civil_id()
def set_title_wp_as_civil_id():
for doc in frappe.get_all('Work Permit'):
wp_doc = frappe.get_doc('Work Permit',doc.name)
wp_doc.title = wp_doc.civil_id
print(doc.name)
print(wp_doc.title)
print("===========")
def set_title_mi_as_civil_id():
for doc in frappe.get_all('Medical Insurance'):
mi_doc = frappe.get_doc('Medical Insurance',doc.name)
mi_doc.title = mi_doc.civil_id
print(doc.name)
print(mi_doc.title)
print("===========")
def set_title_moi_as_civil_id():
for doc in frappe.get_all('MOI Residency Jawazat'):
moi_doc = frappe.get_doc('MOI Residency Jawazat',doc.name)
moi_doc.title = moi_doc.one_fm_civil_id
print(doc.name)
print(moi_doc.title)
print("===========")
def set_title_fp_as_civil_id():
for doc in frappe.get_all('Fingerprint Appointment'):
fp_doc = frappe.get_doc('Fingerprint Appointment',doc.name)
fp_doc.title = fp_doc.civil_id
print(doc.name)
print(fp_doc.title)
print("===========") | 1.890625 | 2 |
personal/Ervin/tf_collaborative_user.py | edervishaj/spotify-recsys-challenge | 3 | 12786872 | <reponame>edervishaj/spotify-recsys-challenge<filename>personal/Ervin/tf_collaborative_user.py
"""
@author <NAME>
@email <EMAIL>
"""
from recommenders.recommender import Recommender
from recommenders.similarity.s_plus import *
from utils.datareader import Datareader
from utils.evaluator import Evaluator
from utils.post_processing import eurm_to_recommendation_list
import time
import numpy as np
from scipy import sparse as sps
from sklearn.preprocessing import normalize
class TF_collaborative_user(Recommender):
def __init__(self):
super().__init__()
def compute_model(self, knn=100, power=1.0, verbose=False, save_model=False, target_items=None):
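# Build the user-user similarity model: weight each (user, track) entry of the URM by the
# track's inverse document frequency, take a top-k dot product with the transposed URM,
# zero the diagonal and raise the remaining similarities to the given power.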
if verbose:
print("[ Creating model with user TF-IDF similarity ]")
start_time = time.time()
# Calculate DF[t] & IDF[t]
dft = self.urm.sum(axis=0).A1
idft = np.log(self.urm.shape[0] / (dft + 1e-8))
# Multiply each listened track with its respective idf
URM_enhanced = self.urm.multiply(idft).tocsr()
# Get the user similarity matrix
self.model = dot_product(URM_enhanced, self.urm.T, k=knn, verbose=verbose, target_items=target_items)
self.model = self.model.tolil()
self.model.setdiag(np.zeros(self.model.shape[0]))
self.model = self.model.tocsr()
self.model.eliminate_zeros()
self.model.data = np.power(self.model.data, power)
if save_model:
if verbose:
print('[ Saving the model ]')
sps.save_npz('tf_idf_user_sim_'+str(knn), self.model)
if verbose:
print("time: " + str(int(time.time() - start_time) / 60))
return self.model
def compute_rating(self, top_k=500, verbose=False, small=False):
if small:
self.model = sps.csr_matrix(self.model.tocsr()[self.pid])
self.urm = sps.csr_matrix(self.urm)
self.model = sps.csr_matrix(self.model)
if verbose:
print("[ Compute ratings ]")
start_time = time.time()
# Normalize the original URM to get cv for each track listened by the users
user_pen = normalize(self.urm, axis=1, norm='l1')
# Calculate DF[t] & IDF[t]
dft = self.urm.sum(axis=0).A1
idft = np.log(self.urm.shape[0] / (dft + 1e-8))
# Multiply each listened track with its respective idf
URM_enhanced = self.urm.multiply(idft).tocsr()
# Compute the eURM
self.eurm = dot_product(self.model, user_pen, k=top_k, verbose=verbose, target_items=self.pid)
if verbose:
print("time: " + str(int(time.time() - start_time) / 60))
return self.eurm
if __name__ == '__main__':
dr = Datareader(verbose=True, mode='offline', only_load=True)
urm = dr.get_urm(binary=True)
pid = dr.get_test_pids()
ev = Evaluator(dr)
topk = 750
configs = [
{'cat': 10, 'knn': 100, 'power': 2.4},
{'cat': 9, 'knn': 200, 'power': 0.4},
{'cat': 8, 'knn': 100, 'power': 2},
{'cat': 7, 'knn': 300, 'power': 1},
{'cat': 6, 'knn': 300, 'power': 2},
{'cat': 5, 'knn': 500, 'power': 2.4},
{'cat': 4, 'knn': 300, 'power': 1.8},
{'cat': 3, 'knn': 200, 'power': 2.2},
{'cat': 2, 'knn': 500, 'power': 1}
]
eurm = sps.csr_matrix(urm.shape)
rec = TF_collaborative_user()
for c in configs:
pid = dr.get_test_pids(cat=c['cat'])
rec.fit(urm, pid)
rec.compute_model(verbose=True, knn=c['knn'], save_model=False, power=c['power'], target_items=pid)
rec.compute_rating(top_k=topk, verbose=True, small=False)
eurm = eurm + rec.eurm
del rec.eurm
del rec.model
pids = dr.get_test_pids()
eurm = eurm[pids]
ev.evaluate(recommendation_list=eurm_to_recommendation_list(eurm, datareader=dr, remove_seed=True),
name="tfidf_collaborative_user", old_mode=False) | 2.421875 | 2 |
tenable_light.py | andrewspearson/tenable_light | 1 | 12786873 | <reponame>andrewspearson/tenable_light<filename>tenable_light.py<gh_stars>1-10
import configparser
import urllib.request
from urllib.error import HTTPError
import ssl
import json
config = configparser.ConfigParser()
config.read('tenable.ini')
def request(method, host, endpoint, url=None, headers=None, data=None, proxy=None, verify=True):
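# Shared urllib helper used by the Downloads, TenableIO and TenableSC classes below:
# builds an HTTPS JSON request with optional extra headers, body, proxy and
# certificate-verification bypass, returning the response (HTTP errors are printed).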
# url should only be used by the Downloads class
if url is None:
request_ = urllib.request.Request('https://' + host + endpoint)
else:
request_ = urllib.request.Request(url)
request_.method = method
request_.add_header('accept', 'application/json')
request_.add_header('content-type', 'application/json')
context = ''
if headers:
for key, value in headers.items():
request_.add_header(key, value)
if data:
request_.data = json.dumps(data).encode()
if proxy:
request_.set_proxy(proxy, 'https')
if verify is False:
# https://www.python.org/dev/peps/pep-0476
context = ssl._create_unverified_context()
try:
response = urllib.request.urlopen(request_, context=context)
return response
except HTTPError as error:
print('\nERROR: HTTP ' + str(error.code))
print(error.reason)
def auth_error(msg='ERROR: Invalid authentication data'):
print(msg)
quit()
class Downloads:
def __init__(self, bearer_token=None, proxy=None, verify=True):
# Set connection data in order of preference
self.host = 'www.tenable.com'
self.bearer_token = bearer_token
self.proxy = proxy
self.verify = verify
if self.bearer_token:
pass
elif config.has_option('downloads', 'bearer_token'):
self.bearer_token = config.get('downloads', 'bearer_token')
if config.has_option('downloads', 'proxy'):
self.proxy = config.get('downloads', 'proxy')
else:
self.proxy = None
if config.has_option('downloads', 'verify'):
self.verify = config.getboolean('downloads', 'verify')
else:
self.verify = True
else:
auth_error()
# Create authentication headers
self.headers = {
"Host": "www.tenable.com",
"User-agent": "Mozilla/5.0",
"Authorization": "Bearer " + self.bearer_token
}
def request(self, url):
# url is used for Downloads in order to easily work with the files_index_url, and file_url values
response = request('GET', None, None, url, self.headers, None, self.proxy, self.verify)
return response
class TenableIO:
def __init__(self, access_key=None, secret_key=None, username=None, password=<PASSWORD>,
proxy=None, verify=True):
# Set connection data in order of preference
self.host = 'cloud.tenable.com'
self.access_key = access_key
self.secret_key = secret_key
self.username = username
self.password = password
self.proxy = proxy
self.verify = verify
if self.access_key and self.secret_key:
pass
elif self.username and self.password:
pass
elif config.has_option('tenable_io', 'access_key') and config.has_option('tenable_io', 'secret_key'):
self.access_key = config.get('tenable_io', 'access_key')
self.secret_key = config.get('tenable_io', 'secret_key')
if config.has_option('tenable_io', 'proxy'):
self.proxy = config.get('tenable_io', 'proxy')
else:
self.proxy = None
if config.has_option('tenable_io', 'verify'):
self.verify = config.getboolean('tenable_io', 'verify')
else:
self.verify = True
else:
auth_error()
# Create authentication headers
if self.access_key and self.secret_key:
self.headers = {"x-apikeys": "accessKey=" + self.access_key + ';secretKey=' + self.secret_key}
else:
auth = self._login()
self.headers = {"x-cookie": "token=" + auth['token']}
def request(self, method, endpoint, data=None):
response = request(method, self.host, endpoint, None, self.headers, data, self.proxy, self.verify)
return response
def _login(self):
response = request('POST', self.host, '/session', data={"username": self.username, "password": self.password},
proxy=self.proxy, verify=self.verify)
return json.load(response)
def logout(self):
response = self.request('DELETE', '/session')
return response
class TenableSC:
def __init__(self, host=None, access_key=None, secret_key=None, username=None, password=<PASSWORD>,
proxy=None, verify=True):
# Set connection data in order of preference
self.host = host
self.access_key = access_key
self.secret_key = secret_key
self.username = username
self.password = password
self.proxy = proxy
self.verify = verify
if self.host and self.access_key and self.secret_key:
pass
elif self.host and self.username and self.password:
pass
elif (config.has_option('tenable_sc', 'host') and config.has_option('tenable_sc', 'access_key')
and config.has_option('tenable_sc', 'secret_key')):
self.host = config.get('tenable_sc', 'host')
self.access_key = config.get('tenable_sc', 'access_key')
self.secret_key = config.get('tenable_sc', 'secret_key')
if config.has_option('tenable_sc', 'proxy'):
self.proxy = config.get('tenable_sc', 'proxy')
else:
self.proxy = None
if config.has_option('tenable_sc', 'verify'):
self.verify = config.getboolean('tenable_sc', 'verify')
else:
self.verify = True
else:
auth_error()
# Create authentication headers
if self.access_key and self.secret_key:
self.headers = {"x-apikey": "accesskey=" + self.access_key + "; secretkey=" + self.secret_key}
else:
auth = self._login()
self.headers = {"X-SecurityCenter": auth['token'], 'Cookie': auth['cookie']}
def request(self, method, endpoint, data=None):
endpoint = '/rest' + endpoint
response = request(method, self.host, endpoint, None, self.headers, data, self.proxy, self.verify)
return response
def _login(self):
response = request('GET', self.host, '/rest/system', proxy=self.proxy, verify=self.verify)
cookie = response.headers['Set-Cookie'].split(';', 1)[0]
response = request('POST', self.host, '/rest/token', headers={"Cookie": cookie},
data={"username": self.username, "password": <PASSWORD>},
proxy=self.proxy, verify=self.verify)
token = json.load(response)['response']['token']
cookie = response.headers['Set-Cookie'].split(';', 1)[0]
return {'token': token, 'cookie': cookie}
def logout(self):
response = self.request('DELETE', '/token')
return response
| 2.625 | 3 |
app/db/models/bigintkey.py | sp0x/orion | 0 | 12786874 | <filename>app/db/models/bigintkey.py
import peewee as pw
class BigIntPrimaryKey(pw.PrimaryKeyField):
field_type = 'BIGAUTO'
| 1.851563 | 2 |
experiments.py | kafluette/ffnnet | 4 | 12786875 | <reponame>kafluette/ffnnet<gh_stars>1-10
import theano
import theano.tensor as T
k = T.iscalar("k")
A = T.vector("A")
y = T.ivector("y")
# Symbolic description of the result
result, updates = theano.scan(fn=lambda n: (1-y[n]) * T.log(T.nnet.sigmoid(1)),sequences=[T.arange(10)])
# scan returns one term per step; we only need their sum, so the per-step
# values are collapsed into a single scalar below.
final_result = result.sum()
# compiled function that evaluates the summed expression for a given label vector y
power = theano.function(inputs=[y], outputs=final_result, updates=updates)
print power(range(10))
print power(range(10))
| 2.375 | 2 |
Matlab/PLNet/python_tensorflow/plnet_tf_demo.py | disc5/DyraLib | 0 | 12786876 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
PLNet for fixed sized input rankings.
Version: M fixed but generic
Idea: Working with M networks in parallel that permanently remain in memory.
Testbed - label ranking / housing dataset
@author: dirk
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import dypy as dp
import dypy.tf_plnet_utils as plnetutils
#%%
import os
os.chdir('.')
#%%
X,R = dp.io.read_xxl_file_as_matrices('data/housing_dense_tr.txt')
#%%
Orderings = dp.utils.convert_rankingmat_to_orderingmat(R).tolist()
M = len(Orderings[0])
Y = np.eye(M, dtype=np.int32).tolist()
#%%
Z = dp.utils.create_contextualized_concat_orderings(X.tolist(), Y, Orderings)
#%%
input_dim = len(Z[0][0])
#%%
tf.set_random_seed(1)
#%%
# Parameters
learn_rate = 0.01
n_epochs = 10
# Network parameters
num_input_neurons = input_dim
num_hidden_neurons = 5
# Network input
net_inputs = []
for i0 in range(M):
net_inputs.append(tf.placeholder(tf.float32, shape = (num_input_neurons,1), name='input_net'+str(i0+1)))
#%% Helper functions
def sigma(x):
return tf.div(tf.constant(1.0),
tf.add(tf.constant(1.0), tf.exp(tf.negative(x))))
def sigmaprime(x):
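# Derivative of the logistic sigmoid, sigma(x) * (1 - sigma(x)); used below when
# back-propagating the ranking loss through the hidden layer by hand.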
return tf.multiply(sigma(x), tf.subtract(tf.constant(1.0), sigma(x)))
#%%
# Create graph procedure
def create_graph(inp, name="fc"):
with tf.name_scope(name):
weights_h1 = tf.Variable(tf.truncated_normal([num_input_neurons, num_hidden_neurons], seed=1), name="w1", trainable=True)
biases_b1 = tf.Variable(tf.truncated_normal([1, num_hidden_neurons], seed=2), name="bias1", trainable=True)
weights_out = tf.Variable(tf.truncated_normal([num_hidden_neurons, 1], seed=3), name="w2", trainable=True)
tf.summary.histogram("weights_h1", weights_h1)
tf.summary.histogram("biases_b1", biases_b1)
tf.summary.histogram("weights_out", weights_out)
z1 = tf.add(tf.matmul(inp, weights_h1, transpose_a=True), biases_b1, name="z1_element")
a1 = tf.sigmoid(z1, name='a1_element')
z2 = tf.matmul(a1, weights_out)
#z2 = tf.add(tf.matmul(a1, weights_out), biases_b2)
u = tf.identity(z2, name="output") # this corresponds to utility u
return (u, [a1,z1])
#%%
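# Instantiate M structurally identical utility networks, one per item in the ranking;
# each returns its scalar utility plus the hidden-layer tensors needed for manual backprop.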
net_outputs = []
net_elements = []
for i0 in range(M):
u, inner_parts = create_graph(net_inputs[i0], 'net'+str(i0+1))
net_outputs.append(u)
net_elements.append(inner_parts)
#%% Network ranking losses
net_ranking_losses = []
for i0 in range(M):
net_ranking_losses.append(plnetutils.tf_calculate_derivative((i0+1), net_outputs))
#%% Trainable network variables
trainable_network_vars = []
for i0 in range(M):
trainable_network_vars.append(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='net'+str(i0+1)))
#%% get_gradients func
def get_gradients(delta, tvs, inner_parts, input_element, name):
''' Preliminary custom gradients function.
It calculates gradients for a single-hidden layer feedforward neural
network SLFN only.
It expects the tvs to be the sequence (w1, b1, w2).
'''
#g=tf.get_default_graph()
#g.get_operations()[41].name
# TODO: filter operations to receive a1 ...
a1 = inner_parts[0]
z1 = inner_parts[1]
w2 = tvs[2]
with tf.name_scope(name):
d_w_2 = tf.transpose(tf.matmul(delta, a1), name="dw2")
#d_b_2 = tf.constant(0.0)
sp = sigmaprime(z1)
delta2 = tf.multiply(tf.transpose(tf.multiply(w2,delta)),sp)
d_b_1 = delta2
d_w_1 = tf.transpose(tf.matmul(tf.transpose(delta2), tf.transpose(input_element)), name="dw1")
return [d_w_1, d_b_1, d_w_2]
#%% Net gradients (v2: with grad_ys info)
#net_gradients_orig = []
#for i0 in range(M):
# net_gradients_orig.append(tf.gradients(net_ranking_losses[i0], trainable_network_vars[i0], name = 'grad'+str(i0+1)))
#%%
net_gradients = []
for i0 in range(M):
net_gradients.append(get_gradients(net_ranking_losses[i0], trainable_network_vars[i0], net_elements[i0], net_inputs[i0], 'net'+str(i0+1)))
#%% Accumulation operation v2
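# Sum the per-network gradients layer-wise so each of the M networks receives the same accumulated update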
acc_ops = []
num_layers = len(net_gradients[0])
for i0 in range(num_layers):
layer_list = []
for i1 in range(M):
layer_list.append(net_gradients[i1][i0])
acc_ops.append(tf.add_n(layer_list, name='acc_weights_layer'+str(i0+1)))
#%% Accumulation operation
#net_gradients_accumulation = net_gradients[0]
#for i1 in range(1,M):
# net_gradients_accumulation = net_gradients_accumulation + net_gradients[i1]
#%% Optimization operations
opt = tf.train.GradientDescentOptimizer(learn_rate)
training_ops = []
for i0 in range(M):
training_ops.append(opt.apply_gradients(zip(acc_ops, trainable_network_vars[i0])))
#%% Begin Session
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("/tmp/plnet_tf/2")
writer.add_graph(sess.graph)
#%% Training
N_tr = len(Z)
for epoch in range(n_epochs):
RP = np.random.permutation(N_tr)
for i1 in range(N_tr):
ct_observation = Z[RP[i1]]
current_dict = {}
for i0 in range(M):
current_dict[net_inputs[i0].name] = np.asarray(ct_observation[i0]).reshape((input_dim,1))
sess.run([net_gradients, training_ops], feed_dict=current_dict)
nll_data = plnetutils.get_NLL_dataset(sess, net_inputs[0], net_outputs[0], Z)
print("Epoch %i: NLL(tr) : %3.4f" %(epoch,nll_data))
#%% Close Session
sess.close()
| 2.46875 | 2 |
etreebrowser/sparql.py | CameronJRAllan/eTree-Browser | 1 | 12786877 | <gh_stars>1-10
from SPARQLWrapper import SPARQLWrapper, JSON, POSTDIRECTLY
import datetime
import urllib
from PyQt5 import QtWidgets
import calma
import graph
class SPARQL():
def __init__(self):
"""
Initializes an instance of the SPARQL class.
The SPARQL class is used for all interfacing with the SPARQL end-point provided from <NAME>'s research and work.
"""
self.sparql = SPARQLWrapper("https://etree.linkedmusic.org/sparql")
self.sparql.setReturnFormat(JSON)
self.sparql.setMethod("POST")
def get_calma_reference_release(self, releaseName):
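        """
        Retrieves the tracks of a given release that have CALMA analysis data attached.

        Parameters
        ----------
        releaseName : string
            Name (skos:prefLabel) of the release.

        Returns
        -------
        results : dict
            A JSON representation of track labels and their CALMA data URIs.
        """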
queryString = """
PREFIX etree:<http://etree.linkedmusic.org/vocab/>
PREFIX mo:<http://purl.org/ontology/mo/>
PREFIX event:<http://purl.org/NET/c4dm/event.owl#>
PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX calma: <http://calma.linkedmusic.org/vocab/>
SELECT DISTINCT ?label ?calma {{
?perf event:hasSubEvent ?tracklist.
?tracklist skos:prefLabel ?label.
?tracklist etree:number ?num.
?perf rdf:type mo:Performance.
?perf skos:prefLabel "{0}".
?tracklist calma:data ?calma.
}} ORDER BY ?num
""".format(releaseName)
self.sparql.setQuery(queryString)
return self.sparql.query().convert()
def get_release_properties(self, releaseName):
"""
Retrieves the properties of a given release.
Parameters
----------
releaseName : string
Name of the release.
Returns
-------
properties : dict
The properties found.
"""
try:
queryGetURI = """
SELECT * {{
?s ?p "{0}".
}}
""".format(releaseName)
self.sparql.setQuery(queryGetURI)
queryResults = self.sparql.query().convert()
queryGetProperties = """
SELECT * {{
<{0}> ?p ?o.
}}
""".format(str(queryResults['results']['bindings'][0]['s']['value']))
self.sparql.setQuery(queryGetProperties)
return self.sparql.query().convert()
except urllib.error.URLError as e:
return e
def get_release_subproperties(self, subject):
"""
Retrieves the sub-properties of a given release.
Parameters
----------
subject : string
            The release for which we want to retrieve the sub-properties.
Returns
-------
results : dictionary
A JSON dictionary of the properties returned.
"""
queryGetProperties = """
SELECT * {{
<{0}> ?p ?o.
}}
""".format(str(subject))
self.sparql.setQuery(queryGetProperties)
try:
return self.sparql.query().convert()
except Exception as e:
print(e)
pass
def get_tracklist(self, label):
"""
Retrieves a track-list for a given recording.
Parameters
----------
label : string
The label of the released used to identify the tracks which belong to it.
Returns
-------
results : dict
A JSON representation of the results returned by the end-point.
"""
queryString = """
PREFIX etree:<http://etree.linkedmusic.org/vocab/>
PREFIX mo:<http://purl.org/ontology/mo/>
PREFIX event:<http://purl.org/NET/c4dm/event.owl#>
PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT DISTINCT ?audio ?label ?num ?tracklist ?name {{
?perf event:hasSubEvent ?tracklist.
?tracklist skos:prefLabel ?label.
?tracklist etree:number ?num.
?tracklist etree:audio ?audio.
?perf rdf:type mo:Performance.
?perf skos:prefLabel "{0}".
?perf mo:performer ?performer.
?performer foaf:name ?name.
}} GROUP BY ?label ?audio ?num ORDER BY ?num
""".format(label)
self.sparql.setQuery(queryString)
return self.sparql.query().convert()
def get_tracklist_grouped(self, label):
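        """
        Retrieves a track-list for a given release, grouping the audio and CALMA links per track.

        Parameters
        ----------
        label : string
            The label of the release used to identify the tracks which belong to it.

        Returns
        -------
        results : dict
            A JSON representation of the results returned by the end-point.
        """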
queryString = """
PREFIX etree:<http://etree.linkedmusic.org/vocab/>
PREFIX mo:<http://purl.org/ontology/mo/>
PREFIX event:<http://purl.org/NET/c4dm/event.owl#>
PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX calma: <http://calma.linkedmusic.org/vocab/>
SELECT DISTINCT (group_concat(distinct ?audio; separator = "\\n") AS ?audio) (group_concat(distinct ?calma; separator = "\\n") AS
?calma) ?label ?num ?tracklist {{
?perf event:hasSubEvent ?tracklist.
?tracklist skos:prefLabel ?label.
?tracklist etree:number ?num.
?tracklist etree:audio ?audio.
?perf rdf:type mo:Performance.
?perf skos:prefLabel "{0}".
OPTIONAL {{?tracklist calma:data ?calma}}.
}} ORDER BY ?num
""".format(label)
self.sparql.setQuery(queryString)
return self.sparql.query().convert()
def get_artist_releases(self, filterField, filterStr, sparqlField, sparqlTriple):
"""
Retrieves all the releases by a particular artist.
Parameters
----------
filterField : string
The filter field.
filterStr : string
The filter string.
sparqlField : string
If a custom field is required (e.g genre), this is the field value required.
sparqlTriple : string
If a custom field is required (e.g genre), this is the triple required.
Returns
-------
results : dictionary
A JSON representation of the results returned by the end-point.
"""
sparql = SPARQLWrapper("http://etree.linkedmusic.org/sparql")
sparql.setReturnFormat(JSON)
queryString = """
PREFIX etree:<http://etree.linkedmusic.org/vocab/>
PREFIX mo:<http://purl.org/ontology/mo/>
PREFIX event:<http://purl.org/NET/c4dm/event.owl#>
PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT DISTINCT ?performer ?name ?prefLabel ?place ?date {0} WHERE
{{
?art skos:prefLabel ?prefLabel.
?art event:place ?location.
?location etree:location ?place.
?performer foaf:name ?name.
?art etree:date ?date.
?art mo:performer ?performer.
?art event:hasSubEvent ?tracklist.
{1}
""".format(sparqlField, sparqlTriple)
# If we have multiple filters (hence type LIST)
if type(filterStr) == list:
# Add prefix
queryString +='\nFILTER('
# Add each filter statement
for item in filterStr:
queryString += """?{0}="{1}" ||\n""".format(filterField, item.strip())
# Add suffix to be syntactically correct
queryString = queryString[:-3]
queryString += ')'
        # If we have a singular filter (hence type STRING)
else:
if len(filterField) > 0:
queryString += """FILTER(?{0}="{1}")""".format(filterField, filterStr)
# Add ending line of query
queryString += "\n} GROUP BY (?name)"
# Set and run query
self.sparql.setQuery(queryString)
return self.sparql.query().convert()
def execute_string(self, queryString):
"""
Executes a string representing a SPARQL query.
Having a general purpose "execute whatever this query is" is quite useful.
Parameters
----------
queryString : string
The SPARQL query string to be executed.
Returns
-------
results : dictionary
A JSON representation of the results returned by the end-point.
"""
self.sparql.setReturnFormat(JSON)
try:
self.sparql.setQuery(queryString)
return self.sparql.query().convert()
except Exception as e:
return e
def date_range(self, start, end):
"""
Creates a filter for a given range of dates.
SPARQL supports filtering, and this may be used to provide more specific results for a given date range.
Parameters
----------
start : string
The start date.
end : string
The end date.
Returns
-------
dateRange : string
A structured string that is inserted into the query to provide date filtering.
"""
# Normalize dates
startDate = datetime.datetime.strptime(start, "%d-%m-%Y").date()
endDate = datetime.datetime.strptime(end, "%d-%m-%Y").date()
delta = endDate - startDate
# If there are days to be filtered
if (delta.days > 0 and delta.days < 10000):
dateRange = 'FILTER ('
# Calculate date difference between start + end
delta = endDate - startDate
# If there are days to be filtered
if (delta.days > 0):
# Generate filter string
for i in range(delta.days + 1):
dateRange = dateRange + """ str(?date) = '""" + str(
startDate + datetime.timedelta(days=i)) + """' \n ||"""
# Add suffix
dateRange = dateRange[:-2]
dateRange = dateRange + ')'
return dateRange
else:
return ''
def perform_search(self, dateFrom, dateTo, artists, genres, locations, limit, trackName, countries, customSearchString, venue, orderBy, onlyCalma):
"""
Executes a basic search on the SPARQL end-point.
Parameters
----------
        dateFrom : string
            Start date for the date range filter.
        dateTo : string
            End date for the date range filter.
        artists : string
            A list of artists, comma separated.
        genres : string
            A list of genres, comma separated.
        locations : string
            A list of locations, comma separated.
        limit : int
            Number of results to be returned (0 for no limit).
        trackName : string
            A track name to filter on.
        countries : string
            A list of countries, comma separated.
        customSearchString : string or list
            Additional raw SPARQL conditions inserted into the query.
        venue : string
            A venue name to filter on.
        orderBy : string
            The field to order results by (e.g. 'Artist', 'Date').
        onlyCalma : boolean
            If True, restrict results to releases with CALMA analysis data.
Returns
-------
q : string
A string which may be executed as a SPARQL query.
"""
artists = artists.split(',')
genres = genres.split(',')
artists = [a.strip() for a in artists]
genres = [g.strip() for g in genres]
fields = " ?label ?performer ?description ?location ?place ?date ?genre"
whereString = """
?art skos:prefLabel ?label.
?art mo:performer ?performer.
?art etree:description ?description.
?performer foaf:name ?name.
?art event:place ?location.
?art etree:date ?date.
?location etree:location ?place.
?art event:hasSubEvent ?subEvent
OPTIONAL {?performer etree:mbTag ?genre}.
"""
if onlyCalma:
whereString += "\n ?subEvent calma:data ?calma."
else:
whereString += "\n OPTIONAL {?subEvent calma:data ?calma}."
# if customConditionType == 'AND':
if isinstance(customSearchString, list):
customSearchString = "\n".join(item for item in customSearchString)
# elif customConditionType == 'OR':
# customSearchString = "\n ||".join(item for item in customSearchString)
# else:
# customSearchString = ''
# print(customSearchString)
# Calculate date filters
dateString = self.date_range(dateFrom, dateTo)
# If limit is 0
if (limit == 0):
limit = ''
# If custom limit entered
else:
limit = 'LIMIT ' + str(limit)
# Generate filters for artists, genres, locations
artistString = self.text_parse(artists, 1)
genreString = self.text_parse(genres, 2)
locationString = self.text_parse(locations, 3)
trackString = self.text_parse(trackName, 4)
venueString = self.text_parse(venue, 5)
countriesString = self.text_parse(countries, 3)
orderByString = self.parse_order_by(orderBy)
# Add extra triples if required
#if len(genreString) > 2:
#fields += ' ?genre'
#whereString += '?performer etree:mbTag ?genre.'
if len(trackString) > 2:
fields += ' ?trackname'
whereString += '?track skos:prefLabel ?trackname.'
if len(venueString) > 2:
fields += ' ?venue'
whereString += '?location etree:name ?venue.'
q = """
PREFIX etree:<http://etree.linkedmusic.org/vocab/>
PREFIX mo:<http://purl.org/ontology/mo/>
PREFIX event:<http://purl.org/NET/c4dm/event.owl#>
PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX calma: <http://calma.linkedmusic.org/vocab/>
SELECT DISTINCT ?label ?name ?place ?location ?date (group_concat(distinct ?calma; separator = "\\n") AS ?calma) WHERE {{
{0}
{1}
{2}
{3}
{4}
{5}
{6}
{7}
{8}
}}
{9}
{10}
""".format(whereString, artistString, genreString, locationString, venueString, dateString, trackString, customSearchString,
countriesString, orderByString, limit)
print(q)
return q
def get_venue_information(self, label):
"""
Retrieves venue information for a particular release.
Parameters
----------
label : string
            The release for which we want to retrieve the venue info.
Returns
-------
resultsDict : dict
A dictionary of a mixture of the GeoNames and LastFM data
"""
self.sparql.setQuery("""
PREFIX etree:<http://etree.linkedmusic.org/vocab/>
PREFIX mo:<http://purl.org/ontology/mo/>
PREFIX event:<http://purl.org/NET/c4dm/event.owl#>
PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX sim: <http://purl.org/ontology/similarity/>
SELECT DISTINCT ?place ?location ?obj WHERE {{
?art skos:prefLabel "{0}".
?art event:place ?location.
?location sim:subjectOf ?external.
?external sim:object ?obj.
?location etree:location ?place.
}} GROUP BY ?place ?location ?obj
""".format(label))
results = self.sparql.query().convert()
resultsDict = {'geoname' : None,
'lastfm' : None}
for result in results['results']['bindings']:
if 'geoname' in result['obj']['value']:
resultsDict['geoname'] = result['obj']['value']
elif 'last.fm' in result['obj']['value']:
resultsDict['lastfm'] = result['obj']['value']
return resultsDict
def parse_order_by(self, orderBy):
"""
        Generates a filter string for ordering by a given field.
Parameters
----------
orderBy : str
The input field.
Returns
-------
filterString : string
An ORDER-BY string relative to the input field.
"""
translate = {'Artist' : '?name',
'Label' : '?label',
'Date' : '?date',
'Genre' : '?genre',
'Location' : '?place'
}
return "ORDER BY {0}".format(translate[orderBy])
def text_parse(self, inputList, intType):
"""
        Generates a filter string for a given list and type.
        A complex query may have several separate filters applied, in which case it makes sense to move this into
        its own method. The type is used to identify the attribute being filtered.
Parameters
----------
inputList : string[]
A list of filter conditions
type : int
The attribute to be filtered
Returns
-------
filterString : string
A appropriate filter string for the inputs
"""
# If no data to process, return
    if inputList is None: return ''
# Determine correct field
if intType == 1:
fieldType = """?name=\""""
elif intType == 2:
fieldType = """?genre=\""""
elif intType == 3:
fieldType = """?place=\""""
elif intType == 4:
fieldType = """?trackname=\""""
elif intType == 5:
fieldType = """?venue=\""""
else:
      raise ValueError('No matching field type')
if isinstance(inputList, str):
inputList = [inputList]
# Join all possible filter clauses
temp = " ".join(str(x) for x in inputList)
if len(temp.rstrip()) < 3:
return ''
# If matches requirement for filter string
else:
# Create filter string
filterString = 'FILTER('
for entry in list(set(inputList)):
filterString = """ {0}{1}{2}" ||\n""".format(filterString, fieldType, entry.replace('"', '').replace("'",'').rstrip())
filterString = filterString[:-3].strip()
filterString += ')\n'
return filterString
def get_artist_from_tracklist(self, tracklistURL):
"""
Retrieves the artist from a particular track-list.
Parameters
----------
tracklistURL : string
A URL in the tracklist, for which we want to find the performer.
Returns
-------
artistName : str
The name of the artist found.
"""
name = self.execute_string("""
PREFIX etree:<http://etree.linkedmusic.org/vocab/>
PREFIX mo:<http://purl.org/ontology/mo/>
PREFIX event:<http://purl.org/NET/c4dm/event.owl#>
PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT DISTINCT ?name WHERE
{{
<{0}> mo:performer ?performer.
?performer foaf:name ?name.
}} LIMIT 1
""".format(tracklistURL))
return name['results']['bindings'][0]['name']['value']
def get_label_tracklist(self, eventurl):
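    """
    Retrieves the release label that a given tracklist event belongs to.

    Parameters
    ----------
    eventurl : string
      URI of the tracklist (sub-event) whose parent release label is wanted.

    Returns
    -------
    label : str
      The release label found.
    """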
label = self.execute_string("""
PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
PREFIX etree:<http://etree.linkedmusic.org/vocab/>
SELECT DISTINCT ?label WHERE
{{
<{0}> etree:isSubEventOf ?event.
?event skos:prefLabel ?label.
}} LIMIT 1
""".format(eventurl))
return label['results']['bindings'][0]['label']['value']
def get_audio_track(self, trackURI):
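    """
    Retrieves the audio URL, track number and label for a given track URI.

    Parameters
    ----------
    trackURI : string
      URI of the track.

    Returns
    -------
    results : list
      The raw SPARQL result bindings for the track.
    """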
label = self.execute_string("""
PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
PREFIX etree:<http://etree.linkedmusic.org/vocab/>
SELECT DISTINCT ?url ?num ?label WHERE
{{
<{0}> etree:audio ?url.
<{0}> etree:number ?num.
<{0}> skos:prefLabel ?label.
}}
""".format(trackURI))
return label['results']['bindings'] | 2.609375 | 3 |
pymagnitude/third_party/allennlp/tests/data/token_indexers/dep_label_indexer_test.py | tpeng/magnitude | 1,520 | 12786878 | <gh_stars>1000+
# pylint: disable=no-self-use,invalid-name
from __future__ import absolute_import
from collections import defaultdict
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import DepLabelIndexer
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
class TestDepLabelIndexer(AllenNlpTestCase):
def setUp(self):
super(TestDepLabelIndexer, self).setUp()
self.tokenizer = SpacyWordSplitter(parse=True)
    def test_count_vocab_items_uses_dep_labels(self):
tokens = self.tokenizer.split_words(u"This is a sentence.")
tokens = [Token(u"<S>")] + [t for t in tokens] + [Token(u"</S>")]
indexer = DepLabelIndexer()
counter = defaultdict(lambda: defaultdict(int))
for token in tokens:
indexer.count_vocab_items(token, counter)
assert counter[u"dep_labels"] == {u"ROOT": 1, u"nsubj": 1,
u"det": 1, u"NONE": 2, u"attr": 1, u"punct": 1}
    def test_tokens_to_indices_uses_dep_labels(self):
tokens = self.tokenizer.split_words(u"This is a sentence.")
tokens = [t for t in tokens] + [Token(u"</S>")]
vocab = Vocabulary()
root_index = vocab.add_token_to_namespace(u'ROOT', namespace=u'dep_labels')
none_index = vocab.add_token_to_namespace(u'NONE', namespace=u'dep_labels')
indexer = DepLabelIndexer()
assert indexer.tokens_to_indices([tokens[1]], vocab, u"tokens1") == {u"tokens1": [root_index]}
assert indexer.tokens_to_indices([tokens[-1]], vocab, u"tokens-1") == {u"tokens-1": [none_index]}
def test_padding_functions(self):
indexer = DepLabelIndexer()
assert indexer.get_padding_token() == 0
assert indexer.get_padding_lengths(0) == {}
def test_as_array_produces_token_sequence(self):
indexer = DepLabelIndexer()
padded_tokens = indexer.pad_token_sequence({u'key': [1, 2, 3, 4, 5]}, {u'key': 10}, {})
assert padded_tokens == {u'key': [1, 2, 3, 4, 5, 0, 0, 0, 0, 0]}
| 2.453125 | 2 |
dragonhacks2019/config.py | jcarrete5/dragonhacks2019 | 0 | 12786879 | import json
class Config:
def __init__(self, file=None):
with open(file) as cfg_file:
self._cfg = json.load(cfg_file)
self._scopes = [scope for scope in self._cfg['scopes']]
self._scope_index = 0
self._current_scope: dict = self._scopes[0]
def next_scope(self) -> bool:
"""
Increments the current scope. Returns `True` if successful,
otherwise `False`.
"""
if self._scope_index + 1 >= len(self._scopes):
return False
self._scope_index += 1
self._current_scope = self._scopes[self._scope_index]
return True
def prev_scope(self) -> bool:
"""
Decrements the current scope. Returns `True` if successful,
otherwise `False`.
"""
if self._scope_index - 1 < 0:
return False
self._scope_index -= 1
self._current_scope = self._scopes[self._scope_index]
return True
def actions(self, phrase: str) -> list:
"""
Returns the actions to be executed when the `phrase` is said
or an empty list if the `phrase` isn't recognized.
"""
return self._current_scope.get(phrase, [])
def phrases(self) -> set:
"""
Return the possible phrases that can be said in the current
scope.
"""
return set(phrase for phrase in self._current_scope.keys())
def __repr__(self):
return str(self._scopes)
if __name__ == '__main__':
cfg = Config('test_format.json')
assert cfg.next_scope()
assert not cfg.next_scope()
assert cfg.prev_scope()
assert not cfg.prev_scope()
assert {'forward', 'back', 'next set'} == cfg.phrases()
assert ['right'] == cfg.actions('forward')
print("Passed")
| 3.21875 | 3 |
solutions/234.palindrome-linked-list/palindrome-linked-list.py | wangsongiam/leetcode | 3 | 12786880 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
1 2 3 4 5
1 2 3 4
| |
"""
slow = head
fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
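        # With an odd number of nodes, fast stops on the last node; skip the middle element before comparing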
if fast and fast.next is None:
slow = slow.next
def reverse(prev, head):
if not head:
return prev
next = head.next
head.next = prev
return reverse(head, next)
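        # Reverse the second half in place, then walk both halves comparing values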
ref = reverse(None, slow)
while ref:
if ref.val != head.val:
return False
ref = ref.next
head = head.next
return True
| 3.71875 | 4 |
request_examples/get_request.py | ParthSareen/Intro-to-APIs | 0 | 12786881 | import requests
import pprint
pp = pprint.PrettyPrinter(indent=4)
# Example for making a GET requests
link = 'http://localhost:5000/example-get-static'
response = requests.get(link)
responseDict = response.json()
pp.pprint(responseDict)
# access the dict
print(responseDict['num-example'])
# your name goes here
name = "name"
link2 = 'http://localhost:5000/example-get-dynamic?name={name}'.format(name=name)
response = requests.get(link2)
responseDict = response.json()
pp.pprint(responseDict)
| 3.203125 | 3 |
app/views.py | verowangari/pitch-post | 0 | 12786882 | from re import U
from flask import Blueprint, render_template,request,flash,redirect,url_for
from flask_login import login_required, current_user
from .models import Pitch,User,Comment,Like
from . import db
views = Blueprint("views", __name__)
@views.route("/")
@views.route("/home")
@login_required
def home():
pitches=Pitch.query.all()
return render_template("home.html", user=current_user,pitches=pitches)
@views.route("/create-pitch", methods=['GET', 'POST'])
@login_required
def create_pitch():
if request.method =="POST":
text = request.form.get('text')
if not text:
flash('This cannot be empty',category='error')
else:
pitch=Pitch(text=text,author=current_user.id)
db.session.add(pitch)
db.session.commit()
flash('Pitch Created!!',category='success')
return redirect(url_for('views.home'))
return render_template('create_pitch.html', user=current_user)
@views.route("/delete-pitch/<id>")
@login_required
def delete_pitch(id):
pitch = Pitch.query.filter_by(id=id).first()
if not pitch:
flash("Post does not exist.", category='error')
    elif current_user.id != pitch.author:
flash('You do not have permission to delete this post.', category='error')
else:
db.session.delete(pitch)
db.session.commit()
flash('Pitch deleted.', category='success')
return redirect(url_for('views.home'))
@views.route("/pitches/<username>")
@login_required
def pitches(username):
user = User.query.filter_by(username=username).first()
if not user:
flash('No user with that username exists.', category='error')
return redirect(url_for('views.home'))
pitches=user.pitches
return render_template("pitch.html", user=current_user, pitches=pitches, username=username)
@views.route("/create-comment/<pitch_id>", methods=['POST'])
@login_required
def create_comment(pitch_id):
text = request.form.get('text')
if not text:
flash('Comment cannot be empty.', category='error')
else:
        pitch = Pitch.query.filter_by(id=pitch_id).first()
if pitch:
comment = Comment(
text=text, author=current_user.id, pitch_id=pitch_id)
db.session.add(comment)
db.session.commit()
else:
flash('Post does not exist.', category='error')
return redirect(url_for('views.home'))
@views.route("/delete-comment/<comment_id>")
@login_required
def delete_comment(comment_id):
comment = Comment.query.filter_by(id=comment_id).first()
if not comment:
flash('Comment does not exist.', category='error')
elif current_user.id != comment.author and current_user.id != comment.pitch.author:
flash('You do not have permission to delete this comment.', category='error')
else:
db.session.delete(comment)
db.session.commit()
return redirect(url_for('views.home'))
| 2.5 | 2 |
torchlatent/semiring.py | speedcell4/torchlatent | 7 | 12786883 | <reponame>speedcell4/torchlatent<gh_stars>1-10
import torch
from torch import Tensor
from torch.types import Device
from torchrua.scatter import scatter_add, scatter_max, scatter_mul, scatter_logsumexp
from torchrua.tree_reduction import tree_reduce_sequence, TreeReduceIndices
from torchlatent.functional import logsumexp, logaddexp
__all__ = [
'Semiring',
'Std', 'Log', 'Max',
]
class Semiring(object):
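    """
    Base interface for the semiring operations used by the tree-reduction routines.

    Subclasses define the additive and multiplicative identities (``zero``/``one``)
    together with the elementwise, reduction and scatter operations built on them.
    """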
zero: float
one: float
@classmethod
def eye_like(cls, tensor: Tensor, dtype: torch.dtype = None, device: Device = None) -> Tensor:
if dtype is None:
dtype = tensor.dtype
if device is None:
device = tensor.device
*_, n = tensor.size()
eye = torch.full((n, n), fill_value=cls.zero, dtype=dtype, device=device)
idx = torch.arange(n, dtype=torch.long, device=device)
eye[idx, idx] = cls.one
return eye
@classmethod
def add(cls, x: Tensor, y: Tensor) -> Tensor:
raise NotImplementedError
@classmethod
def mul(cls, x: Tensor, y: Tensor) -> Tensor:
raise NotImplementedError
@classmethod
def sum(cls, tensor: Tensor, dim: int, keepdim: bool = False) -> Tensor:
raise NotImplementedError
@classmethod
def prod(cls, tensor: Tensor, dim: int, keepdim: bool = False) -> Tensor:
raise NotImplementedError
@classmethod
def scatter_add(cls, tensor: Tensor, index: Tensor) -> Tensor:
raise NotImplementedError
@classmethod
def scatter_mul(cls, tensor: Tensor, index: Tensor) -> Tensor:
raise NotImplementedError
@classmethod
def bmm(cls, x: Tensor, y: Tensor) -> Tensor:
return cls.sum(cls.mul(x[..., :, :, None], y[..., None, :, :]), dim=-2, keepdim=False)
@classmethod
def reduce(cls, tensor: Tensor, indices: TreeReduceIndices) -> Tensor:
return tree_reduce_sequence(cls.bmm)(tensor=tensor, indices=indices)
class Std(Semiring):
zero = 0.
one = 1.
@classmethod
def add(cls, x: Tensor, y: Tensor) -> Tensor:
return x + y
@classmethod
def mul(cls, x: Tensor, y: Tensor) -> Tensor:
return x * y
@classmethod
def sum(cls, tensor: Tensor, dim: int, keepdim: bool = False) -> Tensor:
return torch.sum(tensor, dim=dim, keepdim=keepdim)
@classmethod
def prod(cls, tensor: Tensor, dim: int, keepdim: bool = False) -> Tensor:
return torch.prod(tensor, dim=dim, keepdim=keepdim)
@classmethod
def scatter_add(cls, tensor: Tensor, index: Tensor) -> Tensor:
return scatter_add(tensor=tensor, index=index)
@classmethod
def scatter_mul(cls, tensor: Tensor, index: Tensor) -> Tensor:
return scatter_mul(tensor=tensor, index=index)
class Log(Semiring):
zero = -float('inf')
one = 0.
@classmethod
def add(cls, x: Tensor, y: Tensor) -> Tensor:
return logaddexp(x, y)
@classmethod
def mul(cls, x: Tensor, y: Tensor) -> Tensor:
return x + y
@classmethod
def sum(cls, tensor: Tensor, dim: int, keepdim: bool = False) -> Tensor:
return logsumexp(tensor, dim=dim, keepdim=keepdim)
@classmethod
def prod(cls, tensor: Tensor, dim: int, keepdim: bool = False) -> Tensor:
return torch.sum(tensor, dim=dim, keepdim=keepdim)
@classmethod
def scatter_add(cls, tensor: Tensor, index: Tensor) -> Tensor:
return scatter_logsumexp(tensor=tensor, index=index)
@classmethod
def scatter_mul(cls, tensor: Tensor, index: Tensor) -> Tensor:
return scatter_add(tensor=tensor, index=index)
class Max(Semiring):
zero = -float('inf')
one = 0.
@classmethod
def add(cls, x: Tensor, y: Tensor) -> Tensor:
return torch.maximum(x, y)
@classmethod
def mul(cls, x: Tensor, y: Tensor) -> Tensor:
return x + y
@classmethod
def sum(cls, tensor: Tensor, dim: int, keepdim: bool = False) -> Tensor:
return torch.max(tensor, dim=dim, keepdim=keepdim).values
@classmethod
def prod(cls, tensor: Tensor, dim: int, keepdim: bool = False) -> Tensor:
return torch.sum(tensor, dim=dim, keepdim=keepdim)
@classmethod
def scatter_add(cls, tensor: Tensor, index: Tensor) -> Tensor:
return scatter_max(tensor=tensor, index=index)
@classmethod
def scatter_mul(cls, tensor: Tensor, index: Tensor) -> Tensor:
return scatter_add(tensor=tensor, index=index)
| 1.984375 | 2 |
train/coders/coder.py | mukkachaitanya/parity-models | 0 | 12786884 | <reponame>mukkachaitanya/parity-models<gh_stars>0
import torch
import torch.nn as nn
class Coder(nn.Module):
"""
Base class for implementing encoders and decoders. All new encoders and
decoders should derive from this class.
"""
def __init__(self, num_in, num_out, in_dim):
"""
Parameters
----------
num_in: int
Number of input units for a forward pass of the coder.
num_out: int
Number of output units from a forward pass of the coder.
in_dim: in_dim
Dimension of flattened inputs to the coder.
"""
super().__init__()
self.num_in = num_in
self.num_out = num_out
def forward(self, in_data):
"""
Parameters
----------
in_data: ``torch.autograd.Variable``
Input data for a forward pass of the coder.
"""
pass
class Decoder(Coder):
"""
Class for implementing decoders. All new decoders should derive from this
class.
"""
def __init__(self, num_in, num_out, in_dim):
super().__init__(num_in, num_out, in_dim)
def forward(self, in_data):
pass
def combine_labels(self, in_data):
"""
Parameters
----------
in_data: ``torch.autograd.Variable``
Input labels that are to be combined together.
Returns
-------
Combination over in_data that can be used directly for the label in
calculating loss for a parity model.
"""
pass
| 2.765625 | 3 |
servizi/spese/migrations/0002_auto_20161126_1756.py | l-dfa/django-spese | 0 | 12786885 | <filename>servizi/spese/migrations/0002_auto_20161126_1756.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-11-26 16:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('spese', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TransferFund',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name_plural': 'transfer funds',
'verbose_name': 'transfer fund',
},
),
migrations.AlterModelOptions(
name='percentdeduction',
options={'ordering': ['wc_type__name', '-valid_from'], 'verbose_name': 'percent deduction', 'verbose_name_plural': 'percent deductions'},
),
migrations.AlterField(
model_name='expense',
name='work_cost_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='work_cost_type', to='spese.WCType'),
),
migrations.AddField(
model_name='transferfund',
name='destination',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='destination', to='spese.Expense'),
),
migrations.AddField(
model_name='transferfund',
name='source',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='source', to='spese.Expense'),
),
]
| 1.453125 | 1 |
user_manage/views.py | FXuZ/colock-server | 0 | 12786886 | from django.shortcuts import render, render_to_response
from django import forms
from django.http import HttpResponse
from django.forms import ModelForm
from user_manage.models import User
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
import requests
from colock.key_generator import *
from colock.utils import hook
import json
import colock.Error
class RegisterForm(ModelForm):
class Meta:
model = User
fields = ['cid', 'phone_num', 'region_num', 'nickname', 'user_name', 'user_logo']
verify_code = forms.IntegerField()
class RegisterReturnForm(forms.Form):
uid = forms.IntegerField()
ukey = forms.CharField(max_length=32)
class MobSMS:
def __init__(self, appkey):
self.appkey = appkey
self.verify_url = 'https://api.sms.mob.com/sms/verify'
def verify_sms_code(self, zone, phone, code, debug=False):
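        """Check a phone verification code against the Mob SMS API; returns a dict with a 'status' key (200 on success; assumed API format)."""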
if debug:
            return {'status': 200}
data = {'appkey': self.appkey, 'phone': phone, 'zone': zone, 'code': code}
req = requests.post(self.verify_url, data=data, verify=False)
if req.status_code == 200:
j = req.json()
return j
        return {'status': 500}
# this is not safe!!!
@csrf_exempt
def register(request):
def register_verify(user, vcode):
res = mobsms.verify_sms_code(user.region_num, user.phone_num, vcode)
        if res['status'] == 200:
return True
else:
return False
if request.method == "POST":
reg_form = RegisterForm(request.POST)
if reg_form.is_valid():
new_user = reg_form.save(commit=False)
new_user.reg_time = timezone.now()
new_user.ukey = user_key_gen(new_user.id, new_user.region_num, new_user.phone_num, new_user.reg_time)
new_user.phone_hash = phone_hash_gen(new_user.region_num, new_user.phone_num)
new_user.user_logo = request.FILES['user_logo']
verify_code = request.POST['verify_code']
            if register_verify(new_user, verify_code):
new_user.save()
return_value = {'uid': new_user.id, 'ukey': new_user.ukey}
# ensure_ascii=False to handle Chinese
return HttpResponse(json.dumps(return_value, ensure_ascii=False))
# success and created new user
else:
return HttpResponse('Authen Error', status=500)
else:
uf = RegisterForm()
return render_to_response('register.html', {'uf': uf})
mobsms = MobSMS("5fc5a301e100") ### add real keys here!!!
@hook("verify")
def verify(meta, data):
uid = meta['uid']
vcode = data['code']
user = User.objects.get(id=uid)
user.verify_code = vcode
user.verified = False
res = mobsms.verify_sms_code(user.region_num, user.phone_num, vcode)
    if res['status'] == 200:
user.verified = True
user.save()
return '', '', res
| 1.976563 | 2 |
accounts/i18n.py | j3ygh/ctdb | 1 | 12786887 | """
This file contains strings which need i18n but doesn't have a place in any files.
They maybe appear in DB only, so they can't be detected without being writed explicitly.
"""
from django.utils.translation import gettext_lazy as _
I18N_NEEDED = [
_('T00 member'),
_('T01 member'),
_('T02 member'),
_('T11 member'),
_('T12 member'),
_('T21 member'),
_('T22 member'),
_('T31 member'),
_('T32 member'),
_('T00 supervisor'),
_('T01 supervisor'),
_('T02 supervisor'),
_('T11 supervisor'),
_('T12 supervisor'),
_('T21 supervisor'),
_('T22 supervisor'),
_('T31 supervisor'),
_('T32 supervisor'),
]
| 1.992188 | 2 |
src/predict.py | Kosuke-Yamada/yans2021hackathon_teamd | 3 | 12786888 | #-*-coding:utf-8-*-
import os
import re
import json
import time
import glob
import random
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, AutoModel
from shiba import Shiba, CodepointTokenizer, get_pretrained_state_dict
from datasets import CharbertDataset, ShibaDataset, collate_fn
from models import CharbertForSequenceLabeling, ShibaForSequenceLabeling
from utils import epoch_time, decode_attr_bio, operate_bio, set_seed
def parse_arg():
parser = argparse.ArgumentParser()
parser.add_argument("--input_plain_path", type=str)
parser.add_argument("--input_annotation_path", type=str)
parser.add_argument("--output_path", type=str)
parser.add_argument("--category", type=str)
parser.add_argument("--block", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--batch_size", type=int)
parser.add_argument("--cuda", type=int)
return parser.parse_args()
if __name__ == "__main__":
args = parse_arg()
INPUT_PLAIN_PATH = args.input_plain_path
INPUT_ANNOTATION_PATH = args.input_annotation_path
OUTPUT_PATH = args.output_path
CATEGORY = args.category
BLOCK = args.block
MODEL = args.model
BATCH_SIZE = args.batch_size
CUDA = args.cuda
OUTPUT_PATH = OUTPUT_PATH+CATEGORY.lower()+'_'+MODEL.lower()+'_'+BLOCK.lower()+'/'
with open(OUTPUT_PATH+'params.json', 'r') as f:
params = dict(json.load(f))
SEED = params['seed']
MAX_LENGTH = params['max_length']
set_seed(SEED)
device = torch.device("cuda:"+str(CUDA) if torch.cuda.is_available() else "cpu")
print('read annotation files')
df = pd.read_json(INPUT_ANNOTATION_PATH+CATEGORY+'_dist.json', orient='records', lines=True)
attr2idx = {attr:i for i, attr in enumerate(sorted(set(df['attribute'])))}
idx2attr = {v:k for k, v in attr2idx.items()}
bio2idx = {'B':0, 'I':1, 'O':2}
idx2bio = {v:k for k, v in bio2idx.items()}
page_id_list = [int(path.split('/')[-1][:-4]) for path in sorted(glob.glob(INPUT_PLAIN_PATH+CATEGORY+'/*'))]
print('read plain files')
pred_page2plain = {}
for page_id in page_id_list:
with open(INPUT_PLAIN_PATH+CATEGORY+'/'+str(page_id)+'.txt', 'r') as f:
pred_page2plain[page_id] = f.readlines()
print('load models')
if MODEL == 'charbert':
pretrained_model = 'cl-tohoku/bert-base-japanese-char-whole-word-masking'
tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
bert = AutoModel.from_pretrained(pretrained_model)
model = CharbertForSequenceLabeling(bert, attr_size=len(attr2idx), label_size=len(bio2idx))
else:
tokenizer = CodepointTokenizer()
shiba = Shiba()
shiba.load_state_dict(get_pretrained_state_dict())
model = ShibaForSequenceLabeling(shiba, attr_size=len(attr2idx), label_size=len(bio2idx))
model.load_state_dict(torch.load(OUTPUT_PATH+'best_model.pt'))
bar = tqdm(total=len(pred_page2plain))
result_list = []
for idx, page_id in enumerate(list(pred_page2plain.keys())):
page2plain = {page_id:pred_page2plain[page_id]}
if MODEL == 'charbert':
ds = CharbertDataset(page2plain, tokenizer, attr2idx, bio2idx, MAX_LENGTH, BLOCK, None)
else:
ds = ShibaDataset(page2plain, tokenizer, attr2idx, bio2idx, MAX_LENGTH, BLOCK, None)
dl = DataLoader(ds, batch_size=BATCH_SIZE, collate_fn=collate_fn)
_total_labels, _total_preds = torch.LongTensor(), torch.LongTensor()
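        # Run the model batch by batch, collecting gold labels and argmax predictions for the whole page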
for inputs, attention_masks, labels in dl:
with torch.no_grad():
model.to(device).eval()
output = model(inputs.to(device), attention_masks.to(device), labels.to(device))
probs = torch.stack(output[1]).transpose(0, 1).cpu()
preds = probs.argmax(axis=-1)
_total_labels = torch.cat([_total_labels, labels.transpose(0, 1).reshape(labels.shape[1], -1)], axis=1)
_total_preds = torch.cat([_total_preds, preds.transpose(0, 1).reshape(preds.shape[1], -1)], axis=1)
total_preds = _total_preds[(_total_labels != -1).nonzero(as_tuple=True)].reshape(_total_preds.shape[0], -1)
bio_preds = decode_attr_bio(total_preds.tolist(), idx2attr, idx2bio)
new_char_idx_dict = {page_dict['new_char_idx']:page_dict \
for page_dict in ds.df_new[page_id].to_dict('records')}
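        # Walk the predicted BIO tags per attribute and stitch consecutive B/I characters into offset spans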
for attr_idx, bios in enumerate(bio_preds):
pre_bio = 'O'
result = {'page_id':page_id, 'title':ds.page2title[page_id], \
'attribute':idx2attr[attr_idx], 'text_offset':{}}
for idx, bio in enumerate(bios):
bio = bio.split('-')[0]
ope = operate_bio(pre_bio, bio)
if ope['insert'] == True:
result_list.append(result)
result = {'page_id':page_id, 'title':ds.page2title[page_id], \
'attribute':idx2attr[attr_idx], 'text_offset':{}}
if ope['start'] == True:
result['text_offset']['start'] = {
'line_id': new_char_idx_dict[idx]['line_id'],
'offset': new_char_idx_dict[idx]['offset']
}
result['text_offset']['text'] = new_char_idx_dict[idx]['char']
if ope['end'] == True:
result['text_offset']['end'] = {
'line_id': new_char_idx_dict[idx]['line_id'],
'offset': new_char_idx_dict[idx]['offset']+1
}
if ope['start'] == False:
result['text_offset']['text'] += new_char_idx_dict[idx]['char']
pre_bio = bio
if bio in ['B', 'I']:
result_list.append(result)
bar.update(1)
df_result = pd.DataFrame(result_list)
df_result.to_json(OUTPUT_PATH+'predict.json', orient='records', force_ascii=False, lines=True) | 1.875 | 2 |
test/transform/test_base.py | marrow/schema | 3 | 12786889 | <filename>test/transform/test_base.py
from io import StringIO
from marrow.schema.exc import Concern
from marrow.schema.testing import TransformTest
from marrow.schema.transform.base import BaseTransform, Transform, IngressTransform, EgressTransform, SplitTransform
PASSTHROUGH = (None, False, True, "", "Foo", 27, 42.0, [], {})
ST = SplitTransform(
reader = IngressTransform(ingress=int),
writer = EgressTransform(egress=str)
)
class TestForeignPassthrough(TransformTest):
transform = BaseTransform().foreign
valid = PASSTHROUGH
def test_loads_none(self):
assert BaseTransform().loads('') is None
def test_load(self):
assert BaseTransform().load(StringIO(str("bar"))) == "bar"
class TestNativePassthrough(TransformTest):
transform = BaseTransform().native
valid = PASSTHROUGH
def test_dumps_none(self):
assert BaseTransform().dumps(None) == ''
def test_dump(self):
fh = StringIO()
assert BaseTransform().dump(fh, "baz") == 3
assert fh.getvalue() == "baz"
class TestTransform(TransformTest):
transform = Transform().native
valid = PASSTHROUGH + ((' foo ', 'foo'), )
def test_decoding(self):
result = self.transform('Zoë'.encode('utf8'))
assert isinstance(result, str)
assert result == 'Zoë'
class TestIngress(TransformTest):
transform = IngressTransform(ingress=int).native
valid = (27, ("42", 42), (2.15, 2))
invalid = ('x', '', [], {})
direction = 'incoming'
def test_concern(self):
try:
self.transform('x')
except Concern as e:
assert self.direction in str(e)
assert 'invalid literal' in str(e)
class TestEgress(TestIngress):
transform = EgressTransform(egress=int).foreign
direction = 'outgoing'
class TestSplitTransform(object):
def test_construction(self):
try:
SplitTransform()
except Concern as e:
pass
else:
assert False, "Failed to raise a concern."
class TestSplitTransformReader(TransformTest):
transform = ST.native
valid = (('27', 27), (3.14159, 3), (0.5, 0))
invalid = TestIngress.invalid + (float('inf'), )
def test_loads_none(self):
assert ST.loads('') is None
def test_load(self):
assert ST.load(StringIO(str("42"))) == 42
class TestSplitTransformWriter(TransformTest):
transform = ST.foreign
valid = ((27, '27'), (42, '42'), (3.14, "3.14"))
def test_dumps_none(self):
assert ST.dumps(None) == ''
def test_dump(self):
fh = StringIO()
assert ST.dump(fh, 2.15) == 4
assert fh.getvalue() == "2.15"
| 2.15625 | 2 |
Outils/IHM/TRUST_PLOT2D/src/trust_plot2d/MenuTRUSTWidget.py | cea-trust-platform/trust-code | 12 | 12786890 | <reponame>cea-trust-platform/trust-code
from pyqtside.QtCore import Qt, QTimer, QDir, Slot
from pyqtside.QtWidgets import QMainWindow,QMenu, QFileDialog, QDockWidget
from pyqtside.uic import loadUiGen
from .utils import completeResPath
from .FileManager import FileManager
import os
import curveplot
class MenuTRUSTWidget(QDockWidget):
def __init__(self,fv):
QDockWidget.__init__( self)
loadUiGen(completeResPath("MenuTRUSTWidget.ui"), self)
# self._timer = QTimer()
# self.connect(self._timer, SIGNAL("timeout()"), self.onTimerTrigger)
# self._timer.start(self.refreshWidget.value()*1000)
# self._fileManager = FileManager()
self._showFileView = fv
def _getLongCaseName(self):
cutn=self._showFileView._caseName.split(".data")[0]
return os.path.join(self._showFileView._currDir,cutn)
def execute(self,cmd):
import subprocess
print("Starting ",cmd)
cmd=cmd.split()
subprocess.Popen(cmd)
@Slot()
def onTerminalButton(self):
""" Start the run
"""
case= self._getLongCaseName()
if case == "":
return
        exterm = os.environ.get("Xterm")
        if not exterm:
            exterm = "xterm"
self.execute(exterm)
def EditFile(self,extension):
case= self._getLongCaseName()
if case == "":
return
case+=extension
# cmd="xemacs "+case
        editor = os.environ.get("TRUST_EDITOR")
        if not editor:
            editor = "xemacs"
cmd=editor+" "+case
self.execute(cmd)
@Slot()
def onEditOutButton(self):
self.EditFile(".out")
@Slot()
def onEditErrButton(self):
self.EditFile(".err")
@Slot()
def onEditButton(self):
""" Start the run
"""
self.EditFile(".data")
@Slot()
def onVisuButton(self):
""" Start the run
"""
case= self._getLongCaseName()
if case == "":
return
case+=".lata"
cmd="visit -o "+case
#cmd="paraview "+case
self.execute(cmd)
| 2.015625 | 2 |
mapclientplugins/scaffoldfiniteelementmeshfitterstep/view/scaffoldmeshfitterwidget.py | mahyar-osn/mapclientplugins.scaffoldfiniteelementmeshfitterstep | 0 | 12786891 | from PySide import QtGui, QtCore
from opencmiss.zinchandlers.scenemanipulation import SceneManipulation
from mapclientplugins.scaffoldfiniteelementmeshfitterstep.handlers.datapointadder import DataPointAdder
from .ui_scaffoldmeshfitterwidget import Ui_ScaffoldfitterWidget
class ScaffoldMeshFitterWidget(QtGui.QWidget):
def __init__(self, model, parent=None):
super(ScaffoldMeshFitterWidget, self).__init__(parent)
self._ui = Ui_ScaffoldfitterWidget()
self._ui.setupUi(model.get_shareable_open_gl_widget(), self)
self._ui.sceneviewer_widget.set_context(model.get_context())
self._settings = {'view-parameters': {}}
self._model = model
self._model.reset()
self._model.register_time_value_update_callback(self._update_time_value)
| 1.84375 | 2 |
PP4E-Examples-1.4/Examples/PP4E/Lang/summer3.py | AngelLiang/PP4E | 0 | 12786892 | sums = {}
for line in open('table4.txt'):
cols = [float(col) for col in line.split()]
for pos, val in enumerate(cols):
sums[pos] = sums.get(pos, 0.0) + val
for key in sorted(sums):
print(key, '=', sums[key])
| 2.640625 | 3 |
working/bin/forms/mainMenu.py | grinchios/college_project-commented | 0 | 12786893 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# theme and sys for linking signals
import sys
# other forms
import managerMenu as managermenu
import staffForm as staffform
import stockForm as stockform
import treatmentsForm as treatmentsform
import appointmentForm as appointmentform
import chartForm as chartform
import customerForm as customerform
# GUI libraries
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtCore import pyqtSignal, QObject
# Theme
import qdarkstyle
class Ui_MainMenu(QMainWindow, QObject):
valueChange = pyqtSignal(int)
def setupUi(self, MainMenu):
# 'global' information
MainMenu.setObjectName("MainMenu")
MainMenu.resize(1280, 720)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainMenu.sizePolicy().hasHeightForWidth())
MainMenu.setSizePolicy(sizePolicy)
self.centralWidget = QtWidgets.QWidget(MainMenu)
self.centralWidget.setObjectName("centralWidget")
# True is manager
# False is staff
if sys.argv[2] == 'True':
self.accessLevel = True
else:
self.accessLevel = False
self.userLoggedIn = sys.argv[1]
# creating navigation buttons
def navButtons(self):
self.navManagerMenu = QtWidgets.QPushButton(self.centralWidget)
self.navManagerMenu.setGeometry(QtCore.QRect(11, 40, 121, 29))
font = QtGui.QFont()
font.setFamily("Arial")
self.navManagerMenu.setFont(font)
self.navManagerMenu.setObjectName("navManagerMenu")
self.navManagerMenu.setEnabled(self.accessLevel)
self.navCharts = QtWidgets.QPushButton(self.centralWidget)
self.navCharts.setGeometry(QtCore.QRect(10, 240, 121, 29))
font = QtGui.QFont()
font.setFamily("Arial")
self.navCharts.setFont(font)
self.navCharts.setObjectName("navCharts")
self.navAppointments = QtWidgets.QPushButton(self.centralWidget)
self.navAppointments.setGeometry(QtCore.QRect(10, 160, 121, 29))
font = QtGui.QFont()
font.setFamily("Arial")
self.navAppointments.setFont(font)
self.navAppointments.setObjectName("navAppointments")
self.navCustomers = QtWidgets.QPushButton(self.centralWidget)
self.navCustomers.setGeometry(QtCore.QRect(10, 120, 121, 29))
font = QtGui.QFont()
font.setFamily("Arial")
self.navCustomers.setFont(font)
self.navCustomers.setObjectName("navCustomers")
self.navStaff = QtWidgets.QPushButton(self.centralWidget)
self.navStaff.setGeometry(QtCore.QRect(10, 80, 121, 29))
font = QtGui.QFont()
font.setFamily("Arial")
self.navStaff.setFont(font)
self.navStaff.setObjectName("navStaff")
self.navStaff.setEnabled(self.accessLevel)
self.navStock = QtWidgets.QPushButton(self.centralWidget)
self.navStock.setGeometry(QtCore.QRect(10, 200, 121, 29))
font = QtGui.QFont()
font.setFamily("Arial")
self.navStock.setFont(font)
self.navStock.setObjectName("navStock")
self.navTreatments = QtWidgets.QPushButton(self.centralWidget)
self.navTreatments.setGeometry(QtCore.QRect(10, 280, 121, 29))
font = QtGui.QFont()
font.setFamily("Arial")
self.navTreatments.setFont(font)
self.navTreatments.setObjectName("navTreatments")
# whos logged in
self.user = QtWidgets.QLabel(self.centralWidget)
self.user.setGeometry(QtCore.QRect(10, 320, 121, 29))
font = QtGui.QFont()
font.setFamily("Arial Black")
self.user.setFont(font)
self.user.setObjectName("user")
self.label = QtWidgets.QLabel(self.centralWidget)
self.label.setGeometry(QtCore.QRect(10, 11, 101, 17))
font = QtGui.QFont()
font.setFamily("Arial Black")
self.label.setFont(font)
self.label.setObjectName("label")
self.stackedWidget = QtWidgets.QStackedWidget(self.centralWidget)
self.stackedWidget.setGeometry(QtCore.QRect(140, 10, 1141, 691))
font = QtGui.QFont()
font.setFamily("Arial")
self.stackedWidget.setFont(font)
self.stackedWidget.setObjectName("stackedWidget")
# creation code
navButtons(self)
managermenu.createManagerMenu(self)
chartform.createChartForm(self)
staffform.createStaffForm(self)
customerform.createCustomerForm(self)
appointmentform.createAppointmentForm(self)
stockform.createStockForm(self)
treatmentsform.createTreatmentsForm(self)
# main window config
MainMenu.setCentralWidget(self.centralWidget)
self.mainToolBar = QtWidgets.QToolBar(MainMenu)
self.mainToolBar.setObjectName("mainToolBar")
MainMenu.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
self.statusBar = QtWidgets.QStatusBar(MainMenu)
self.statusBar.setObjectName("statusBar")
MainMenu.setStatusBar(self.statusBar)
self.retranslateUi(MainMenu)
if self.accessLevel is True:
self.stackedWidget.setCurrentIndex(0)
else:
self.stackedWidget.setCurrentIndex(4)
QtCore.QMetaObject.connectSlotsByName(MainMenu)
def navigation(self):
# connecting the navigation buttons to the stacked widget
self.navManagerMenu.clicked.connect(lambda : self.stackedWidget.setCurrentIndex(0))
self.navCharts.clicked.connect(lambda : self.stackedWidget.setCurrentIndex(1))
self.navStaff.clicked.connect(lambda : self.stackedWidget.setCurrentIndex(2))
self.navCustomers.clicked.connect(lambda : self.stackedWidget.setCurrentIndex(3))
self.navAppointments.clicked.connect(lambda : self.stackedWidget.setCurrentIndex(4))
self.navStock.clicked.connect(lambda : self.stackedWidget.setCurrentIndex(5))
self.navTreatments.clicked.connect(lambda : self.stackedWidget.setCurrentIndex(6))
def retranslateUi(self, MainMenu):
# adding text to all the labels
_translate = QtCore.QCoreApplication.translate
MainMenu.setWindowTitle(_translate("MainMenu", "MainMenu"))
self.navManagerMenu.setText(_translate("MainMenu", "ManagerMenu"))
self.navCharts.setText(_translate("MainMenu", "Charts"))
self.navAppointments.setText(_translate("MainMenu", "Appointments"))
self.navCustomers.setText(_translate("MainMenu", "Customers"))
self.navStaff.setText(_translate("MainMenu", "Staff"))
self.navStock.setText(_translate("MainMenu", "Stock"))
self.navTreatments.setText(_translate("MainMenu", "Treatments"))
self.label.setText(_translate("MainMenu", "Navigation"))
self.label_5.setText(_translate("MainMenu", "Manager Menu"))
self.label_notifications.setText(_translate("MainMenu", "Notifications"))
self.label_7.setText(_translate("MainMenu", "Backup"))
self.btnBackup.setText(_translate("MainMenu", "Backup"))
self.user.setText(_translate("MainMenu", sys.argv[1]))
self.user.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setText(_translate("MainMenu", "To"))
self.label_2.setText(_translate("MainMenu", "Chart Type"))
self.cmbChartType.setItemText(0, _translate("MainMenu", "Most popular treatment"))
self.cmbChartType.setItemText(1, _translate("MainMenu", "Income"))
self.cmbChartType.setItemText(2, _translate("MainMenu", "Outgoing per stock type"))
self.label_3.setText(_translate("MainMenu", "From"))
self.btnChartCreate.setText(_translate("MainMenu", "Create"))
self.label_31.setText(_translate("MainMenu", "Charts"))
self.label_8.setText(_translate("MainMenu", "Staff Menu"))
self.label_9.setText(_translate("MainMenu", "Add new staff member"))
self.label_10.setText(_translate("MainMenu", "First name"))
self.label_staffsex.setText(_translate("MainMenu", "Staff sex"))
self.label_11.setText(_translate("MainMenu", "Surname"))
self.label_12.setText(_translate("MainMenu", "Username"))
self.label_13.setText(_translate("MainMenu", "Password"))
self.label_14.setText(_translate("MainMenu", "Is this user a manager?"))
self.checkBoxAdmin.setText(_translate("MainMenu", "Yes"))
self.label_15.setText(_translate("MainMenu", "Date of birth"))
self.label_16.setText(_translate("MainMenu", "StaffID"))
self.btnSaveStaff.setText(_translate("MainMenu", "Save"))
self.label_17.setText(_translate("MainMenu", "Search"))
self.btnStaffCancel.setText(_translate("MainMenu", "Cancel"))
self.label_18.setText(_translate("MainMenu", "Add new Customer"))
self.label_19.setText(_translate("MainMenu", "Email"))
self.label_20.setText(_translate("MainMenu", "Surname"))
self.label_21.setText(_translate("MainMenu", "Search"))
self.label_22.setText(_translate("MainMenu", "CustomerID"))
self.btnSaveCustomer.setText(_translate("MainMenu", "Save"))
self.label_23.setText(_translate("MainMenu", "Date of birth"))
self.label_24.setText(_translate("MainMenu", "Primary Contact info"))
self.label_25.setText(_translate("MainMenu", "Phone Number"))
self.label_26.setText(_translate("MainMenu", "First name"))
self.cmbCustomerContact.setItemText(0, _translate("MainMenu", "Phone number"))
self.cmbCustomerContact.setItemText(1, _translate("MainMenu", "Email address"))
self.label_27.setText(_translate("MainMenu", "Address"))
self.label_28.setText(_translate("MainMenu", "Postcode"))
self.label_29.setText(_translate("MainMenu", "Allergies"))
self.label_30.setText(_translate("MainMenu", "Customers"))
self.cmbCustomerSex.setItemText(0, _translate("MainMenu", "Male"))
self.cmbCustomerSex.setItemText(1, _translate("MainMenu", "Female"))
self.label_75.setText(_translate("MainMenu", "Sex"))
self.btnCustomerCancel.setText(_translate("MainMenu", "Cancel"))
self.label_62.setText(_translate("MainMenu", "Search"))
self.label_63.setText(_translate("MainMenu", "Date"))
self.label_65.setText(_translate("MainMenu", "AppointmentID"))
self.label_66.setText(_translate("MainMenu", "Customer"))
self.label_67.setText(_translate("MainMenu", "Add new Appointment"))
self.label_68.setText(_translate("MainMenu", "Amount Paid"))
self.label_70.setText(_translate("MainMenu", "Time"))
self.btnSaveAppointment.setText(_translate("MainMenu", "Save"))
self.label_72.setText(_translate("MainMenu", "Treatment"))
self.label_73.setText(_translate("MainMenu", "Staff"))
self.label_74.setText(_translate("MainMenu", "Appointments"))
self.label_64.setText(_translate("MainMenu", "£"))
self.label_69.setText(_translate("MainMenu", "£"))
self.label_71.setText(_translate("MainMenu", "Amount Due"))
self.btnAppointmentCancel.setText(_translate("MainMenu", "Cancel"))
self.label_76.setText(_translate("MainMenu", "Comment"))
self.label_77.setText(_translate("MainMenu", "Stock alert level"))
self.label_78.setText(_translate("MainMenu", "Add new Stock"))
self.label_81.setText(_translate("MainMenu", "StockID"))
self.btnSaveStock.setText(_translate("MainMenu", "Save"))
self.label_83.setText(_translate("MainMenu", "Amount left"))
self.label_84.setText(_translate("MainMenu", "Name"))
self.label_86.setText(_translate("MainMenu", "Search"))
self.btnStockCancel.setText(_translate("MainMenu", "Cancel"))
self.label_87.setText(_translate("MainMenu", "£"))
self.label_88.setText(_translate("MainMenu", "Price"))
self.label_89.setText(_translate("MainMenu", "Stock"))
# labels for treatmentsform
self.label_90.setText(_translate("MainMenu", "£"))
self.label_91.setText(_translate("MainMenu", "Search"))
self.label_92.setText(_translate("MainMenu", "Price"))
self.label_79.setText(_translate("MainMenu", "Stock amount to use"))
self.label_80.setText(_translate("MainMenu", "Add new Treatments"))
self.label_85.setText(_translate("MainMenu", "Name"))
self.label_82.setText(_translate("MainMenu", "TreatmentID"))
self.label_93.setText(_translate("MainMenu", "Stock name"))
self.btnTreatmentCancel.setText(_translate("MainMenu", "Cancel"))
self.btnSaveTreatment.setText(_translate("MainMenu", "Save"))
self.btnTreatmentAddStock.setText(_translate("MainMenu", "Add"))
self.label_94.setText(_translate("MainMenu", "Stock to use"))
self.label_95.setText(_translate("MainMenu", "Treatments"))
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
MainMenu = QtWidgets.QMainWindow()
ui = Ui_MainMenu()
ui.setupUi(MainMenu)
ui.navigation()
icon = QtGui.QIcon('database/company.png')
MainMenu.setWindowIcon(QtGui.QIcon(icon))
MainMenu.show()
sys.exit(app.exec_())
| 1.984375 | 2 |
SentimentAnalysis/pickleMe.py | sazlin/reTOracle | 0 | 12786894 | import cPickle
def load_pickle(filename):
    # open in binary mode and close the file once the object is loaded
    with open(filename, 'rb') as pickled:
        data = cPickle.load(pickled)
    return data
def export_pickle(filename, the_object):
    # pickle data must be written in binary mode
    with open(filename, 'wb') as pickle_file:
        cPickle.dump(the_object, pickle_file)
 | 2.875 | 3 |
turbustat/tests/test_stat_moments.py | keflavich/TurbuStat | 0 | 12786895 | # Licensed under an MIT open source license - see LICENSE
'''
Test functions for Kurtosis
'''
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from ..statistics import StatMoments, StatMomentsDistance
from ._testing_data import \
dataset1, dataset2, computed_data, computed_distances
class TestMoments(TestCase):
def setUp(self):
self.dataset1 = dataset1
self.dataset2 = dataset2
def test_moments(self):
self.tester = StatMoments(dataset1["integrated_intensity"][0], 5)
self.tester.run()
assert np.allclose(self.tester.kurtosis_hist[1],
computed_data['kurtosis_val'])
assert np.allclose(self.tester.skewness_hist[1],
computed_data['skewness_val'])
def test_moment_distance(self):
self.tester_dist = \
StatMomentsDistance(dataset1["integrated_intensity"][0],
dataset2["integrated_intensity"][0], 5)
self.tester_dist.distance_metric()
npt.assert_almost_equal(self.tester_dist.kurtosis_distance,
computed_distances['kurtosis_distance'])
npt.assert_almost_equal(self.tester_dist.skewness_distance,
computed_distances['skewness_distance'])
| 2.8125 | 3 |
sdcclient/secure/_policy_events_v1.py | DaveCanHaz/sysdig-sdk-python | 0 | 12786896 | import datetime
import requests
from sdcclient._common import _SdcCommon
class PolicyEventsClientV1(_SdcCommon):
def __init__(self, token="", sdc_url='https://secure.sysdig.com', ssl_verify=True, custom_headers=None):
super(PolicyEventsClientV1, self).__init__(token, sdc_url, ssl_verify, custom_headers)
self.customer_id = None
self.product = "SDS"
self._policy_v2 = None
def _get_policy_events_int(self, ctx):
limit = ctx.get("limit", 50)
policy_events_url = self.url + '/api/v1/secureEvents?limit={limit}{frm}{to}{filter}{cursor}'.format(
limit=limit,
frm=f"&from={int(ctx['from']):d}" if "from" in ctx else "",
to=f"&to={int(ctx['to']):d}" if "to" in ctx else "",
filter=f'&filter={ctx["filter"]}' if "filter" in ctx else "",
cursor=f'&cursor={ctx["cursor"]}' if "cursor" in ctx else "")
res = requests.get(policy_events_url, headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
ctx = {
"limit": limit,
"cursor": res.json()["page"].get("prev", None)
}
return [True, {"ctx": ctx, "data": res.json()["data"]}]
def get_policy_events_range(self, from_sec, to_sec, filter=None):
'''**Description**
Fetch all policy events that occurred in the time range [from_sec:to_sec]. This method is used in conjunction
with :func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events.
**Arguments**
        - from_sec: the start of the time range for which to get events
        - to_sec: the end of the time range for which to get events
- filter: this is a SysdigMonitor-like filter (e.g. filter: 'severity in ("4","5") and freeText in ("Suspicious")')
**Success Return Value**
An array containing:
- A context object that should be passed to later calls to get_more_policy_events.
- An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events`
for details on the contents of policy events.
**Example**
`examples/get_secure_policy_events.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_policy_events.py>`_
'''
options = {"from": int(from_sec) * 1_000_000_000,
"to": int(to_sec) * 1_000_000_000,
"limit": 50,
"filter": filter}
ctx = {k: v for k, v in options.items() if v is not None}
return self._get_policy_events_int(ctx)
def get_policy_events_duration(self, duration_sec, filter=None):
'''**Description**
Fetch all policy events that occurred in the last duration_sec seconds. This method is used in conjunction with
:func:`~sdcclient.SdSecureClient.get_more_policy_events` to provide paginated access to policy events.
**Arguments**
- duration_sec: Fetch all policy events that have occurred in the last *duration_sec* seconds.
- filter: this is a SysdigMonitor-like filter (e.g. filter: 'severity in ("4","5") and freeText in ("Suspicious")')
**Success Return Value**
An array containing:
- A context object that should be passed to later calls to get_more_policy_events.
- An array of policy events, in JSON format. See :func:`~sdcclient.SdSecureClient.get_more_policy_events`
for details on the contents of policy events.
**Example**
`examples/get_secure_policy_events.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_policy_events.py>`_
'''
to_sec = int((datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds())
from_sec = to_sec - (int(duration_sec))
return self.get_policy_events_range(from_sec, to_sec, filter)
def get_more_policy_events(self, ctx):
'''**Description**
Fetch additional policy events after an initial call to :func:`~sdcclient.SdSecureClient.get_policy_events_range` /
:func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to get_more_policy_events.
**Arguments**
- ctx: a context object returned from an initial call to :func:`~sdcclient.SdSecureClient.get_policy_events_range` /
:func:`~sdcclient.SdSecureClient.get_policy_events_duration` or a prior call to get_more_policy_events.
**Success Return Value**
An array containing:
- A context object that should be passed to later calls to get_more_policy_events()
- An array of policy events, in JSON format. Each policy event contains the following:
- id: a unique identifier for this policy event
- cursor: unique ID that can be used with get_more_policy_events context to retrieve paginated policy events
- timestamp: when the event occurred (ns since the epoch)
- source: the source of the policy event. It can be "syscall" or "k8s_audit"
- description: the description of the event
- severity: a severity level from 1-7
- agentId: the agent that reported this event
- machineId: the MAC of the machine that reported this event
- content: More information about what triggered the event
- falsePositive: if the event is considered a false-positive
- fields: raw information from the rule that fired this event
- output: Output from the rule that fired this event
- policyId: the ID of the policy that fired this event
- ruleName: name of the rule that fired this event
- ruleTags: tags from the rule that fired this event
- labels: more information from the scope of this event
When the number of policy events returned is 0, there are no remaining events and you can stop calling get_more_policy_events().
**Example**
`examples/get_secure_policy_events.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_policy_events.py>`_
'''
return self._get_policy_events_int(ctx)
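if __name__ == "__main__":
    # Usage sketch (added for illustration, not part of the original module): page
    # through the last hour of policy events. The SDC_SECURE_TOKEN environment
    # variable name is an assumption; any valid Sysdig Secure API token works.
    import os
    client = PolicyEventsClientV1(token=os.environ.get("SDC_SECURE_TOKEN", ""))
    ok, res = client.get_policy_events_duration(3600)
    while ok and res["data"]:
        for event in res["data"]:
            print(event.get("id"), event.get("ruleName"), event.get("severity"))
        ok, res = client.get_more_policy_events(res["ctx"])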
| 2.28125 | 2 |
logwriter/logwriter.py | mbukatov/ocs-workload | 5 | 12786897 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf8 -*-
from datetime import datetime
import argparse
import hashlib
import logging
import os
import sys
import time
import uuid
def main():
ap = argparse.ArgumentParser(description="logwriter")
ap.add_argument(
"directory",
help="directory where to create a log file")
    ap.add_argument(
        "-p",
        type=int,
        default=1,
        help="pause between 2 consecutive writes in seconds")
ap.add_argument(
"--fsync",
action="store_true",
help="run fsync after each write")
ap.add_argument(
"-d",
"--debug",
action="store_true",
help="set log level to DEBUG")
args = ap.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
# name of a target log file is unique for each run
log_name = str(uuid.uuid4()) + ".log"
# TODO: limit size of the log file, rotate log file
# TODO: check given log file
with open(log_name, "w") as log_file:
logging.info("log file %s created", log_name)
timestamp = datetime.now()
log_line = f"{timestamp.isoformat()} started\n"
log_file.write(log_line)
while True:
try:
data = hashlib.sha256(log_line.encode('utf8')).hexdigest()
timestamp = datetime.now()
log_line = f"{timestamp.isoformat()} {data}\n"
log_file.write(log_line)
log_file.flush()
if args.fsync:
os.fsync(log_file.fileno())
time.sleep(args.p)
except KeyboardInterrupt:
logging.info("stopped")
break
if __name__ == '__main__':
sys.exit(main())
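# Example invocation (illustrative only; the directory path and interval are placeholders):
#   python3 logwriter.py /tmp/logwriter-demo -p 1 --fsync
# appends a timestamped, SHA-256-chained line every second and fsyncs after each write.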
| 2.890625 | 3 |
shepherding/util/plot_ss.py | argyili/shepherding-public | 0 | 12786898 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as anim
import time
''' Plot simulation data recorded into csv files '''
''' Initialize plot line by csv '''
def init_plot_line_csv(ax, param, sheeps_color, sheeps_pos, shepherds_color, shepherds_pos):
# Sheep
for i in range(len(sheeps_color)):
ax.plot(sheeps_pos[i][0], sheeps_pos[i][1], sheeps_color[i] + 'o', alpha=0.5)
# Shepherd
for i in range(len(shepherds_color)):
# shepherd_color = shepherds[i][0]
# shepherd_pos = shepherds[i][1]
ax.plot(shepherds_pos[i][0], shepherds_pos[i][1], shepherds_color[i] + 'o', alpha=0.5)
# Goal
goal_zone = patches.Circle(param["goal"], param["goal_radius"], ec="k", fc='white', alpha=1)
ax.add_patch(goal_zone)
# x,y axis range
xy_range = param["sheep_initial_pos_radius"] + 40
ax.set_xlim(-xy_range, xy_range)
ax.set_ylim(-xy_range, xy_range)
ax.set_aspect('equal')
''' Initialize plot line by csv, with more prominent markers '''
def init_plot_line_csv_spec(ax, param, sheeps_color, sheeps_pos, shepherds_color, shepherds_pos):
# Sheep
for i in range(len(sheeps_color)):
ax.plot(sheeps_pos[i][0], sheeps_pos[i][1], sheeps_color[i] + 'o', alpha=0.5)
# Shepherd
for i in range(len(shepherds_color)):
# shepherd_color = shepherds[i][0]
# shepherd_pos = shepherds[i][1]
ax.plot(shepherds_pos[i][0], shepherds_pos[i][1], shepherds_color[i] + 'o', alpha=0.9, markersize=10)
# Goal
goal_zone = patches.Circle(param["goal"], param["goal_radius"], ec="k", fc='white', alpha=1)
ax.add_patch(goal_zone)
# x,y axis range
xy_range = param["sheep_initial_pos_radius"] + 40
ax.set_xlim(-xy_range, xy_range)
ax.set_xticks([-100, 100])
ax.set_ylim(-xy_range, xy_range)
ax.set_yticks([-100, 100])
ax.tick_params(axis='both', which='major', labelsize=14)
ax.set_aspect('equal')
''' Initialize trace by csv '''
def init_trace_csv(ax, param, sheeps_color, sheeps_pos, shepherds_color, shepherds_pos):
# Sheep
for i in range(len(sheeps_color)):
# Only k, not sheeps_color[i]
ax.plot(sheeps_pos[i][0], sheeps_pos[i][1], 'k' + 'o', alpha=0.1, markersize='1')
# Shepherd
for i in range(len(shepherds_color)):
# shepherd_color = shepherds[i][0]
# shepherd_pos = shepherds[i][1]
ax.plot(shepherds_pos[i][0], shepherds_pos[i][1], shepherds_color[i] + 'o', alpha=0.4, markersize='4')
# Goal
goal_zone = patches.Circle(param["goal"], param["goal_radius"], ec="k", fc='white', alpha=1)
ax.add_patch(goal_zone)
# x,y axis range
xy_range = param["sheep_initial_pos_radius"] + 40
ax.set_xlim(-xy_range, xy_range)
ax.set_xticks([-100, 100])
ax.set_ylim(-xy_range, xy_range)
ax.set_yticks([-100, 100])
ax.tick_params(axis='both', which='major', labelsize=14)
ax.set_aspect('equal')
''' Figure out which sheep is target sheep '''
def check_target(sheeps, target_sheep):
for i in range(len(sheeps)):
# 2D space with x,y attribute
if sheeps[i].position[0] == target_sheep.position[0] and sheeps[i].position[1] == target_sheep.position[1]:
return i
return 0
''' Write a line into csv in each step '''
def write_line_csv(writer, sheeps, shepherds):
# k for sheep, r for alive shepherds, c for fault shepherds
s = ["k " + str(sheeps[i].position) for i in range(len(sheeps))]
for i in range(len(shepherds)):
# Plot special color dot at target sheep
if shepherds[i].target_sheep and shepherds[i].target_sheep != []:
target = check_target(sheeps, shepherds[i].target_sheep)
s[target] = s[target].replace('k', 'y')
writer.writerow(s)
''' Write last line into csv with a result '''
def write_last_line_csv(writer, result):
    writer.writerow(result)
 | 2.6875 | 3 |
serial_scripts/llgr/base.py | atsgen/tf-test | 5 | 12786899 | <filename>serial_scripts/llgr/base.py
#
# To run tests, you can do 'python -m testtools.run tests'. To run specific tests,
# you can do 'python -m testtools.run -l tests'
# Set the env variable PARAMS_FILE to point to your ini file. Otherwise it will try to pick up params.ini in PWD
#
from common.neutron.base import BaseNeutronTest
from tcutils.wrappers import preposttest_wrapper
from tcutils.util import *
import test_v1
from common import isolated_creds
from common.connections import ContrailConnections
from tcutils.control.cn_introspect_utils import ControlNodeInspect
from common.device_connection import NetconfConnection
from control_node import CNFixture
import time
class TestLlgrBase(BaseNeutronTest):
@classmethod
def setUpClass(cls):
'''
        It will set up a topology where the agent is connected to only one of the control nodes.
        This way we make sure that the route is learned from a different agent via BGP.
'''
super(TestLlgrBase, cls).setUpClass()
cls.cn_introspect = ControlNodeInspect(cls.inputs.bgp_ips[0])
cls.host_list = cls.connections.orch.get_hosts()
if len(cls.host_list) > 1 and len(cls.inputs.bgp_ips) > 1 :
cls.set_xmpp_peering(compute_ip=cls.inputs.host_data[cls.host_list[0]]['host_control_ip'] ,
ctrl_node=cls.inputs.bgp_ips[0],mode='disable')
cls.set_xmpp_peering(compute_ip=cls.inputs.host_data[cls.host_list[1]]['host_control_ip'] ,
ctrl_node=cls.inputs.bgp_ips[1],mode='disable')
if cls.inputs.ext_routers:
cls.mx1_ip = cls.inputs.ext_routers[0][1]
            # TODO remove the hard coding once we get these parameters populated from testbed
            cls.mx_user = 'root'
            cls.mx_password = '<PASSWORD>'
            cls.mx1_handle = NetconfConnection(host=cls.mx1_ip, username=cls.mx_user, password=cls.mx_password)
cls.mx1_handle.connect()
time.sleep(20)
# end setUp
@classmethod
def tearDownClass(cls):
'''
        It will remove the topology where the agent is connected to only one of the control nodes.
'''
cls.set_bgp_peering(mode='enable')
cls.set_gr_llgr(mode='disable')
cls.set_xmpp_peering(compute_ip=cls.inputs.host_data[cls.host_list[0]]['host_control_ip'] ,
mode='enable')
cls.set_xmpp_peering(compute_ip=cls.inputs.host_data[cls.host_list[1]]['host_control_ip'] ,
mode='enable')
super(TestLlgrBase, cls).tearDownClass()
# end cleanUp
@classmethod
def set_gr_llgr(self, **kwargs):
'''
Enable/Disable GR / LLGR configuration with gr/llgr timeout values as parameters
'''
gr_timeout = kwargs['gr']
llgr_timeout = kwargs['llgr']
gr_enable = True if kwargs['mode'] == 'enable' else False
eor_timeout = '60'
router_asn = '64512' if gr_enable == True else self.inputs.router_asn
cntrl_fix = self.useFixture(CNFixture(
connections=self.connections,
router_name=self.inputs.ext_routers[0][0],
router_ip=self.mx1_ip,
router_type='mx',
inputs=self.inputs))
cntrl_fix.set_graceful_restart(gr_restart_time=gr_timeout,
llgr_restart_time = llgr_timeout,
eor_timeout = eor_timeout,
gr_enable = gr_enable,
router_asn = router_asn,
bgp_helper_enable = True,
xmpp_helper_enable = False)
return True
@classmethod
def set_bgp_peering(self,**kwargs):
'''
Stop and start of BGP peer communication so that GR/LLGR timers are triggered
'''
mode = kwargs['mode']
if mode == 'disable':
cmd = 'iptables -A OUTPUT -p tcp --destination-port 179 -j DROP; \
iptables -A INPUT -p tcp --destination-port 179 -j DROP'
else:
cmd = 'iptables -D OUTPUT -p tcp --destination-port 179 -j DROP; \
iptables -D INPUT -p tcp --destination-port 179 -j DROP'
self.logger.debug('%s bgp peering : %s' %(mode,cmd))
self.inputs.run_cmd_on_server(self.inputs.bgp_ips[0],cmd)
return True
def verify_traffic_loss(self,**kwargs):
vm1_fixture = kwargs['vm_fixture']
result_file = kwargs['result_file']
pkts_trans = '0'
pkts_recv = '0'
ret = False
cmd = 'cat %s | grep loss'% result_file
res = vm1_fixture.run_cmd_on_vm(cmds=[cmd])
self.logger.debug('results %s' %(res))
if not res[cmd]:
self.logger.error("Not able to get the log file %s"%res)
return (ret,pkts_trans,pkts_recv)
pattern = '''(\S+) packets transmitted, (\S+) received, (\S+)% packet loss'''
res = re.search(pattern,res[cmd])
if res:
pkts_trans = res.group(1)
pkts_recv = res.group(2)
loss = res.group(3)
ret = True
return (ret,pkts_trans,pkts_recv)
def verify_gr_llgr_flags(self,**kwargs):
'''
Validate Stale / LLgrStale flags after GR/LLGR timer is triggered
'''
flags = kwargs['flags']
vn_fix = kwargs['vn_fix']
prefix = kwargs['prefix']
ri = vn_fix.vn_fq_name+":"+vn_fix.vn_name
rtbl_entry = self.cn_introspect.get_cn_route_table_entry(prefix,ri)
if not rtbl_entry:
self.logger.error("Not able to find route table entry %s"%prefix)
return False
rtbl = self.cn_introspect.get_cn_route_table_entry(prefix,ri)[0]
if not rtbl :
self.logger.error("Not able to find route table for prefix %s"%prefix)
return False
self.logger.debug('prefix flags %s' %(rtbl['flags']))
if flags != rtbl['flags'] :
            self.logger.error("Not able to find route flags for prefix %s:%s" % (flags, rtbl['flags']))
return False
return True
@classmethod
def set_xmpp_peering(self,**kwargs):
'''
Enabling / Disabling XMPP peer communication
'''
compute_ip = kwargs['compute_ip']
mode = kwargs['mode']
control_ips = []
if mode == 'disable':
ctrl_host = kwargs['ctrl_node']
ctrl_ip = self.inputs.host_data[ctrl_host]['host_data_ip']
ctrl_ip = ctrl_ip + ":"+'5269'
self.configure_server_list(compute_ip, 'contrail-vrouter-agent',
'CONTROL-NODE', 'servers' , [ctrl_ip], container = "agent")
else :
control_ips = [self.inputs.host_data[x]['host_data_ip']+":"+'5269' for x in self.inputs.bgp_ips]
self.configure_server_list(compute_ip, 'contrail-vrouter-agent',
'CONTROL-NODE', 'servers' , control_ips , container = "agent")
return True
@classmethod
def set_headless_mode(self,**kwargs):
'''
Enabling/Disabling headless mode in agent
'''
mode = kwargs['mode']
if mode == 'enable':
cmd = '/usr/bin/openstack-config --set /etc/contrail/contrail-vrouter-agent.conf DEFAULT headless_mode true'
else:
cmd = '/usr/bin/openstack-config --del /etc/contrail/contrail-vrouter-agent.conf DEFAULT headless_mode'
for host in self.host_list:
self.logger.debug('enable headless mode %s : %s' %(host,cmd))
self.inputs.run_cmd_on_server(host,cmd,container='agent')
self.inputs.restart_service('contrail-vrouter-agent',self.host_list)
return True
def verify_gr_bgp_flags(self,**kwargs):
'''
Check for Notification bit,GR timeout,Forwarding state are sent properly during BGP Open message.
'''
pcap_file = kwargs['pcap_file']
host = kwargs['host']
control_ip = self.inputs.host_data[host]['control-ip']
# Graceful Restart (64), length: 18
# Restart Flags: [none], Restart Time 35s
# AFI IPv4 (1), SAFI labeled VPN Unicast (128), Forwarding state preserved: yes
# AFI VPLS (25), SAFI Unknown (70), Forwarding state preserved: yes
# AFI IPv4 (1), SAFI Route Target Routing Information (132), Forwarding state preserved: yes
# AFI IPv6 (2), SAFI labeled VPN Unicast (128), Forwarding state preserved: yes
# 0x0000: 4023 0001 8080 0019 4680 0001 8480 0002 (Check for 4023) , where 4 is notification bit
# 4023 notification bit is set
# 8080 forwarding state is set
# 23 is 35 sec of gr time
cmd = 'tcpdump -r %s -vvv -n | grep Open -A28 | grep %s -A28 | grep \'Restart Flags\' -A5 | grep 0x0000'%(pcap_file,control_ip)
res = self.inputs.run_cmd_on_server(host,cmd)
self.logger.debug('results %s' %(res))
res = res.split(':')
flags = res[1].split(' ')
if not flags:
self.logger.error("not able to get flags %s"%flags)
return False
flags = [x for x in flags if x != '']
self.logger.info("flags set properly %s"%flags[0])
        if flags[0] != '4023' or flags[2] != '8080':
self.logger.error("flags not set properly %s"%flags[0])
return False
return True
def verify_llgr_bgp_flags(self,**kwargs):
'''
Check for Notification bit, LLGR timeout,Forwarding state are sent properly during BGP Open message.
'''
pcap_file = kwargs['pcap_file']
host = kwargs['host']
control_ip = self.inputs.host_data[host]['control-ip']
# Unknown (71), length: 28
# no decoder for Capability 71
# 0x0000: 0001 8080 0000 3c00 1946 8000 003c 0001
# 8080 forwarding state is set
        # 0x3c (60 sec) is the llgr stale time
cmd = 'tcpdump -r %s -vvv -n | grep Open -A28 | grep %s -A28 | grep \'Unknown (71)\' -A3 | grep 0x0000'%(pcap_file,control_ip)
res = self.inputs.run_cmd_on_server(host,cmd)
self.logger.debug('results %s' %(res))
res = res.split(':')
flags = res[1].split(' ')
if not flags:
self.logger.error("not able to get flags %s"%flags)
return False
flags = [x for x in flags if x != '']
self.logger.info("flags set properly %s"%flags[0])
        if flags[3] != '3c00' or flags[1] != '8080':
self.logger.error("flags not set properly %s"%flags[0])
return False
return True
@classmethod
def configure_server_list(self, client_ip, client_process,
section, option, server_list, container):
'''
This function configures the .conf file with new server_list
and then send a sighup to the client so that configuration
change is effective.
'''
client_conf_file = client_process + ".conf"
server_string = " ".join(server_list)
cmd_set = "openstack-config --set /etc/contrail/" + client_conf_file
cmd = cmd_set + " " + section + " " + option + ' "%s"' % server_string
self.inputs.run_cmd_on_server(client_ip, cmd,
self.inputs.host_data[client_ip]['username']\
, self.inputs.host_data[client_ip]['password'],
container = container)
if "nodemgr" in client_process:
            # rstrip() strips a character set, not a suffix; slice off "-nodemgr" instead
            nodetype = client_process[:-len("-nodemgr")]
client_process = "contrail-nodemgr --nodetype=%s" % nodetype
else:
client_process = "/usr/bin/" + client_process
pid_cmd = 'pgrep -f -o "%s"' % client_process
pid = int(self.inputs.run_cmd_on_server(client_ip, pid_cmd,
self.inputs.host_data[client_ip]['username']\
, self.inputs.host_data[client_ip]['password'],
container = container))
sighup_cmd = "kill -SIGHUP %d " % pid
self.inputs.run_cmd_on_server(client_ip, sighup_cmd,
self.inputs.host_data[client_ip]['username']\
, self.inputs.host_data[client_ip]['password'],
container = container)
def is_test_applicable(self):
        # check for at least 2 compute nodes
        if len(self.host_list) < 2 :
            return (False, "compute nodes are not sufficient")
        # check for at least 2 control nodes
        if len(self.inputs.bgp_ips) < 2 :
            return (False, "control nodes are not sufficient")
# check for 1 mx
if len(self.inputs.ext_routers) < 1:
self.logger.error("MX routers are not sufficient : %s"%len(self.inputs.ext_routers))
return (False ,"MX routers are not sufficient")
return (True,None)
| 1.976563 | 2 |
sorts/bucket_sort.py | coderpower0/Python | 2 | 12786900 | <filename>sorts/bucket_sort.py<gh_stars>1-10
#!/usr/bin/env python
# Author: <NAME>
# This program will illustrate how to implement bucket sort algorithm
# Wikipedia says: Bucket sort, or bin sort, is a sorting algorithm that works by distributing the
# elements of an array into a number of buckets. Each bucket is then sorted individually, either using
# a different sorting algorithm, or by recursively applying the bucket sorting algorithm. It is a
# distribution sort, and is a cousin of radix sort in the most to least significant digit flavour.
# Bucket sort is a generalization of pigeonhole sort. Bucket sort can be implemented with comparisons
# and therefore can also be considered a comparison sort algorithm. The computational complexity estimates
# involve the number of buckets.
# Time Complexity of Solution:
# Best Case O(n); Average Case O(n); Worst Case O(n)
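# Worked illustration (added for clarity, not in the original): with the default
# bucket_size of 5, the list [29, 25, 3, 49, 9, 37, 21, 43] has min=3 and max=49,
# giving (49-3)//5 + 1 = 10 buckets; 3 falls in bucket 0, 9 in bucket 1, 21 in
# bucket 3, 25 in bucket 4, and so on. Each bucket is sorted individually and the
# buckets are concatenated in index order to produce the sorted result.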
DEFAULT_BUCKET_SIZE=5
def bucket_sort(my_list,bucket_size=DEFAULT_BUCKET_SIZE):
    if len(my_list) == 0:
        print("you don't have any elements in array!")
        return []
    min_value=min(my_list)
max_value=max(my_list)
bucket_count=(max_value-min_value)//bucket_size+1
buckets=[]
for i in range(bucket_count):
buckets.append([])
for i in range(len(my_list)):
buckets[(my_list[i]-min_value)//bucket_size].append(my_list[i])
sorted_array=[]
for i in range(len(buckets)):
buckets[i].sort()
for j in range(len(buckets[i])):
sorted_array.append(buckets[i][j])
return sorted_array
# test
# based on python 3.7.3
user_input = input('Enter numbers separated by a comma:').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(bucket_sort(unsorted))
| 4.5 | 4 |
python/src/heapa.py | tankersleyj/algorithms | 0 | 12786901 | # MIT (c) jtankersley 2019-06-04
from enum import Enum
"""
Array based Heap.
left_child_index = index * 2 + 1
right_child_index = index * 2 + 2
parentIndex = (index - 1) // 2
00
01 02
03 04 05 06
07 08 09 10 11 12 13 14
"""
class HeapType(Enum):
MINIMUM = 1
MAXIMUM = 2
""" Min or Max array based Heap."""
class Heap():
def __init__(self, type, list=[]):
self.heap = []
self.type = type
if len(list) > 0:
for index, value in enumerate(list):
self.push(value)
def __str__(self):
"""Print value in-order."""
if self.type == HeapType.MINIMUM:
return str(self.get_sorted_list())
else:
return str(self.get_sorted_list(True))
def is_empty(self):
return True if len(self.heap) == 0 else False
def _swap(self, index1, index2):
self.heap[index1], self.heap[index2] = self.heap[index2], self.heap[index1]
def _parent_index(self, index):
return (index - 1) // 2
def _child_left_index(self, index):
return index * 2 + 1
def _child_right_index(self, index):
return index * 2 + 2
def _balance_up(self, index):
if index > 0:
value = self.heap[index]
parent_index = self._parent_index(index)
parent_value = self.heap[parent_index]
if self.type == HeapType.MAXIMUM and value > parent_value:
self._swap(index, parent_index)
self._balance_up(parent_index)
if self.type == HeapType.MINIMUM and value < parent_value:
self._swap(index, parent_index)
self._balance_up(parent_index)
def push(self, value):
self.heap.append(value)
last_index = len(self.heap) - 1
self._balance_up(last_index)
def _balance_down(self, index):
max_index = len(self.heap) - 1
left_child_index = self._child_left_index(index)
right_child_index = self._child_right_index(index)
if left_child_index <= max_index:
self._balance_up(left_child_index)
self._balance_down(left_child_index)
if right_child_index <= max_index:
self._balance_up(right_child_index)
self._balance_down(right_child_index)
def pop(self):
last_index = len(self.heap) - 1
if last_index >= 0:
value = self.heap[0]
if last_index > 0:
self.heap[0] = self.heap.pop()
self._balance_down(0)
else:
self.heap.pop()
return value
def get_sorted_list(self, reverse=False):
copy_list = self.heap.copy()
copy_list.sort(reverse=reverse)
return copy_list
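# Usage sketch (added for illustration, not part of the original module): build a
# minimum heap, then pop values back out in ascending order.
if __name__ == '__main__':
    heap = Heap(HeapType.MINIMUM, [5, 1, 4, 2, 3])
    print(heap)        # [1, 2, 3, 4, 5] - values shown in sorted order
    print(heap.pop())  # 1, the smallest value
    heap.push(0)
    print(heap.pop())  # 0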
| 3.8125 | 4 |
scripts/db_tools/init_db.py | Chaoyingz/paper_trading | 0 | 12786902 | import click
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo.errors import CollectionInvalid
from app import settings
from scripts.utils import coro
async def create_collection(db: AsyncIOMotorClient, collection_name: str):
    """Create a collection."""
try:
await db[settings.MONGO_DB].create_collection(collection_name)
except CollectionInvalid as e:
click.echo(e)
else:
        click.echo(f"Created collection {collection_name} successfully\n")
@click.command("initdb")
@coro
async def init_db():
    """Initialize the database."""
    if click.confirm("Initializing the database may cause existing data to be lost. Continue?"):
client = AsyncIOMotorClient(settings.db.url)
await create_collection(client, settings.db.collections.user)
await create_collection(client, settings.db.collections.order)
await client[settings.MONGO_DB][settings.db.collections.order].create_index(
"entrust_id"
)
await create_collection(client, settings.db.collections.position)
await client[settings.MONGO_DB][settings.db.collections.position].create_index(
"user"
)
await client[settings.MONGO_DB][settings.db.collections.position].create_index(
"symbol"
)
await client[settings.MONGO_DB][settings.db.collections.position].create_index(
"exchange"
)
await create_collection(client, settings.db.collections.user_assets_record)
await create_collection(client, settings.db.collections.statement)
        click.echo("Database initialization complete.")
else:
        click.echo("Database initialization aborted by user.")
| 2.328125 | 2 |
nb4/run_policy_simulation.py | Agoric/testnet-notes | 2 | 12786903 | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Block Scheduling Policy Simulation
#
# context: [build model of computron\-to\-wallclock relationship · Issue \#3459 · Agoric/agoric\-sdk](https://github.com/Agoric/agoric-sdk/issues/3459)
# ## Preface: PyData
import pandas as pd
import numpy as np
dict(pandas=pd.__version__,
numpy=np.__version__)
# ## MySql Access
TOP = __name__ == '__main__'
# +
import logging
from sys import stderr
logging.basicConfig(level=logging.INFO, stream=stderr,
format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger(__name__)
if TOP:
log.info('notebook start')
# +
from slogdata import mysql_socket, show_times
def _slog4db(database='slog4'):
from sqlalchemy import create_engine
return create_engine(mysql_socket(database, create_engine))
_db4 = _slog4db()
_db4.execute('show tables').fetchall()
# -
# ## Global compute results for agorictest-16
#
# Based on one validator, **Provalidator**.
show_times(pd.read_sql('''
select *
from slog_run
where parent = 'Provalidator'
limit 10
''', _db4), ['time_lo', 'time_hi', 'blockTime_lo', 'blockTime_hi']).iloc[0]
# +
# _db4.execute('''drop table if exists delivery_compute_16''');
# +
def build_consensus_compute(theValidator, db,
table='delivery_compute_16'):
"""We include duration from 1 validator as well.
"""
log.info('creating %s', table)
db.execute(f'''
create table if not exists {table} as
with slog1 as (
select file_id
from file_info
where parent = %(theValidator)s
)
select blockHeight, blockTime, crankNum, vatID, deliveryNum, compute, dur
from j_delivery r
cross join slog1
where r.file_id = slog1.file_id
order by crankNum
''', dict(theValidator=theValidator))
agg = pd.read_sql(f'select count(*) from {table}', db)
log.info('done:\n%s', agg)
return pd.read_sql(f'select * from {table} limit 5', db)
build_consensus_compute('Provalidator', _db4)
# -
_dc16 = pd.read_sql('select * from delivery_compute_16 order by crankNum', _db4, index_col='crankNum')
_dc16.tail()
# `crankNum` goes from 31 to 34; 32 and 33 are missing. Perhaps `create-vat` cranks?
# +
def simulate_run_policy(df, threshold=8e6):
# does 10e6 help with bank update latency?
# only a little: max ~50 rather than ~70
meter = 0
# t_in = t_out = df.blockTime[0]
block_in = block_out = df.blockHeight.values[0]
for crankNum, d in df.iterrows():
if d.blockHeight > block_in:
block_in = d.blockHeight
if block_in > block_out:
block_out = block_in
meter = 0
yield block_out # do the work
meter += d.compute
if meter > threshold:
meter = 0
block_out += 1
df = _dc16[_dc16.blockHeight > _dc16.blockHeight.min()]
_sim16 = df.assign(bkSim=list(simulate_run_policy(df)))
_sim16
# -
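# A quick synthetic check of `simulate_run_policy` (added for illustration, not part
# of the original analysis): three cranks in one block; the second crank pushes the
# meter past the threshold, so the third crank lands in the next simulated block.
# +
_toy = pd.DataFrame({'blockHeight': [1, 1, 1], 'compute': [5e6, 5e6, 1e6]})
list(simulate_run_policy(_toy, threshold=8e6))  # expected: [1, 1, 2]
# -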
_sim16[_sim16.bkSim < _sim16.blockHeight]
# +
def sim_aux_stats(df):
original = df.groupby('blockHeight')
df = original.apply(lambda g: g.assign(
computeInBlock=g.compute.cumsum(),
durationInBlock=g.dur.cumsum()))
df = df.reset_index(level=0, drop=True)
simulated = df.groupby('bkSim')
df = simulated.apply(lambda g: g.assign(
computeInSimBlk=g.compute.cumsum(),
durationInSimBlk=g.dur.cumsum()))
df = df.reset_index(level=0, drop=True)
return df
df = sim_aux_stats(_sim16)
# -
df[['vatID', 'deliveryNum',
'blockHeight',
'compute', 'computeInBlock', 'computeInSimBlk']].describe()
df[['computeInBlock', 'computeInSimBlk']].plot(figsize=(12, 4));
df[['vatID', 'deliveryNum',
'blockHeight',
'dur', 'durationInBlock', 'durationInSimBlk']].describe()
df[['durationInBlock', 'durationInSimBlk']].plot(figsize=(12, 6));
# ## Computrons go 3.7x faster early in agorictest-16
sim_lo = df[(df.index >= 20000) & (df.index <= 70000)]
sim_lo = sim_lo.reset_index().set_index(['blockHeight', 'crankNum'])
sim_lo = sim_lo.assign(rate=sim_lo.compute / sim_lo.dur)
sim_lo[['durationInBlock', 'durationInSimBlk']].plot(figsize=(12, 4));
sim_lo[['compute', 'computeInBlock', 'computeInSimBlk',
'dur','durationInBlock', 'durationInSimBlk', 'rate']].describe()
sim_hi = df[df.index >= 250000]
sim_hi = sim_hi.reset_index().set_index(['blockHeight', 'crankNum'])
sim_hi = sim_hi.assign(rate=sim_hi.compute / sim_hi.dur)
sim_hi[['durationInBlock', 'durationInSimBlk']].plot(figsize=(12, 4));
sim_hi[['compute', 'computeInBlock', 'computeInSimBlk',
'dur','durationInBlock', 'durationInSimBlk', 'rate']].describe()
# +
rate_lo_median = 1.738564e+06
rate_hi_median = 4.711452e+05
round(rate_lo_median / rate_hi_median, 1)
# -
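# That is, 1.738564e6 / 4.711452e5 ≈ 3.7: the median per-delivery computron rate
# (compute/dur) in the early window is roughly 3.7x the rate in the later window,
# which is where the 3.7x figure in the heading above comes from.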
# ## Latency
df['delay'] = df.bkSim - df.blockHeight
df[['delay']].describe()
df[['durationInBlock', 'durationInSimBlk', 'delay']].plot(figsize=(12, 4))
df.sort_values('delay', ascending=False).head(50)
# ## Zoom in on the X axis
103200 and 105500
# +
_zoom = df.loc[103200:105500]
_zoom = _zoom.reset_index().set_index(['blockHeight', 'crankNum'])
_zoom[['computeInBlock', 'computeInSimBlk']].plot(figsize=(12, 4), rot=-75);
# -
x = _zoom.reset_index()
g = x.groupby('bkSim')
x = pd.concat([
    g[['blockHeight']].min(),
    g[['compute']].sum(),
    g[['dur']].sum()
], axis=1)
x = x.assign(delay=x.index - x.blockHeight)
x
x[['compute', 'dur', 'delay']].plot(subplots=True, figsize=(15, 9))
_zoom[['durationInBlock', 'durationInSimBlk']].plot(figsize=(12, 4), rot=-75);
(df.bkSim - df.blockHeight).describe()
(df.bkSim - df.blockHeight).hist(figsize=(10, 5), bins=72, log=True)
# ## Elapsed time* on the X axis
#
# *estimated as cumulative crank duration
_zoom = df.loc[103273:104400].copy()
# _zoom = df
_zoom['t'] = _zoom.dur.cumsum()
_zoom.set_index('t')[['durationInBlock', 'durationInSimBlk']].plot(figsize=(12, 4), rot=-75);
# ### Detailed Data
_zoom.groupby('bkSim').apply(lambda g: g.head(10))[50:100][[
'dur', 't', 'durationInSimBlk', 'durationInBlock',
'compute', 'computeInSimBlk', 'computeInBlock',
'blockHeight', 'delay'
]]
df.loc[103200:105500][['delay']].plot()
x = pd.read_sql('''
select *
from j_delivery
where crankNum between 103200 and 105500
and file_id = 3288529541296525
''', _db4)
x
show_times(x[x.index.isin([x.index.min(), x.index.max()])])[['crankNum', 'blockHeight', 'blockTime']]
x.blockHeight.max() - x.blockHeight.min()
x.blockHeight.describe()
x[x.compute > 1000000].groupby('method')[['compute', 'dur']].aggregate(['count', 'median', 'mean'])
x = pd.read_sql('''
select *
from t_delivery
where method = 'fromBridge'
and blockHeight between 68817 and 69707
and file_id = 3288529541296525
''', _db4)
x
_db4.execute('''
create index if not exists slog_entry_bk_ix on slog_entry(blockHeight)
''');
_db4.execute('drop index if exists slog_entry_ty_ix on slog_entry');
# +
def bank_trace(db,
limit=250,
file_id=3288529541296525,
bk_lo=68817,
bk_hi=69707):
df = pd.read_sql(
'''
with d as (
select file_id, run_line_lo
, line
, blockHeight
, blockTime
, time
, crankNum
, cast(substr(json_unquote(json_extract(record, '$.vatID')), 2) as int) vatID
, coalesce(cast(json_extract(record, '$.deliveryNum') as int), -1) deliveryNum
, json_extract(record, '$.kd') kd
from slog_entry e
where blockHeight between %(bk_lo)s and %(bk_hi)s
and file_id = %(file_id)s
and type = 'deliver'
limit %(limit)s
),
detail as (
select d.*
, json_unquote(json_extract(d.kd, '$[0]')) tag
, json_unquote(json_extract(d.kd, '$[1]')) target
, json_unquote(json_extract(d.kd, '$[2].method')) method
, json_length(json_unquote(json_extract(d.kd, '$[2].args.body')), '$[1].updated') updated
from d
)
select blockHeight, blockTime, crankNum, vatID, deliveryNum
, tag
, case when tag = 'message' then target else null end target
, method, updated
, time
-- validator-specific: file_id, run_line_lo, line, time
from detail
-- where method = 'fromBridge'
order by blockHeight, crankNum
''', db, params=dict(limit=limit, file_id=file_id, bk_lo=bk_lo, bk_hi=bk_hi))
return df
# x1 = bank_trace(_db4, bk_hi=68817 + 100)
# x2 = bank_trace(_db4, bk_lo=69707 - 100)
# x = pd.concat([x1, x2])
x = bank_trace(_db4, limit=1000)
show_times(x)
# -
x.updated.describe()
x1 = x[~x.updated.isnull()]
color = np.where(x1.vatID == 1, 'blue', 'red')
show_times(x1).plot.scatter(x='time', y='updated', color=color,
figsize=(10, 4), alpha=0.45,
title='Accounts Updated per delivery');
# +
import json
def notifer_traffic(df):
kd = df.record.apply(lambda txt: json.loads(txt)['kd'])
dt = kd.apply(lambda k: k[0])
method = kd.apply(lambda k: k[2].get('method') if k[0] == 'message' else None)
body = kd.apply(lambda k: k[2].get('args', {}).get('body') if k[0] == 'message' else None)
body = body.apply(lambda v: json.loads(v) if v else None)
updated = body.apply(lambda b: len(b[1].get('updated')) if b else None)
# time = pd.as_datetime(df.time.dt.time)
df = df.assign(dt=dt, method=method, body=body, updated=updated)
dur = df.time.diff()
return df.assign(dur=dur)
notifer_traffic(show_times(x2)).drop(columns=['file_id', 'run_line_lo', 'line', 'record', 'body'])
# -
show_times(x2)
x2.record[0]
len(x2.record[0])
# +
import json
r = json.loads(x2.record[0])
body = r['kd'][2]['args']['body']
x = json.loads(body)
# print(json.dumps(r, indent=2))
len(x[1]['updated'])
# -
x2.record[1]
x2.record[2]
| 1.9375 | 2 |
sections/contributor.py | codeine-coding/APA-References | 1 | 12786904 | <gh_stars>1-10
from tkinter import Entry, W, StringVar, Listbox, BOTH
from utils.apa_widgets import *
class ContributorSection(Section):
def __init__(self, master, **options):
Section.__init__(self, master, **options)
self.contributor_first_name = StringVar()
self.contributor_middle_name = StringVar()
self.contributor_last_name = StringVar()
contributors_frame = Section(self.master)
PrimaryLabel(contributors_frame, text='Contributors:', font=('', 11, 'bold')).grid(row=0, columnspan=3,
sticky=W)
# first name
PrimaryLabel(contributors_frame, text='First Name').grid(row=1, sticky=W)
self.contributor_first_name_entry = Entry(contributors_frame, width=20,
textvariable=self.contributor_first_name)
self.contributor_first_name_entry.grid(row=2, column=0, sticky=W)
# middle name
PrimaryLabel(contributors_frame, text='MI').grid(row=1, column=1, sticky=W)
Entry(contributors_frame, width=5, textvariable=self.contributor_middle_name).grid(row=2, column=1, sticky=W)
# last name
PrimaryLabel(contributors_frame, text='Last Name').grid(row=1, column=2, sticky=W)
Entry(contributors_frame, width=20, textvariable=self.contributor_last_name).grid(row=2, column=2, sticky=W)
contributors_frame.pack(anchor=W)
class ContributorListBox(Section):
def __init__(self, master, **options):
Section.__init__(self, master, **options)
PrimaryLabel(self.master, text='Current Article Contributors').pack()
self.contributor_listbox = Listbox(self.master)
        self.contributor_listbox.pack(fill=BOTH)
 | 3.0625 | 3 |
track_people_py/scripts/track_sort_3d_people.py | kufusha/cabot | 0 | 12786905 | <filename>track_people_py/scripts/track_sort_3d_people.py
#!/usr/bin/env python3
# Copyright (c) 2021 IBM Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import cv2
from cv_bridge import CvBridge, CvBridgeError
from matplotlib import pyplot as plt
import numpy as np
import rospy
import time
from track_abstract_people import AbsTrackPeople
from track_utils.tracker_sort_3d import TrackerSort3D
from track_people_py.msg import BoundingBox, TrackedBox, TrackedBoxes
class TrackSort3dPeople(AbsTrackPeople):
def __init__(self, device, minimum_valid_track_time_length,
iou_threshold, iou_circle_size, n_frames_inactive_to_remove):
AbsTrackPeople.__init__(self, device, minimum_valid_track_time_length)
# set tracker
self.tracker = TrackerSort3D(iou_threshold=iou_threshold, iou_circle_size=iou_circle_size,
n_frames_min_valid_track_length=minimum_valid_track_time_length,
n_frames_inactive_to_remove=n_frames_inactive_to_remove)
self.combined_detected_boxes_pub = rospy.Publisher('track_people_py/combined_detected_boxes', TrackedBoxes, queue_size=10)
self.buffer = {}
def detected_boxes_cb(self, detected_boxes_msg):
# check if tracker is initialized
if not hasattr(self, 'tracker'):
return
# 2022.01.12: remove time check for multiple detection
# check if image is received in correct time order
# cur_detect_time_sec = detected_boxes_msg.header.stamp.to_sec()
# if cur_detect_time_sec<self.prev_detect_time_sec:
# return
if detected_boxes_msg.camera_id not in self.buffer:
self.buffer[detected_boxes_msg.camera_id] = detected_boxes_msg
return
# make sure if only one camera is processed
if self.processing_detected_boxes:
return
self.processing_detected_boxes = True
combined_msg = None
for key in self.buffer:
msg = self.buffer[key]
if not combined_msg:
combined_msg = msg
else:
combined_msg.tracked_boxes.extend(msg.tracked_boxes)
self.buffer = {detected_boxes_msg.camera_id: detected_boxes_msg}
detect_results, center_bird_eye_global_list = self.preprocess_msg(combined_msg)
self.combined_detected_boxes_pub.publish(combined_msg)
start_time = time.time()
try:
_, id_list, color_list, tracked_length = self.tracker.track(detect_results, center_bird_eye_global_list, self.frame_id)
except:
            rospy.logerr("tracking error")
self.processing_detected_boxes = False
return
elapsed_time = time.time() - start_time
# rospy.loginfo("time for tracking :{0}".format(elapsed_time) + "[sec]")
self.pub_result(combined_msg, id_list, color_list, tracked_length)
self.vis_result(combined_msg, id_list, color_list, tracked_length)
self.frame_id += 1
# self.prev_detect_time_sec = cur_detect_time_sec
self.processing_detected_boxes = False
def main():
device = "cuda"
# minimum valid track length should be always 0 for multi camera
#minimum_valid_track_time_length = 3 # Minimum time length to consider track is valid
minimum_valid_track_time_length = 0 # Minimum time length to consider track is valid
iou_threshold = 0.01 # IOU threshold to consider detection between frames as same person
iou_circle_size = 1.0 # radius of circle in bird-eye view to calculate IOU
n_frames_inactive_to_remove = 30 # number of frames for a track to be inactive before removal
track_people = TrackSort3dPeople(device, minimum_valid_track_time_length, iou_threshold, iou_circle_size, n_frames_inactive_to_remove)
try:
plt.ion()
plt.show()
rospy.spin()
except KeyboardInterrupt:
rospy.loginfo("Shutting down")
if __name__=='__main__':
main()
| 1.390625 | 1 |
redmine_shell/shell/input.py | shlomo90/redmine_shell | 1 | 12786906 | <gh_stars>1-10
""" Input for Redmine shell. """
import sys
import os
import termios
import contextlib
from enum import Enum
from .command import Command
from redmine_shell.command.system.commands import History, HistoryMove
class State(Enum):
''' Character Key Event State. '''
CONTINUE = -1
BREAK = -2
@contextlib.contextmanager
def _raw_mode(file):
""" Make terminal raw mode for getting an event pressing a key. """
old_attrs = termios.tcgetattr(file.fileno())
new_attrs = old_attrs[:]
new_attrs[3] = new_attrs[3] & ~(termios.ECHO | termios.ICANON)
try:
termios.tcsetattr(file.fileno(), termios.TCSADRAIN, new_attrs)
yield
finally:
termios.tcsetattr(file.fileno(), termios.TCSADRAIN, old_attrs)
def redmine_input(prompt='', complete_command=None, history=False):
""" Customized input function for redmine shell. """
if complete_command is None:
complete_command = []
# TODO: inline
sys.stdout.write(prompt)
sys.stdout.flush()
with _raw_mode(sys.stdin):
def rewrite(new, old):
origin_len = len(old)
sys.stdout.write('\r{}\r'.format(' ' * (origin_len + len(prompt))))
sys.stdout.write(prompt + ''.join(new))
sys.stdout.flush()
def complete(buf):
target = ''.join(buf).strip()
if not target:
sys.stdout.write('\r{}\r'.format(' ' * (len(buf) + len(prompt))))
for command in complete_command:
print(command)
sys.stdout.write(prompt)
sys.stdout.flush()
return []
str_len = len(target)
filtered = [x for x in complete_command if len(x) >= str_len]
filtered = [x for x in filtered if x.startswith(target) is True]
if filtered:
min_cmd = sorted(filtered)[0]
if min_cmd == target:
return list(target)
i = start = len(target)
until = len(min_cmd)
while start <= i < until:
compare = filtered[0][i]
is_diff = False
for cmd in filtered:
if compare != cmd[i]:
is_diff = True
break
if is_diff is True:
break
i += 1
return list(min_cmd[:i])
else:
return buf
def finder(buf):
target = ''.join(buf)
lookup = []
for cmd in complete_command:
if cmd.startswith(target) is True:
lookup.append(cmd)
if lookup:
sys.stdout.write('\r{}\r'.format(' ' * (len(buf) + len(prompt))))
print("---------- CMDS ---------")
for cmd in lookup:
print(cmd)
sys.stdout.write(prompt + ''.join(target))
sys.stdout.flush()
def ctrl_d(keyword):
raise EOFError
def ctrl_p(keyword):
# chr(16)
# history up
if keyword['history'] is True:
old = keyword['type_buf']
cmd = keyword['history_move'].move_up()
if cmd is None:
pass
else:
new = list(cmd)
rewrite(new, old)
keyword['type_buf'] = new
return State.CONTINUE
def ctrl_j(keyword):
# char(14)
# Ctrl + j
# history down
if keyword['history'] is True:
old = keyword['type_buf']
cmd = keyword['history_move'].move_down()
if cmd is None:
new = ['']
else:
new = list(cmd)
rewrite(new, old)
keyword['type_buf'] = new
return State.CONTINUE
def ctrl_l(keyword):
# chr(12)
# Ctrl + l
return State.CONTINUE
def ctrl_h(keyword):
# chr(8)
# Ctrl + h
old = keyword['type_buf']
new = keyword['type_buf'][:-1]
rewrite(new, old)
keyword['type_buf'] = new
return State.CONTINUE
def tab(keyword):
# chr(9)
# Tab
old = keyword['type_buf']
new = complete(old)
if new:
if ''.join(new) == ''.join(old):
finder(new)
else:
rewrite(new, old)
keyword['type_buf'] = new
return State.CONTINUE
def newline(keyword):
# chr(10)
# Newline
print("")
return ''.join(keyword['type_buf'])
def backspace(keyword):
# chr(127)
# Backspace
old = keyword['type_buf']
new = keyword['type_buf'][:-1]
rewrite(new, old)
keyword['type_buf'] = new
return State.CONTINUE
def normal(keyword):
keyword['type_buf'].append(keyword['char'])
rewrite(keyword['type_buf'], keyword['type_buf'])
return State.CONTINUE
def other(keyword):
return State.CONTINUE
keyword = {'prompt': prompt, 'complete_command': complete_command,
'history': history,}
keyword['type_buf'] = []
keyword['history_move'] = HistoryMove(
History.instance().load())
special_key_handlers = {chr(4): ctrl_d,
chr(16): ctrl_p,
chr(14): ctrl_j,
# MacOS uses 13 as ctrl-j
chr(13): ctrl_j,
chr(12): ctrl_l,
chr(8): ctrl_h,
chr(9): tab,
chr(10): newline,
chr(127): backspace, }
while True:
char = sys.stdin.read(1)
if not char:
break
if char in special_key_handlers:
handler = special_key_handlers[char]
elif 41 <= ord(char) <= 176 or ord(char) == 32:
handler = normal
else:
handler = other
keyword['char'] = char
ret = handler(keyword)
if ret == State.CONTINUE:
continue
elif ret == State.BREAK:
break
else:
return ret
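# Usage sketch (added for illustration, not part of the original module): prompt
# with tab completion over a fixed command list and history navigation enabled.
# The command names below are placeholders.
if __name__ == '__main__':
    commands = ['issue list', 'issue show', 'wiki', 'help', 'exit']
    line = redmine_input('redmine> ', complete_command=commands, history=True)
    print('entered:', line)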
| 2.375 | 2 |
newBoCodeCodingChallenge.py | RobbieNesmith/Code-Challenge | 0 | 12786907 | def odometer(arr, change):
pos = len(arr) - 1
while pos >= 0 and change != 0:
sum = arr[pos] + change
if sum < 0:
change = sum
else:
change = sum // 10
arr[pos] = sum % 10
pos = pos - 1
if change == 1:
arr[0] = 1
tests = [{"input": [4,3,9,5], "output": [4,3,9,6]}, {"input": [4,3,4,9], "output": [4,3,5,0]}, {"input": [9,9,9,9], "output": [1,0,0,0]}]
for test in tests:
testArr = []
for i in range(len(test["input"])):
testArr.append(test["input"][i])
odometer(testArr, 1)
valid = True
for i in range(len(testArr)):
if testArr[i] != test["output"][i]:
valid = False
if valid:
print(f"Test succeeded! {test['input']} => {test['output']}")
else:
        print(f"Test failed! Expected {test['output']}, got {testArr}")
 | 3.640625 | 4 |
src/118.pascals-triangle/118.pascals-triangle.py | AnestLarry/LeetCodeAnswer | 0 | 12786908 | #
# @lc app=leetcode id=118 lang=python3
#
# [118] Pascal's Triangle
#
# Given a non-negative integer numRows, generate the first numRows of Pascal's triangle.
# In Pascal's triangle, each number is the sum of the two numbers directly above it.
# Example:
# Input: 5
# Output:
# [
# [1],
# [1,1],
# [1,2,1],
# [1,3,3,1],
# [1,4,6,4,1]
# ]
from typing import List


class Solution:
def generate1(self, numRows: int) -> List[List[int]]:
# Accepted
# 15/15 cases passed (28 ms)
# Your runtime beats 90.21 % of python3 submissions
# Your memory usage beats 32.78 % of python3 submissions (13.5 MB)
base = [[1]]
if numRows < 1:
return []
elif numRows == 1:
return base
numRows -= 1
while numRows > 0:
base.append(
[1] +
[base[-1][x]+base[-1][x+1] for x in range(len(base[-1]) - 1)]
+ [1]
)
numRows -= 1
return base
def generate(self, numRows):
# Accepted
# 15/15 cases passed (28 ms)
# Your runtime beats 90.21 % of python3 submissions
        # Your memory usage beats 32.88 % of python3 submissions (13.1 MB)
pascal = [[1]*(i+1) for i in range(numRows)]
for i in range(numRows):
for j in range(1, i):
pascal[i][j] = pascal[i-1][j-1] + pascal[i-1][j]
return pascal
| 3.625 | 4 |
Snake Game/snake.py | zYxDevs/Python_Scripts | 14 | 12786909 | <reponame>zYxDevs/Python_Scripts<gh_stars>10-100
from turtle import Turtle
MOVE=20
TURTLE_POS=[(0,0),(-20,0),(-40,0)]
UP=90
DOWN=270
LEFT=180
RIGHT=0
class Snake:
"""Creates snakes objects and stores them in a list"""
def __init__(self):
self.all_segment=[]
self.create_snake()
self.head=self.all_segment[0]
def create_snake(self):
for coordinate in TURTLE_POS:
self.add_segment(coordinate)
def add_segment(self,coordinate):
new_segment=Turtle(shape="square")
new_segment.color("yellow")
new_segment.penup()
new_segment.goto(coordinate)
self.all_segment.append(new_segment)
def extend(self):
self.add_segment(self.all_segment[-1].position())
def move(self):
for pos in range(len(self.all_segment)-1,0,-1):
x_value=self.all_segment[pos-1].xcor()
y_value=self.all_segment[pos-1].ycor()
self.all_segment[pos].goto(x_value,y_value)
self.head.forward(MOVE)
def up(self):
if self.head.heading()!=DOWN:
self.head.setheading(UP)
def down(self):
if self.head.heading()!=UP:
self.head.setheading(DOWN)
def left(self):
if self.head.heading()!=RIGHT:
self.head.setheading(LEFT)
def right(self):
if self.head.heading()!=LEFT:
            self.head.setheading(RIGHT)
 | 3.640625 | 4 |
gravity_wave/solver.py | thomasgibson/firedrake-hybridization | 0 | 12786910 | <reponame>thomasgibson/firedrake-hybridization
from firedrake import *
from firedrake.assemble import create_assembly_callable
from firedrake.parloops import par_loop, READ, INC
from firedrake.utils import cached_property
from pyop2.profiling import timed_stage, timed_region
from ksp_monitor import KSPMonitorDummy
from p1_hybrid_mg import P1HMultiGrid
import numpy as np
class GravityWaveSolver(object):
"""Solver for the linearized compressible Boussinesq equations
(includes Coriolis term). The equations are solved in three stages:
(1) First analytically eliminate the buoyancy perturbation term from
the discrete equations. This is possible since there is currently
no orography. Note that it is indeed possible to eliminate buoyancy
when orography is present, however this must be done at the continuous
level first.
(2) Eliminating buoyancy produces a saddle-point system for the velocity
and pressure perturbations. The resulting system is solved using
either an approximate full Schur-complement procedure or a
hybridized mixed method.
(3) Once the velocity and perturbation fields are computed from the
previous step, the buoyancy term is reconstructed.
"""
def __init__(self, W2, W3, Wb, dt, c, N, Omega, R, rtol=1.0E-6,
solver_type="gamg", hybridization=False,
local_invert_method=None,
local_solve_method=None,
monitor=False):
"""The constructor for the GravityWaveSolver.
:arg W2: The HDiv velocity space.
:arg W3: The L2 pressure space.
:arg Wb: The "Charney-Phillips" space for the buoyancy field.
:arg dt: A positive real number denoting the time-step size.
:arg c: A positive real number denoting the speed of sound waves
in dry air.
:arg N: A positive real number describing the Brunt–Väisälä frequency.
:arg Omega: A positive real number; the angular rotation rate of the
Earth.
:arg R: A positive real number denoting the radius of the spherical
mesh (Earth-size).
:arg rtol: The relative tolerance for the solver.
        :arg solver_type: A string describing which inner-most solver to use on
                          the pressure space (approximate Schur-complement) or
                          the trace space (hybridization). Supported values are
                          "gamg" (smoothed aggregation algebraic multigrid),
                          "preonly-gamg", "hypre" (BoomerAMG), "direct" (LU via
                          MUMPS), and "hybrid_mg".
:arg hybridization: A boolean switch between using a hybridized
mixed method (True) on the velocity-pressure
system, or GMRES with an approximate Schur-
complement preconditioner (False).
:arg local_invert_method: Optional argument detailing what kind of
factorization to perform in Eigen when
computing local inverses.
:arg local_solve_method: Optional argument detailing what kind of
factorization to perform in Eigen when
computing the local solves in the hybridized
solver.
:arg monitor: A boolean switch with turns on/off KSP monitoring
of the problem residuals (primarily for debugging
and checking convergence of the solver). When profiling,
keep this set to `False`.
"""
self.hybridization = hybridization
self._local_solve_method = local_solve_method
self._local_invert_method = local_invert_method
self.monitor = monitor
self.rtol = rtol
self.hybrid_mg = False
if solver_type == "gamg":
self.params = self.gamg_paramters
elif solver_type == "preonly-gamg":
self.params = self.preonly_gamg_parameters
elif solver_type == "hypre":
self.params = self.hypre_parameters
elif solver_type == "direct":
self.params = self.direct_parameters
elif solver_type == "hybrid_mg":
self.params = {"ksp_type": "cg",
"ksp_rtol": self.rtol}
self.hybrid_mg = True
else:
raise ValueError("Unknown inner solver type")
# Timestepping parameters and physical constants
self._dt = dt
self._c = c
self._N = N
self._dt_half = Constant(0.5*dt)
self._dt_half_N2 = Constant(0.5*dt*N**2)
self._dt_half_c2 = Constant(0.5*dt*c**2)
self._omega_N2 = Constant((0.5*dt*N)**2)
self._omega_c2 = Constant((0.5*dt*c)**2)
# Compatible finite element spaces
self._Wmixed = W2 * W3
self._W2 = self._Wmixed.sub(0)
self._W3 = self._Wmixed.sub(1)
self._Wb = Wb
mesh = self._W3.mesh()
# Hybridized finite element spaces
broken_W2 = BrokenElement(self._W2.ufl_element())
self._W2disc = FunctionSpace(mesh, broken_W2)
h_deg, v_deg = self._W2.ufl_element().degree()
tdegree = (h_deg - 1, v_deg - 1)
self._WT = FunctionSpace(mesh, "HDiv Trace", tdegree)
self._Whybrid = self._W2disc * self._W3 * self._WT
self._hybrid_update = Function(self._Whybrid)
self._facet_normal = FacetNormal(mesh)
shapes = (self._W2.finat_element.space_dimension(),
np.prod(self._W2.shape))
weight_kernel = """
for (int i=0; i<%d; ++i) {
for (int j=0; j<%d; ++j) {
w[i][j] += 1.0;
}}""" % shapes
self.weight = Function(self._W2)
par_loop(weight_kernel, dx, {"w": (self.weight, INC)})
self.average_kernel = """
for (int i=0; i<%d; ++i) {
for (int j=0; j<%d; ++j) {
vec_out[i][j] += vec_in[i][j]/w[i][j];
}}""" % shapes
# Functions for state solutions
self._up = Function(self._Wmixed)
self._b = Function(self._Wb)
self._btmp = Function(self._Wb)
self._state = Function(self._W2 * self._W3 * self._Wb, name="State")
# Outward normal vector
x = SpatialCoordinate(mesh)
R = sqrt(inner(x, x))
self._khat = interpolate(x/R, mesh.coordinates.function_space())
# Coriolis term
fexpr = 2*Omega*x[2]/R
Vcg = FunctionSpace(mesh, "CG", 1)
self._f = interpolate(fexpr, Vcg)
# Construct linear solvers
if self.hybridization:
self._build_hybridized_solver()
else:
self._build_up_solver()
self._build_b_solver()
self._ksp_monitor = KSPMonitorDummy()
self.up_residual_reductions = []
@property
def direct_parameters(self):
"""Solver parameters using a direct method (LU)"""
inner_params = {'ksp_type': 'gmres',
'pc_type': 'lu',
'pc_factor_mat_solver_package': 'mumps'}
if self.hybridization:
params = inner_params
else:
params = {'ksp_type': 'preonly',
'pc_type': 'fieldsplit',
'pc_fieldsplit_type': 'schur',
'pc_fieldsplit_schur_fact_type': 'FULL',
'fieldsplit_0': inner_params,
'fieldsplit_1': inner_params}
return params
@property
def hypre_parameters(self):
"""Solver parameters using hypre's boomeramg
implementation of AMG.
"""
inner_params = {'ksp_type': 'cg',
'ksp_rtol': self.rtol,
'pc_type': 'hypre',
'pc_hypre_type': 'boomeramg',
'pc_hypre_boomeramg_no_CF': False,
'pc_hypre_boomeramg_coarsen_type': 'HMIS',
'pc_hypre_boomeramg_interp_type': 'ext+i',
'pc_hypre_boomeramg_P_max': 0,
'pc_hypre_boomeramg_agg_nl': 0,
'pc_hypre_boomeramg_max_level': 5,
'pc_hypre_boomeramg_strong_threshold': 0.25}
if self.monitor:
inner_params['ksp_monitor_true_residual'] = True
if self.hybridization:
params = inner_params
else:
params = {'ksp_type': 'gmres',
'ksp_rtol': self.rtol,
'pc_type': 'fieldsplit',
'pc_fieldsplit_type': 'schur',
'ksp_max_it': 100,
'ksp_gmres_restart': 50,
'pc_fieldsplit_schur_fact_type': 'FULL',
'pc_fieldsplit_schur_precondition': 'selfp',
'fieldsplit_0': {'ksp_type': 'preonly',
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'},
'fieldsplit_1': inner_params}
if self.monitor:
params['ksp_monitor_true_residual'] = True
return params
@property
def gamg_parameters(self):
"""Solver parameters for the velocity-pressure system using
algebraic multigrid.
"""
inner_params = {'ksp_type': 'cg',
'pc_type': 'gamg',
'ksp_rtol': self.rtol,
'mg_levels': {'ksp_type': 'chebyshev',
'ksp_max_it': 2,
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'}}
if self.monitor:
inner_params['ksp_monitor_true_residual'] = True
if self.hybridization:
params = inner_params
else:
params = {'ksp_type': 'gmres',
'ksp_rtol': self.rtol,
'pc_type': 'fieldsplit',
'pc_fieldsplit_type': 'schur',
'ksp_max_it': 100,
'ksp_gmres_restart': 50,
'pc_fieldsplit_schur_fact_type': 'FULL',
'pc_fieldsplit_schur_precondition': 'selfp',
'fieldsplit_0': {'ksp_type': 'preonly',
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'},
'fieldsplit_1': inner_params}
if self.monitor:
params['ksp_monitor_true_residual'] = True
return params
@property
def preonly_gamg_parameters(self):
inner_params = {'ksp_type': 'preonly',
'pc_type': 'gamg',
'mg_levels': {'ksp_type': 'chebyshev',
'ksp_chebyshev_esteig': True,
'ksp_max_it': 1,
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'}}
if self.hybridization:
# We need an iterative method for the trace system
params = self.gamg_parameters
else:
params = {'ksp_type': 'gmres',
'ksp_rtol': self.rtol,
'pc_type': 'fieldsplit',
'pc_fieldsplit_type': 'schur',
'ksp_max_it': 100,
'ksp_gmres_restart': 50,
'pc_fieldsplit_schur_fact_type': 'FULL',
'pc_fieldsplit_schur_precondition': 'selfp',
'fieldsplit_0': {'ksp_type': 'preonly',
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'},
'fieldsplit_1': inner_params}
if self.monitor:
params['ksp_monitor_true_residual'] = True
return params
@cached_property
def _build_up_bilinear_form(self):
"""Bilinear form for the gravity wave velocity-pressure
subsystem.
"""
utest, ptest = TestFunctions(self._Wmixed)
u, p = TrialFunctions(self._Wmixed)
def outward(u):
return cross(self._khat, u)
# Linear gravity wave system for the velocity and pressure
# increments (buoyancy has been eliminated in the discrete
# equations since there is no orography)
a_up = (ptest*p
+ self._dt_half_c2*ptest*div(u)
- self._dt_half*div(utest)*p
+ (dot(utest, u)
+ self._dt_half*dot(utest, self._f*outward(u))
+ self._omega_N2
* dot(utest, self._khat)
* dot(u, self._khat))) * dx
return a_up
@cached_property
def _build_hybridized_bilinear_form(self):
"""Bilinear form for the hybrid-mixed velocity-pressure
subsystem.
"""
utest, ptest, lambdatest = TestFunctions(self._Whybrid)
u, p, lambdar = TrialFunctions(self._Whybrid)
def outward(u):
return cross(self._khat, u)
n = self._facet_normal
# Hybridized linear gravity wave system for the velocity,
# pressure, and trace subsystem (buoyancy has been eliminated
# in the discrete equations since there is no orography).
# NOTE: The no-slip boundary conditions are applied weakly
# in the hybridized problem.
a_uplambdar = ((ptest*p
+ self._dt_half_c2*ptest*div(u)
- self._dt_half*div(utest)*p
+ (dot(utest, u)
+ self._dt_half*dot(utest, self._f*outward(u))
+ self._omega_N2
* dot(utest, self._khat)
* dot(u, self._khat))) * dx
+ lambdar * jump(utest, n=n) * (dS_v + dS_h)
+ lambdar * dot(utest, n) * ds_tb
+ lambdatest * jump(u, n=n) * (dS_v + dS_h)
+ lambdatest * dot(u, n) * ds_tb)
return a_uplambdar
def _build_up_rhs(self, u0, p0, b0):
"""Right-hand side for the gravity wave velocity-pressure
subsystem.
"""
def outward(u):
return cross(self._khat, u)
utest, ptest = TestFunctions(self._Wmixed)
L_up = (dot(utest, u0)
+ self._dt_half*dot(utest, self._f*outward(u0))
+ self._dt_half*dot(utest, self._khat*b0)
+ ptest*p0) * dx
return L_up
def _build_hybridized_rhs(self, u0, p0, b0):
"""Right-hand side for the hybridized gravity wave
velocity-pressure-trace subsystem.
"""
def outward(u):
return cross(self._khat, u)
# No residual for the traces; they only enforce continuity
# of the discontinuous velocity normals
utest, ptest, _ = TestFunctions(self._Whybrid)
L_uplambdar = (dot(utest, u0)
+ self._dt_half*dot(utest, self._f*outward(u0))
+ self._dt_half*dot(utest, self._khat*b0)
+ ptest*p0) * dx
return L_uplambdar
def up_residual(self, old_state, new_up):
"""Returns the residual of the velocity-pressure system."""
u0, p0, b0 = old_state.split()
res = self._build_up_rhs(u0, p0, b0)
L = self._build_up_bilinear_form
res -= action(L, new_up)
return res
def _build_up_solver(self):
"""Constructs the solver for the velocity-pressure increments."""
# Strong no-slip boundary conditions on the top
# and bottom of the atmospheric domain
bcs = [DirichletBC(self._Wmixed.sub(0), 0.0, "bottom"),
DirichletBC(self._Wmixed.sub(0), 0.0, "top")]
# Mixed operator
A = assemble(self._build_up_bilinear_form, bcs=bcs)
# Set up linear solver
linear_solver = LinearSolver(A, solver_parameters=self.params)
self.linear_solver = linear_solver
# Function to store RHS for the linear solver
u0, p0, b0 = self._state.split()
self._up_rhs = Function(self._Wmixed)
self._assemble_up_rhs = create_assembly_callable(
self._build_up_rhs(u0, p0, b0),
tensor=self._up_rhs)
def _build_hybridized_solver(self):
"""Constructs the Schur-complement system for the hybridized
problem. In addition, all reconstruction calls are generated
for recovering velocity and pressure.
"""
# Matrix operator has the form:
# | A00 A01 A02 |
# | A10 A11 0 |
# | A20 0 0 |
# for the U-Phi-Lambda system.
# Create Slate tensors for the 3x3 block operator:
A = Tensor(self._build_hybridized_bilinear_form)
# Define the 2x2 mixed block:
# | A00 A01 |
# | A10 A11 |
# which couples the potential and momentum.
Atilde = A.block(((0, 1), (0, 1)))
# and the off-diagonal blocks:
# |A20 0| & |A02 0|^T:
Q = A.block((2, (0, 1)))
Qt = A.block(((0, 1), 2))
# Schur complement operator:
S = assemble(Q * Atilde.inv(self._local_invert_method) * Qt)
# Set up linear solver
linear_solver = LinearSolver(S, solver_parameters=self.params)
self.linear_solver = linear_solver
if self.hybrid_mg:
pc = self.linear_solver.ksp.pc
pc.setType(pc.Type.PYTHON)
self._mgpc = P1HMultiGrid(S, Function(self._WT),
omega_c2=self._omega_c2)
pc.setPythonContext(self._mgpc)
# Tensor for the residual
u0, p0, b0 = self._state.split()
R = Tensor(self._build_hybridized_rhs(u0, p0, b0))
R01 = R.block(((0, 1),))
# Function to store the rhs for the trace system
self._S_rhs = Function(self._WT)
self._assemble_Srhs = create_assembly_callable(
Q * Atilde.inv(self._local_invert_method) * R01,
tensor=self._S_rhs)
# Individual blocks: 0 indices correspond to u coupling;
# 1 corresponds to p coupling; and 2 is trace coupling.
A00 = A.block((0, 0))
A01 = A.block((0, 1))
A10 = A.block((1, 0))
A11 = A.block((1, 1))
A02 = A.block((0, 2))
R0 = R.block((0,))
R1 = R.block((1,))
# Local coefficient vectors
Lambda = AssembledVector(self._hybrid_update.sub(2))
P = AssembledVector(self._hybrid_update.sub(1))
Sp = A11 - A10 * A00.inv(self._local_invert_method) * A01
p_problem = Sp.solve(R1 - A10 *
A00.inv(self._local_invert_method) *
(R0 - A02 * Lambda),
method=self._local_solve_method)
u_problem = A00.solve(R0 - A01 * P - A02 * Lambda,
method=self._local_solve_method)
# Two-stage reconstruction
self._assemble_pressure = create_assembly_callable(
p_problem, tensor=self._hybrid_update.sub(1))
self._assemble_velocity = create_assembly_callable(
u_problem, tensor=self._hybrid_update.sub(0))
@property
def ksp_monitor(self):
"""Returns the KSP monitor attached to this solver. Note
that the monitor is for the velocity-pressure system.
"""
return self._ksp_monitor
@ksp_monitor.setter
def ksp_monitor(self, kspmonitor):
"""Set the monitor for the velocity-pressure or trace system.
:arg kspmonitor: a monitor to use.
"""
self._ksp_monitor = kspmonitor
ksp = self.linear_solver.ksp
ksp.setMonitor(self._ksp_monitor)
def _build_b_solver(self):
"""Constructs the solver for the buoyancy update."""
# Computed velocity perturbation
u0, _, _ = self._state.split()
# Expression for buoyancy reconstruction
btest = TestFunction(self._Wb)
L_b = dot(btest*self._khat, u0) * dx
a_b = btest*TrialFunction(self._Wb) * dx
b_problem = LinearVariationalProblem(a_b, L_b, self._btmp)
b_params = {'ksp_type': 'cg',
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'}
if self.monitor:
b_params['ksp_monitor_true_residual'] = True
# Solver for buoyancy update
b_solver = LinearVariationalSolver(b_problem,
solver_parameters=b_params)
self.b_solver = b_solver
def initialize(self, u, p, b):
"""Initialized the solver state with initial conditions
for the velocity, pressure, and buoyancy fields.
:arg u: An initial condition (`firedrake.Function`)
for the velocity field.
:arg p: An initial condition for the pressure field.
:arg b: A function describing the initial
state of the buoyancy field.
"""
u0, p0, b0 = self._state.split()
u0.assign(u)
p0.assign(p)
b0.assign(b)
def solve(self):
"""Solves the linear gravity wave problem at a particular
time-step in two stages. First, the velocity and pressure
solutions are computed, then buoyancy is reconstructed from
the computed fields. The solver state is then updated.
"""
# Previous state
un, pn, bn = self._state.split()
# Initial residual
self._hybrid_update.assign(0.0)
self._up.assign(0.0)
self._b.assign(0.0)
r0 = assemble(self.up_residual(self._state, self._up))
# Main solver stage
with timed_stage("Velocity-Pressure-Solve"):
if self.hybridization:
# Solve for the Lagrange multipliers
with timed_region("Trace-Solver"):
self._assemble_Srhs()
self.linear_solver.solve(self._hybrid_update.sub(2),
self._S_rhs)
# Recover pressure, then velocity
with timed_region("Hybrid-Reconstruct"):
self._assemble_pressure()
self._assemble_velocity()
# Transfer hybridized solutions to the conforming spaces
self._up.sub(1).assign(self._hybrid_update.sub(1))
par_loop(self.average_kernel, dx,
{"w": (self.weight, READ),
"vec_in": (self._hybrid_update.sub(0), READ),
"vec_out": (self._up.sub(0), INC)})
else:
self._assemble_up_rhs()
self.linear_solver.solve(self._up, self._up_rhs)
# Residual after solving
rn = assemble(self.up_residual(self._state, self._up))
self.up_residual_reductions.append(rn.dat.norm/r0.dat.norm)
# Update state
un.assign(self._up.sub(0))
pn.assign(self._up.sub(1))
# Reconstruct b
self._btmp.assign(0.0)
with timed_stage("Buoyancy-Solve"):
self.b_solver.solve()
bn.assign(assemble(bn - self._dt_half_N2*self._btmp))
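# ---------------------------------------------------------------------------
# Illustrative driver sketch (added for clarity; not part of the original
# source). The class name and constructor arguments below are assumptions --
# only the initialize()/solve() methods and the _state field defined above
# are taken from this file.
#
#   solver = GravityWaveSolver(W2, W3, Wb, dt, c, N, ...)  # hypothetical ctor
#   solver.initialize(u_init, p_init, b_init)
#   for step in range(num_steps):
#       solver.solve()
#   u_n, p_n, b_n = solver._state.split()
# ---------------------------------------------------------------------------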
| 2.171875 | 2 |
observers.py | rnowling/integrator-experiments | 0 | 12786911 | <filename>observers.py
import numpy as np
class TotalEnergyObserver(object):
def __init__(self, period):
self.period = period
self.energies = []
self.eps = 1e-5
def observe(self, state):
if state.simulated_time % self.period < self.eps:
self.energies.append(state.total_energy)
def stats(self):
return np.mean(self.energies), np.std(self.energies)
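# Minimal usage sketch (added for illustration; not part of the original
# file). The stand-in state objects below only mimic the two attributes
# observe() reads: simulated_time and total_energy.
if __name__ == "__main__":
    from collections import namedtuple
    _State = namedtuple("_State", ["simulated_time", "total_energy"])
    observer = TotalEnergyObserver(period=1.0)
    for step in range(5):
        observer.observe(_State(simulated_time=float(step),
                                total_energy=1.0 + 0.01 * step))
    print(observer.stats())  # (mean, std) of the sampled energies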
| 2.984375 | 3 |
tests/apps/pages/component_as_trigger.py | T4rk1n/dazzler | 15 | 12786912 | <reponame>T4rk1n/dazzler
"""
Page component_as_trigger of dazzler
Created 2019-06-16
"""
import json
from dazzler.components import core
from dazzler.system import Page, Trigger, BindingContext, State
page = Page(
__name__,
core.Container([
core.Container(core.Container('from children'), identity='component'),
core.Container(identity='output'),
core.Container([
core.Container(str(x)) for x in range(0, 10)
], identity='array-components'),
core.Container(identity='array-output'),
core.Container(core.Container(core.Container([
core.Html(
'div', core.Container('inside html div'),
identity='inside-html'
),
core.Html(
'div',
attributes={'children': core.Html('span', 'attribute')}
),
])), identity='nested-components'),
core.Container(identity='nested-output'),
core.Button('get-aspect-click', identity='get-aspect-trigger'),
core.Container(
core.Input(value='input-value'),
identity='input'
),
core.Container(core.Input(value=4747), identity='as-state'),
core.Container(identity='get-aspect-output'),
core.Container(identity='as-state-output')
])
)
@page.bind(Trigger('component', 'children'))
async def trigger(ctx: BindingContext):
await ctx.set_aspect(
'output', children=f'From component: {ctx.trigger.value.children}'
)
@page.bind(Trigger('array-components', 'children'))
async def trigger_array_components(ctx: BindingContext):
# The value is an array of container.
value = sum(int(x.children) for x in ctx.trigger.value)
await ctx.set_aspect(
'array-output',
children=f'Sum: {value}'
)
@page.bind(Trigger('nested-components', 'children'))
async def trigger_nested_components(ctx: BindingContext):
children = ctx.trigger.value.children.children
output = {
'len': len(children),
'insider': children[0].children.children,
# This one in the attributes, not the children as per the
# original component, the children prop is a different aspect.
'as_prop': children[1].attributes['children'].children,
}
await ctx.set_aspect('nested-output', children=json.dumps(output))
@page.bind(
Trigger('get-aspect-trigger', 'clicks'), State('as-state', 'children')
)
async def get_aspect_component_with_state(ctx: BindingContext):
component = await ctx.get_aspect('input', 'children')
await ctx.set_aspect(
'get-aspect-output', children=json.dumps({
'get-aspect': component.value,
'state': ctx.states['as-state']['children'].value
})
)
| 2.0625 | 2 |
examples/layout_form.py | pzahemszky/guizero | 320 | 12786913 | from guizero import App, Text, TextBox, Combo, PushButton, Box
app = App()
Text(app, text="My form")
form = Box(app, width="fill", layout="grid")
form.border = True
Text(form, text="Title", grid=[0,0], align="right")
TextBox(form, grid=[1,0])
Text(form, text="Name", grid=[0,1], align="right")
TextBox(form, grid=[1,1])
Text(form, text="Age", grid=[0,2], align="right")
TextBox(form, grid=[1,2])
buttons = Box(app, width="fill", align="bottom")
PushButton(buttons, text="Ok", align="left")
PushButton(buttons, text="Cancel", align="left")
app.display() | 2.96875 | 3 |
Unsupervised_Clustering/Variational_Autoencoders/Vol_VAE_Utils.py | DavidBMcCoy/ZSFG-UCSF_Machine_Learning | 1 | 12786914 | <filename>Unsupervised_Clustering/Variational_Autoencoders/Vol_VAE_Utils.py
import numpy as np
def convert_to_one_hot(Y, C):
"""
Convert the integer label vector Y into a one-hot matrix of shape (C, m),
one column per example.
"""
Y = np.eye(C)[Y.reshape(-1)].T
return Y
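# Quick illustrative check (added; not part of the original utility file).
if __name__ == "__main__":
    labels = np.array([0, 1, 1, 0])
    one_hot = convert_to_one_hot(labels, C=2)
    # one_hot has shape (2, 4); its columns encode the labels 0, 1, 1, 0
    print(one_hot)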
| 2.296875 | 2 |
resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/Volume/VisionInterface/ContourSpectrum.py | J-E-J-S/aaRS-Pipeline | 8 | 12786915 | ## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
########################################################################
#
# Date: Mars 2006 Authors: <NAME>, <NAME>
#
# <EMAIL>
# <EMAIL>
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: <NAME>, <NAME> and TSRI
#
#########################################################################
#
# $Header$
#
# $Id$
#
# third party packages
import types
import Tkinter
import numpy.oldnumeric as Numeric
import numpy
#import Pmw
#import os
#import types
#import string
#import warnings
# TSRI MGL packages
from NetworkEditor.items import NetworkNode
from NetworkEditor.widgets import PortWidget
from mglutil.util.callback import CallbackManager
from mglutil.gui.BasicWidgets.Tk.optionsPanel import OptionsPanel
from mglutil.util.misc import ensureFontCase
# current package
class ContourSpectrumNE(NetworkNode):
"""
Input Ports
contourspectrum: (bound to contourspectrum widget)
mini: minimum value (optional)
maxi: maximum value (optional)
Output Ports
value: value is of type int or float, depending on the contourspectrum settings
"""
def __init__(self, name='contourspectrum', **kw):
#import pdb;pdb.set_trace()
kw['name'] = name
apply( NetworkNode.__init__, (self,), kw )
self.inNodeWidgetsVisibleByDefault = True
self.widgetDescr['contourspectrum'] = {
'class':'NEContourSpectrum', 'master':'node', 'size':50,
'oneTurn':1, 'type':'float', 'lockedOnPort':True,
'initialValue':0.0,
'labelCfg':{'text':''}}
self.inputPortsDescr.append(datatype='float', name='contourspectrum')
self.inputPortsDescr.append(datatype='float', name='mini',
required=False)
self.inputPortsDescr.append(datatype='float', name='maxi',
required=False)
self.inputPortsDescr.append(datatype='Grid3D', name='grid', required=False)
self.outputPortsDescr.append(datatype='float', name='value')
code = """def doit(self, contourspectrum, mini, maxi, grid):
if contourspectrum is not None:
w = self.inputPortByName['contourspectrum'].widget
if w:
if grid is not None and self.inputPortByName['grid'].hasNewValidData():
data = grid.data
origin = Numeric.array(grid.origin).astype('f')
stepsize = Numeric.array(grid.stepSize).astype('f')
self.newgrid3D = Numeric.reshape( Numeric.transpose(grid.data),
(1, 1)+tuple(grid.data.shape) )
from UTpackages.UTisocontour import isocontour
gridData = isocontour.newDatasetRegFloat3D(
self.newgrid3D, origin, stepsize)
sig = [gridData.getSignature(0, 0, 0),
gridData.getSignature(0, 0, 1),
gridData.getSignature(0, 0, 2),
gridData.getSignature(0, 0, 3)]
w.widget.setSignatures(sig)
if mini is not None and self.inputPortByName['mini'].hasNewValidData():
w.configure(min=mini)
if maxi is not None and self.inputPortByName['maxi'].hasNewValidData():
w.configure(max=maxi)
self.outputData(value=contourspectrum)
"""
self.setFunction(code)
def afterAddingToNetwork(self):
NetworkNode.afterAddingToNetwork(self)
# run this node so the value is output
self.run()
self.inputPorts[0].widget.configure = self.configure_NEContourSpectrum
self.inputPorts[0].widget.widget.setType = self.setType_NEContourSpectrum
def configure_NEContourSpectrum(self, rebuild=True, **kw):
"""specialized configure method for contourspectrum widget"""
# overwrite the contourspectrum widget's configure method to set the outputPort
# data type when the contourspectrum is configured
w = self.inputPorts[0].widget
apply( NEContourSpectrum.configure, (w, rebuild), kw)
dtype = kw.pop('type', None)
if dtype:
self.updateDataType(dtype)
def setType_NEContourSpectrum(self, dtype):
"""specialized setTyp method for mglutil contourspectrum object"""
# overwrite the Dial's setType method to catch type changes through
# the optionsPanel
from mglutil.gui.BasicWidgets.Tk.Dial import ContourSpectrum
contourspectrum = self.inputPorts[0].widget.widget
apply( ContourSpectrum.setType, (contourspectrum, dtype), {})
if type(dtype) == types.TypeType:
dtype = dtype.__name__
self.updateDataType(dtype)
self.inputPorts[1].setDataType(dtype, makeOriginal= True)
self.inputPorts[2].setDataType(dtype, makeOriginal= True)
def updateDataType(self, dtype):
port = self.outputPorts[0]
port.setDataType(dtype, tagModified=False)
if port.data is not None:
if type(dtype) == types.TypeType:
port.data = dtype(port.data)
else:
port.data = eval("%s(port.data)"%dtype)
class NEContourSpectrum(PortWidget):
"""NetworkEditor wrapper for ContourSpectrum widget.
Handles all PortWidget arguments and all ContourSpectrum arguments except for value.
Name: default:
callback None
continuous 1
lockContinuous 0
lockBMin 0
lockBMax 0
lockMin 0
lockMax 0
lockPrecision 0
lockShowLabel 0
lockType 0
lockValue 0
min None
max None
precision 2
showLabel 1
size 50
type 'float'
"""
configOpts = PortWidget.configOpts.copy()
configOpts['initialValue'] = {
'defaultValue':0.0, 'type':'float',
}
ownConfigOpts = {
'callback': {
'defaultValue':None, 'type': 'None',
'description':"???",
},
'continuous': {
'defaultValue':True, 'type':'boolean',
'description':"",
},
'lockContinuous': {
'defaultValue':False, 'type':'boolean',
'description':"",
},
'lockBMin': {
'defaultValue':False, 'type':'boolean',
'description':"",
},
'lockBMax': {
'defaultValue':False, 'type':'boolean',
'description':"",
},
'lockMin': {
'defaultValue':False, 'type':'boolean',
'description':"",
},
'lockMax': {
'defaultValue':False, 'type':'boolean',
'description':"",
},
'lockOneTurn': {
'defaultValue':False, 'type':'boolean',
'description':"",
},
'lockPrecision': {
'defaultValue':False, 'type':'boolean',
'description':"",
},
'lockShowLabel': {
'defaultValue':False, 'type':'boolean',
'description':"",
},
'lockType': {
'defaultValue':False, 'type':'boolean',
'description':"",
},
'lockValue': {
'defaultValue':False, 'type':'boolean',
'description':"",
},
'min': {
'defaultValue':None, 'type':'float',
'description':"",
},
'max': {
'defaultValue':None, 'type':'float',
'description':"",
},
'oneTurn': {
'defaultValue':360., 'type':'float',
'description':"",
},
'precision': {
'defaultValue':2, 'type':'int',
'description':"number of decimals used in label",
},
'showLabel': {
'defaultValue':True, 'type':'boolean',
'description':"",
},
'size':{
'defaultValue': 50, 'min':20, 'max':500, 'type':'int'
},
'type': {
'defaultValue':'float', 'type':'string',
'validValues': ['float', 'int'],
'description':"",
},
}
configOpts.update( ownConfigOpts )
def __init__(self, port, **kw):
## # create all attributes that will not be created by configure because
## # they do not appear on kw
## for key in self.ownConfigOpts.keys():
## v = kw.get(key, None)
## if v is None: # self.configure will not do anyting for this key
## setattr(self, key, self.ownConfigOpts[key]['defaultValue'])
# get all arguments handled by NEThumbweel and not by PortWidget
widgetcfg = {}
for k in self.ownConfigOpts.keys():
if k in kw:
widgetcfg[k] = kw.pop(k)
# call base class constructor
apply( PortWidget.__init__, ( self, port), kw)
# create the Dial widget
#from NetworkEditor.spectrum import ContourSpectrumGUI
self.widget = apply( ContourSpectrum, (self.widgetFrame,), widgetcfg)
self.widget.callbacks.AddCallback(self.newValueCallback)
# rename Options Panel to port name
self.widget.opPanel.setTitle("%s : %s"%(port.node.name, port.name) )
# overwrite right mouse button click
self.widget.canvas.bind("<Button-3>", self.postWidgetMenu)
self.widget.canvas.configure(cursor='cross')
# add menu entry to open configuration panel
self.menu.insert_command(0, label='Option Panel', underline=0,
command=self.toggleOptionsPanel)
# register new callback for widget's optionsPanel Apply button
# NOTE: idf.entryByName is at this time not built
for k in self.widget.opPanel.idf:
name = k.get('name', None)
if name and name == 'ApplyButton':
k['command'] = self.optionsPanelApply_cb
elif name and name == 'OKButton':
k['command'] = self.optionsPanelOK_cb
# first set default value, in case we have a min or max, else the
# node would run
if self.initialValue is not None:
self.set(self.widget.type(self.initialValue), run=0)
# configure without rebuilding to avoid enless loop
apply( self.configure, (False,), widgetcfg)
self._setModified(False) # will be set to True by configure method
def configure(self, rebuild=True, **kw):
# call base class configure with rebuild=Flase. If rebuilt is needed
# rebuildDescr will contain w=='rebuild' and rebuildDescr contains
# modified descr
action, rebuildDescr = apply( PortWidget.configure, (self, False), kw)
# handle ownConfigOpts entries
if self.widget is not None:
widgetOpts = {}
for k, v in kw.items():
if k in self.ownConfigOpts:
if k =='size':
action = 'rebuild'
rebuildDescr[k] = v
else:
widgetOpts[k] = v
if len(widgetOpts):
apply( self.widget.configure, (), widgetOpts)
if action=='rebuild' and rebuild:
action, rebuildDescr = self.rebuild(rebuildDescr)
elif action=='resize' and rebuild:
if self.widget and rebuild: # if widget exists
action = None
return action, rebuildDescr
def set(self, value, run=1):
#print "set NEContourSpectrum"
self._setModified(True)
self.widget.setValue(value)
self._newdata = True
if run:
self.scheduleNode()
def get(self):
return self.widget.get()
def optionsPanelOK_cb(self, event=None):
# register this widget to be modified when opPanel is used
self.widget.opPanel.OK_cb()
self._setModified(True)
def optionsPanelApply_cb(self, event=None):
# register this widget to be modified when opPanel is used
self.widget.opPanel.Apply_cb()
self._setModified(True)
def toggleOptionsPanel(self, event=None):
# rename the options panel title if the node name or port name has
# changed.
self.widget.opPanel.setTitle(
"%s : %s"%(self.port.node.name, self.port.name) )
self.widget.toggleOptPanel()
def getDescr(self):
cfg = PortWidget.getDescr(self)
for k in self.ownConfigOpts.keys():
if k == 'type': # type has to be handled separately
_type = self.widget.type
if _type == int:
_type = 'int'
else:
_type = 'float'
if _type != self.ownConfigOpts[k]['defaultValue']:
cfg[k] = _type
continue
val = getattr(self.widget, k)
if val != self.ownConfigOpts[k]['defaultValue']:
cfg[k] = val
return cfg
class ContourSpectrum(Tkinter.Frame):
"""This class implements a ContourSpectrum widget.
It is largely a copy/paste of the Dial widget.
"""
def __init__(self, master=None, type='float',
labCfg={'fg':'black','side':'left', 'text':None},
min=None, max=None,
showLabel=1, value=0.0, continuous=1, precision=2,
callback=None, lockMin=0, lockBMin=0, lockMax=0, lockBMax=0,
lockPrecision=0,lockShowLabel=0, lockValue=0,
lockType=0, lockContinuous=0, signatures=None, **kw):
Tkinter.Frame.__init__(self, master)
Tkinter.Pack.config(self)
self.callbacks = CallbackManager() # object to manage callback
# functions. They get called with the
# current value as an argument
# initialize various attributes with default values
self.height = 100 # widget height
self.width = 256 # widget height
self.widthMinusOne = self.width - 1
self.min = 0 # minimum value
self.max = 1 # maximum value
self.range = self.max - self.min
self.precision = 2 # decimal places
self.minOld = 0. # used to store old values
self.maxOld = 0.
self.size = 50 # defines widget size
self.offsetValue = 0. # used to set increment correctly
self.lab = None # label
self.callback = None # user specified callback
self.opPanel = None # option panel widget
self.value = 0.0 # current value of widget
self.oldValue = 0.0 # old value of widget
self.showLabel = 1 # turn on to display label on
self.continuous = 1 # set to 1 to call callbacks at
# each value change, else gets called
# on button release event
self.labCfg = labCfg # Tkinter Label options
self.labelFont = (
ensureFontCase('helvetica'), 14, 'bold') # label font
self.labelColor = 'yellow' # label color
self.canvas = None # the canvas to create the widget in
self.lockMin = lockMin # lock<X> vars are used in self.lock()
self.lockMax = lockMax # to lock/unlock entries in optionpanel
self.lockBMin = lockBMin
self.lockBMax = lockBMax
self.lockPrecision = 0
self.lockShowLabel = lockShowLabel
self.lockValue = lockValue
self.lockType = lockType
self.lockContinuous = lockContinuous
# configure with user-defined values
self.setCallback(callback)
self.setContinuous(continuous)
self.setType(type)
self.setPrecision(precision)
self.setMin(min)
self.setMax(max)
self.setShowLabel(showLabel)
self.setValue(value)
self.setLabel(self.labCfg)
if master is None:
master = Tkinter.Toplevel()
self.master = master # widget master
self.createCanvas(master)
Tkinter.Widget.bind(self.canvas, "<ButtonPress-1>", self.mouseDown)
Tkinter.Widget.bind(self.canvas, "<B1-Motion>", self.mouseMove)
Tkinter.Widget.bind(self.canvas, "<ButtonRelease-1>", self.mouseUp)
# create cursor
self.cursorTk = self.canvas.create_line( 0, 0, 0, 0, tags=['cursor'])
self.increment = 0.0
self.incrementOld = 0.
self.lockIncrement = 0
self.lockBIncrement = 0
self.oneTurn = 360.
self.lockOneTurn = 0
self.opPanel = OptionsPanel(master = self, title="Slider graph Options")
self.signatures = None # Signature objects fro isocontouring lib
self.sigData = [] # list of (x,y) values arrays for each signature
self.maxFun = [] # max Y value in each signature
self.minFun = [] # min Y value in each signature
self.yratios = [] # normalization factors
self.colors = ['red', 'green', 'blue', 'orange']
self.tkLines = [] # list of Tkids for lines
if signatures:
self.setSignatures(signatures)
def setCallback(self, cb):
"""Set widget callback. Must be callable function. Callback is called
every time the widget value is set/modified"""
assert cb is None or callable(cb) or type(cb) is types.ListType,\
"Illegal callback: must be either None or callable, or list. Got %s"%cb
if cb is None: return
elif type(cb) is types.ListType:
for func in cb:
assert callable(func), "Illegal callback must be callable. Got %s"%func
self.callbacks.AddCallback(func)
else:
self.callbacks.AddCallback(cb)
self.callback = cb
def toggleOptPanel(self, event=None):
if self.opPanel.flag:
self.opPanel.Dismiss_cb()
else:
if not hasattr(self.opPanel, 'optionsForm'):
self.opPanel.displayPanel(create=1)
else:
self.opPanel.displayPanel(create=0)
def mouseDown(self, event):
# remember where the mouse went down
#self.lastx = event.x
#self.lasty = event.y
self.setXcursor(event.x)
def mouseUp(self, event):
# call callbacks if not in continuous mode
if not self.continuous:
self.callbacks.CallCallbacks(self.opPanel.valInput.get())
if self.showLabel == 2:
# no widget labels on mouse release
self.canvas.itemconfigure(self.labelId2, text='')
self.canvas.itemconfigure(self.labelId, text='')
def mouseMove(self, event):
# move the cursor
self.setXcursor(event.x)
#self.lastx = event.x
def printLabel(self):
if self.canvas is None:
return
self.canvas.itemconfigure(self.labelId2,
text=self.labelFormat%self.value)#newVal)
self.canvas.itemconfigure(self.labelId,
text=self.labelFormat%self.value)#newVal)
def drawCursor(self, x):
if self.canvas:
self.canvas.coords(self.cursorTk, x, 0, x, self.height)
def get(self):
return self.type(self.value)
def setXcursor(self, x, update=1, force=0):
""" x is a cursor position in pixel between 1 and self.width
"""
if x < 1:
x = 1
if x > self.width:
x = self.width
self.drawCursor(x)
# the mouse return position from 1 to self.width (x=0 is not drawn)
# we need cursor position from 0 (so last x is self.width-1)
x = x - 1
if self.range is not None:
self.value = self.min + x * self.range / float(self.widthMinusOne)
newVal = self.get()
if self.continuous or force:
if update and self.oldValue != newVal or force:
self.oldValue = newVal
self.callbacks.CallCallbacks(newVal)
if self.showLabel==2:
self.printLabel()
else:
if self.showLabel==2:
self.printLabel()
if self.showLabel==1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%newVal)
def set(self, x, update=1, force=0):
""" x is a value between self.min and self.max
"""
#print "set ContourSpectrum"
if self.range is not None:
xcursor = (x - self.min) * float(self.widthMinusOne) / self.range
xcursor = xcursor + 1
self.drawCursor(xcursor)
self.setXcursor(xcursor,update,force)
def createCanvas(self, master):
self.frame = Tkinter.Frame(self, borderwidth=3, relief='sunken')
self.canvas = Tkinter.Canvas(self.frame, width=self.width, height=self.height)
self.xm = 25
self.ym = 25
self.labelId2 = self.canvas.create_text(self.xm+2, self.ym+2,
fill='black',
justify='center', text='',
font = self.labelFont)
self.labelId = self.canvas.create_text(self.xm, self.ym,
fill=self.labelColor,
justify='center', text='',
font = self.labelFont)
# pack em up
self.canvas.pack(side=Tkinter.TOP)
self.frame.pack(expand=1, fill='x')
self.toggleWidgetLabel(self.showLabel)
def toggleWidgetLabel(self, val):
if val == 0:
# no widget labels
self.showLabel=0
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
if val == 1:
# show always widget labels
self.showLabel=1
self.printLabel()
if val == 2:
# show widget labels only when mouse moves
self.showLabel=2
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
def setValue(self, val):
#print "setValue"
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for value: expected %s or %s, got %s"%(
type(1), type(1.0), type(val) )
# setValue does NOT call a callback!
if self.min is not None and val < self.min:
val = self.min
if self.max is not None and val > self.max:
val = self.max
self.value = self.type(val)
self.offsetValue=self.value
self.oldValue = self.value
#print "setValue ContourSpectrum"
if self.range is not None:
xcursor = (val - self.min) * float(self.widthMinusOne) / self.range
xcursor = xcursor + 1
self.drawCursor(xcursor)
if self.showLabel == 1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%self.value)
def setLabel(self, labCfg):
self.labCfg = labCfg
text = labCfg.get('text', None)
if text is None or text=='':
return
d={}
for k, w in self.labCfg.items():
if k == 'side': continue
else: d[k] = w
if not 'side' in self.labCfg.keys():
self.labCfg['side'] = 'left'
if not self.lab:
self.lab = Tkinter.Label(self, d)
self.lab.pack(side=self.labCfg['side'])
self.lab.bind("<Button-3>", self.toggleOptPanel)
else:
self.lab.configure(text)
#####################################################################
# the 'configure' methods:
#####################################################################
def configure(self, **kw):
for key,value in kw.items():
# the 'set' parameter callbacks
if key=='labCfg': self.setLabel(value)
elif key=='type': self.setType(value)
elif key=='min': self.setMin(value)
elif key=='max': self.setMax(value)
elif key=='precision': self.setPrecision(value)
elif key=='showLabel': self.setShowLabel(value)
elif key=='continuous': self.setContinuous(value)
# the 'lock' entries callbacks
elif key=='lockType': self.lockTypeCB(value)
elif key=='lockMin': self.lockMinCB(value)
elif key=='lockBMin': self.lockBMinCB(value)
elif key=='lockMax': self.lockMaxCB(value)
elif key=='lockBMax': self.lockBMaxCB(value)
elif key=='lockPrecision': self.lockPrecisionCB(value)
elif key=='lockShowLabel': self.lockShowLabelCB(value)
elif key=='lockValue': self.lockValueCB(value)
elif key=='lockContinuous': self.lockContinuousCB(value)
def setType(self, Type):
assert type(Type) in [types.StringType, types.TypeType],\
"Illegal type for datatype. Expected %s or %s, got %s"%(
type('a'), type(type), type(Type) )
if type(Type) == type(""): # type str
assert Type in ('int','float'),\
"Illegal type descriptor. Expected 'int' or 'float', got '%s'"%Type
self.type = eval(Type)
else:
self.type = Type
if self.type == int:
self.labelFormat = "%d"
self.int_value = self.value
else:
self.labelFormat = "%."+str(self.precision)+"f"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togIntFloat']['widget']
if self.type == int:
w.setvalue('int')
elif self.type == 'float':
w.setvalue('float')
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setMin(self, min):
if min is not None:
assert type(min) in [types.IntType, types.FloatType,
numpy.int, numpy.int8, numpy.int16,
numpy.int32, numpy.int64,
numpy.uint, numpy.uint8, numpy.uint16,
numpy.uint32, numpy.uint64,
numpy.float, numpy.float32, numpy.float64],\
"Illegal type for minimum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(min) )
if self.max and min > self.max:
min = self.max
self.min = self.type(min)
if self.showLabel == 1:
self.printLabel()
if self.value < self.min:
self.set(self.min)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.minInput.set(self.labelFormat%self.min)
self.opPanel.toggleMin.set(1)
self.opPanel.min_entry.configure(state='normal', fg='gray0')
self.minOld = self.min
else:
self.min = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMin.set(0)
self.opPanel.min_entry.configure(state='disabled',
fg='gray40')
if self.min is not None and self.max is not None:
self.range = float(self.max - self.min)
else:
self.range = None
def setMax(self, max):
if max is not None:
assert type(max) in [types.IntType, types.FloatType,
numpy.int, numpy.int8, numpy.int16,
numpy.int32, numpy.int64,
numpy.uint, numpy.uint8, numpy.uint16,
numpy.uint32, numpy.uint64,
numpy.float, numpy.float32, numpy.float64],\
"Illegal type for maximum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(max) )
if self.min and max < self.min:
max = self.min
self.max = self.type(max)
if self.showLabel == 1:
self.printLabel()
if self.value > self.max:
self.set(self.max)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.maxInput.set(self.labelFormat%self.max)
self.opPanel.toggleMax.set(1)
self.opPanel.max_entry.configure(state='normal', fg='gray0')
self.maxOld = self.max
else:
self.max = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMax.set(0)
self.opPanel.max_entry.configure(state='disabled', fg='gray40')
if self.min is not None and self.max is not None:
self.range = float(self.max - self.min)
else:
self.range = None
def setPrecision(self, val):
assert type(val) in [types.IntType, types.FloatType,
numpy.int, numpy.float32],\
"Illegal type for precision. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(val) )
val = int(val)
if val > 10:
val = 10
if val < 1:
val = 1
self.precision = val
if self.type == float:
self.labelFormat = "%."+str(self.precision)+"f"
else:
self.labelFormat = "%d"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['selPrec']['widget']
w.setvalue(val)
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setContinuous(self, cont):
""" cont can be None, 0 or 1 """
assert cont in [None, 0, 1],\
"Illegal value for continuous: expected None, 0 or 1, got %s"%cont
if cont != 1:
cont = None
self.continuous = cont
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togCont']['widget']
if cont:
w.setvalue('on')#i=1
else:
w.setvalue('off')#i=0
if self.opPanel:
self.opPanel.updateDisplay()
def setShowLabel(self, val):
"""Show label can be 0, 1 or 2
0: no label
1: label is always shown
2: show label only when value changes"""
assert val in [0,1,2],\
"Illegal value for showLabel. Expected 0, 1 or 2, got %s"%val
if val != 0 and val != 1 and val != 2:
print "Illegal value. Must be 0, 1 or 2"
return
self.showLabel = val
self.toggleWidgetLabel(val)
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togLabel']['widget']
if self.showLabel == 0:
label = 'never'
elif self.showLabel == 1:
label = 'always'
elif self.showLabel == 2:
label = 'move'
w.setvalue(label)
if self.opPanel:
self.opPanel.updateDisplay()
#####################################################################
# the 'lock' methods:
#####################################################################
def lockTypeCB(self, mode):
if mode != 0: mode = 1
self.lockType = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockMinCB(self, mode): #min entry field
if mode != 0: mode = 1
self.lockMin = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBMinCB(self, mode): # min checkbutton
if mode != 0: mode = 1
self.lockBMin = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockMaxCB(self, mode): # max entry field
if mode != 0: mode = 1
self.lockMax = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBMaxCB(self, mode): # max checkbutton
if mode != 0: mode = 1
self.lockBMax = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockPrecisionCB(self, mode):
if mode != 0: mode = 1
self.lockPrecision = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockShowLabelCB(self, mode):
if mode != 0: mode = 1
self.lockShowLabel = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockValueCB(self, mode):
if mode != 0: mode = 1
self.lockValue = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockContinuousCB(self, mode):
if mode != 0: mode = 1
self.lockContinuous = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def setSignatures(self, signatures):
self.signatures = signatures
self.sigData = []
# get the values
self.maxFun = []
self.minFun = []
for s in self.signatures:
x = Numeric.zeros( (s.nval,), 'f')
s.getFx(x)
self.minix = mini = min(x)
if (isinstance(mini, Numeric.ArrayType)) and (mini.shape == () ):
mini = mini[0]
maxi = max(x)
if (isinstance(maxi, Numeric.ArrayType)) and (maxi.shape == () ):
maxi = maxi[0]
self.rangex = range = maxi-mini
if range != 0:
x = (((x-mini)/range)*self.widthMinusOne).astype('i')
y = Numeric.zeros( (s.nval,), 'f')
s.getFy(y)
self.sigData.append( (x,y) )
self.maxFun.append(max(y))
self.minFun.append(min(y))
self.setMin(mini)
self.setMax(maxi)
# iso value with hightest value in first function
if len(self.sigData):
ind = list(self.sigData[0][1]).index(max(self.sigData[0][1]))
self.setXcursor(ind)
else:
self.setXcursor(0.0)
self.drawSignatures()
def drawSignatures(self):
# compute normalization factors
self.yratios = []
maxi = max(self.maxFun)
for i in range(4):
h = self.height-1.
if maxi != 0 and self.maxFun[i] != 0:
self.yratios.append( (h/maxi)*(maxi/self.maxFun[i]) )
else:
self.yratios.append( 0 )
for l in self.tkLines:
self.canvas.delete(l)
for i, f in enumerate(self.sigData):
coords = []
for x,y in zip (f[0], f[1]):
coords.append(x)
coords.append(self.height-y*self.yratios[i])
self.tkLines.append( apply( self.canvas.create_line, coords,
{'fill':self.colors[i]}) )
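# Illustrative standalone usage sketch (added; not part of the original
# module). Signature objects normally come from the UT isocontouring
# library, so they are omitted and only the bare Tk widget is shown.
#
#   def report(value):
#       print value  # Python 2 print, matching the rest of this module
#   root = Tkinter.Tk()
#   cs = ContourSpectrum(root, type='float', min=0.0, max=1.0, callback=report)
#   root.mainloop()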
| 2.25 | 2 |
resources/lib/ttml2srt.py | dhoffend/plugin.video.mediathekview | 0 | 12786916 | # -*- coding: utf-8 -*-
# Copyright 2017 <NAME>
# See https://github.com/codingcatgirl/ttml2srt
#
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import io
from datetime import timedelta
from defusedxml import ElementTree as ET
def ttml2srt( infile, outfile ):
tree = ET.parse( infile )
root = tree.getroot()
# strip namespaces
for elem in root.getiterator():
elem.tag = elem.tag.split('}', 1)[-1]
elem.attrib = {name.split('}', 1)
[-1]: value for name, value in elem.attrib.items()}
# get styles
styles = {}
for elem in root.findall('./head/styling/style'):
style = {}
if 'color' in elem.attrib:
color = elem.attrib['color']
if color not in ('#FFFFFF', '#000000'):
style['color'] = color
if 'fontStyle' in elem.attrib:
fontstyle = elem.attrib['fontStyle']
if fontstyle in ('italic', ):
style['fontstyle'] = fontstyle
styles[elem.attrib['id']] = style
body = root.find('./body')
# parse correct start and end times
def parse_time_expression(expression, default_offset=timedelta(0)):
offset_time = re.match(r'^([0-9]+(\.[0-9]+)?)(h|m|s|ms|f|t)$', expression)
if offset_time:
time_value, _, metric = offset_time.groups()
time_value = float(time_value)
if metric == 'h':
return default_offset + timedelta(hours=time_value)
elif metric == 'm':
return default_offset + timedelta(minutes=time_value)
elif metric == 's':
return default_offset + timedelta(seconds=time_value)
elif metric == 'ms':
return default_offset + timedelta(milliseconds=time_value)
elif metric == 'f':
raise NotImplementedError(
'Parsing time expressions by frame is not supported!')
elif metric == 't':
raise NotImplementedError(
'Parsing time expressions by ticks is not supported!')
clock_time = re.match(
r'^([0-9]{2,}):([0-9]{2,}):([0-9]{2,}(\.[0-9]+)?)$', expression)
if clock_time:
hours, minutes, seconds, _ = clock_time.groups()
return timedelta(hours=int(hours), minutes=int(minutes), seconds=float(seconds))
clock_time_frames = re.match(
r'^([0-9]{2,}):([0-9]{2,}):([0-9]{2,}):([0-9]{2,}(\.[0-9]+)?)$', expression)
if clock_time_frames:
raise NotImplementedError(
'Parsing time expressions by frame is not supported!')
raise ValueError('unknown time expression: %s' % expression)
def parse_times(elem, default_begin=timedelta(0)):
if 'begin' in elem.attrib:
begin = parse_time_expression(
elem.attrib['begin'], default_offset=default_begin)
else:
begin = default_begin
elem.attrib['{abs}begin'] = begin
end = None
if 'end' in elem.attrib:
end = parse_time_expression(
elem.attrib['end'], default_offset=default_begin)
dur = None
if 'dur' in elem.attrib:
dur = parse_time_expression(elem.attrib['dur'])
if dur is not None:
if end is None:
end = begin + dur
else:
end = min(end, begin + dur)
elem.attrib['{abs}end'] = end
for child in elem:
parse_times(child, default_begin=begin)
parse_times(body)
timestamps = set()
for elem in body.findall('.//*[@{abs}begin]'):
timestamps.add(elem.attrib['{abs}begin'])
for elem in body.findall('.//*[@{abs}end]'):
timestamps.add(elem.attrib['{abs}end'])
timestamps.discard(None)
# render subtitles on each timestamp
def render_subtitles(elem, timestamp, parent_style=None):
if timestamp < elem.attrib['{abs}begin']:
return ''
if elem.attrib['{abs}end'] is not None and timestamp >= elem.attrib['{abs}end']:
return ''
result = ''
style = parent_style.copy() if parent_style is not None else {}
if 'style' in elem.attrib:
style.update(styles[elem.attrib['style']])
if 'color' in style:
result += '<font color="%s">' % style['color']
if style.get('fontstyle') == 'italic':
result += '<i>'
if elem.text:
result += elem.text.strip()
if len(elem):
for child in elem:
result += render_subtitles(child, timestamp)
if child.tail:
result += child.tail.strip()
if 'color' in style:
result += '</font>'
if style.get('fontstyle') == 'italic':
result += '</i>'
if elem.tag in ('div', 'p', 'br'):
result += '\n'
return result
rendered = []
for timestamp in sorted(timestamps):
rendered.append((timestamp, re.sub(r'\n\n\n+', '\n\n',
render_subtitles(body, timestamp)).strip()))
if not rendered:
exit(0)
# group timestamps together if nothing changes
rendered_grouped = []
last_text = None
for timestamp, content in rendered:
if content != last_text:
rendered_grouped.append((timestamp, content))
last_text = content
# output srt
rendered_grouped.append((rendered_grouped[-1][0] + timedelta(hours=24), ''))
def format_timestamp(timestamp):
return ('%02d:%02d:%06.3f' % (timestamp.total_seconds() // 3600,
timestamp.total_seconds() // 60 % 60,
timestamp.total_seconds() % 60)).replace('.', ',')
if isinstance( outfile, str ) or isinstance( outfile, unicode ):
file = io.open( outfile, 'w', encoding='utf-8' )
else:
file = outfile
srt_i = 1
for i, (timestamp, content) in enumerate(rendered_grouped[:-1]):
if content == '':
continue
file.write( bytearray( '%d\n' % srt_i, 'utf-8' ) )
file.write( bytearray(
format_timestamp( timestamp ) +
' --> ' +
format_timestamp( rendered_grouped[i + 1][0] ) +
'\n'
) )
file.write( bytearray( content + '\n\n', 'utf-8' ) )
srt_i += 1
file.close()
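# Illustrative call (added for clarity; not part of the original module).
# The file names are placeholders; outfile may also be an already-open,
# writable file-like object, as handled at the end of ttml2srt() above.
#
#   ttml2srt('subtitles.ttml', 'subtitles.srt')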
| 1.9375 | 2 |
gw_full_latest/loraWAN.py | cuongdodinh/LowCostLoRaGw | 0 | 12786917 | #------------------------------------------------------------
# Copyright 2016 <NAME>, University of Pau, France.
#
# <EMAIL>
#
# This file is part of the low-cost LoRa gateway developped at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------
import sys
import re
import string
import base64
import LoRaWAN
from LoRaWAN.MHDR import MHDR
AppSKey = '<KEY>'
NwkSKey = '<KEY>'
PKT_TYPE_DATA=0x10
#to display non printable characters
replchars = re.compile(r'[\x00-\x1f]')
def replchars_to_hex(match):
return r'\x{0:02x}'.format(ord(match.group()))
def loraWAN_process_pkt(lorapkt):
appskey=bytearray.fromhex(AppSKey)
appskeylist=[]
for i in range (0,len(appskey)):
appskeylist.append(appskey[i])
nwkskey=bytearray.fromhex(NwkSKey)
nwkskeylist=[]
for i in range (0,len(nwkskey)):
nwkskeylist.append(nwkskey[i])
lorawan = LoRaWAN.new(nwkskeylist)
lorawan.read(lorapkt)
lorawan.compute_mic()
if lorawan.valid_mic():
print "?loraWAN: valid MIC"
lorawan = LoRaWAN.new(appskeylist)
lorawan.read(lorapkt)
plain_payload = ''.join(chr(x) for x in lorawan.get_payload())
print "?loraWAN: plain payload is "+replchars.sub(replchars_to_hex, plain_payload)
return plain_payload
else:
return "###BADMIC###"
if __name__ == "__main__":
argc=len(sys.argv)
if argc>1:
#we assume that the input frame is given in base64 format
lorapktstr_b64=sys.argv[1]
else:
sys.exit("loraWAN.py needs at least a base64 encoded string argument")
if argc>2:
pdata=sys.argv[2]
arr = map(int,pdata.split(','))
dst=arr[0]
ptype=arr[1]
ptype=PKT_TYPE_DATA
src=arr[2]
seq=arr[3]
datalen=arr[4]
SNR=arr[5]
RSSI=arr[6]
if argc>3:
rdata=sys.argv[3]
lorapktstr=base64.b64decode(lorapktstr_b64)
lorapkt=[]
for i in range (0,len(lorapktstr)):
lorapkt.append(ord(lorapktstr[i]))
plain_payload=loraWAN_process_pkt(lorapkt)
if plain_payload=="###BADMIC###":
print '?'+plain_payload
else:
print "?plain payload is : "+plain_payload
if argc>2:
print "^p%d,%d,%d,%d,%d,%d,%d" % (dst,ptype,src,seq,len(plain_payload),SNR,RSSI)
if argc>3:
print "^r"+rdata
print "\xFF\xFE"+plain_payload
| 2.28125 | 2 |
binding.gyp | codyrigney92/node-rpi-si4703 | 0 | 12786918 | <reponame>codyrigney92/node-rpi-si4703
{
"targets": [
{
"target_name": "node-rpi-si4703",
"cflags!": ['-fno-exceptions -std=c++11'],
"cflags_cc!": ['-fno-exceptions -std=c++11'],
"sources": ["src/node-rpi-si4703.cpp", "src/FMTuner.cpp", "src/FMTuner.h", "src/rpi-si4703/Si4703_Breakout.cpp", "src/rpi-si4703/Si4703_Breakout.h"],
"libraries": ["-lwiringPi"]
},
{
"target_name": "action_after_build",
"type": "none",
"dependencies": ["node-rpi-si4703"],
"copies": [
{
"files": ["<(PRODUCT_DIR)/node-rpi-si4703.node"],
"destination": "node-rpi-si4703"
}
]
}
]
}
| 1.109375 | 1 |
timer_decor.py | romchegue/Python | 0 | 12786919 | # file: timer_decor.py
import time
class timer:
def __init__(self, func):
self.func = func
self.alltime = 0
def __call__(self, *args, **kwargs):
start = time.time()
result = self.func(*args, **kwargs)
elapsed = time.time() - start
self.alltime += elapsed
# print('%s: %.5f, %.5f' % (self.func.__name__, elapsed, self.alltime))
print('{0}: {1:.5f}, {2:.5f}'.format(self.func.__name__, elapsed, self.alltime))
return result
@timer
def listcomp(N):
return [x * 2 for x in range(N)]
@timer
def mapcall(N):
return map((lambda x: x * 2), range(N))
result = listcomp(5)
listcomp(50000)
listcomp(500000)
listcomp(1000000)
print(result)
print('allTime = {0}'.format(listcomp.alltime))
print('')
result = mapcall(5)
mapcall(50000)
mapcall(500000)
mapcall(1000000)
print(result)
print('allTime = {0}'.format(mapcall.alltime))
print('map/comp = {0}'.format(round(mapcall.alltime / listcomp.alltime, 3)))
| 3.765625 | 4 |
keras_dgl/__init__.py | michael-cowan/keras-deep-graph-learning | 0 | 12786920 | from keras_dgl._version import __version__
| 1.023438 | 1 |
src/app/models.py | 510908220/heartbeats | 23 | 12786921 | <reponame>510908220/heartbeats
# -*- coding:utf-8 -*-
from django.db import models
from django.conf import settings
# Create your models here.
class Tag(models.Model):
class Meta:
db_table = "tag"
name = models.CharField(max_length=200, unique=True, blank=False, null=False)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Service(models.Model):
class Meta:
db_table = "service"
STATUS = (
('running', 'running'),
('stoped', 'stoped'),
)
TYPES = (
('at', 'at'),
('every', 'every'),
)
name = models.CharField(max_length=200, unique=True, blank=False, null=False)
status = models.CharField(choices=STATUS, default=STATUS[0][0], max_length=20)
tp = models.CharField(choices=TYPES, default=TYPES[0][0], max_length=20)
value = models.CharField(max_length=200, default='')
notify_to = models.TextField(default="")
grace = models.IntegerField(default=0)
short_url = models.CharField(max_length=200, unique=True, blank=True, null=True)
tags = models.ManyToManyField(Tag, related_name='services')
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
class Ping(models.Model):
class Meta:
db_table = "ping"
service = models.ForeignKey(Service, related_name='pings', on_delete=models.CASCADE)
remote_addr = models.GenericIPAddressField(blank=True, null=True)
ua = models.CharField(max_length=200, blank=True)
data = models.TextField(blank=True)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "{}-{}".format(self.service.name, self.id)
| 2.09375 | 2 |
bkt.py | JonathanSilver/pyKT | 1 | 12786922 | <gh_stars>1-10
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error
from math import sqrt
import os
import json
from pprint import pprint
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--problems', type=str, help='file path to problems.json')
parser.add_argument('-s', '--submissions', type=str, help='file path to user_submissions.json')
parser.add_argument('-d', '--dir', type=str, help='dir to models')
parser.add_argument('-e', '--epochs', type=int, default=10, help='number of epochs to train for each BKT')
parser.add_argument('-f', '--fits', type=int, default=10, help='number of BKTs to train for each skill')
parser.add_argument('--forget', action='store_true', default=False, help='enable BKT to forget')
parser.add_argument('--restore', action='store_true', default=False, help='restore models from the dir')
parser.add_argument('--alpha', type=float, default=.05, help='adam-alpha')
parser.add_argument('--betas', type=float, nargs=2, default=[.9, .999], help='adam-betas')
parser.add_argument('-k', type=int, default=1, help='k-fold cross validation')
parser.add_argument('--seed', type=int, default=1, help='random seed')
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
class BKT(nn.Module):
def __init__(self, forgetting=True):
super(BKT, self).__init__()
self.forgetting = forgetting
self.L0 = nn.Parameter(torch.tensor(np.random.randn()), requires_grad=True)
self.T = nn.Parameter(torch.tensor(np.random.randn()), requires_grad=True)
if forgetting:
self.F = nn.Parameter(torch.tensor(np.random.randn()), requires_grad=True)
self.G = nn.Parameter(torch.tensor(np.random.randn()), requires_grad=True)
self.S = nn.Parameter(torch.tensor(np.random.randn()), requires_grad=True)
def forward(self, x):
"""
:param x: (num_action, batch_size)
:return: (num_pred, batch_size)
"""
trans = torch.sigmoid(self.T)
if self.forgetting:
forget = torch.sigmoid(self.F)
else:
forget = torch.tensor(0.)
guess = torch.sigmoid(self.G)
slip = torch.sigmoid(self.S)
one = torch.ones(x.size(1))
learn = one * torch.sigmoid(self.L0)
y = torch.zeros(x.size())
for t in range(x.size(0)):
# P(correct(t)) = P(L(t)) * (1 - P(S)) + (1 - P(L(t))) * P(G)
correct = learn * (one - slip) + (one - learn) * guess
y[t] = correct
# action = correct:
# P(L(t)|correct(t)) = (P(L(t)) * (1 - P(S))) / P(correct(t))
# action = incorrect
# P(L(t)|incorrect(t)) = (P(L(t)) * P(S)) / P(incorrect(t))
conditional_probability = x[t] * (learn * (one - slip) / correct) \
+ (one - x[t]) * (learn * slip / (one - correct))
            # P(L(t+1)) = P(L(t)|action(t)) * (1 - P(F)) + (1 - P(L(t)|action(t))) * P(T)
learn = conditional_probability * (one - forget) + (one - conditional_probability) * trans
return y
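# Minimal usage sketch for the BKT module above. The shapes and values below are
# illustrative assumptions, not part of the original training pipeline.
def _bkt_usage_sketch():
    model = BKT(forgetting=False)
    actions = torch.randint(0, 2, (20, 8)).float()  # (num_action, batch_size) of 0/1 outcomes
    return model(actions)  # predicted P(correct) per step, same shape as actions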
def fit(x, mask, num_epochs=args.epochs, lr=args.alpha, betas=args.betas, num_fit=args.fits,
forgetting=args.forget, restore_model=args.restore,
test_x=None, test_mask=None, title=None):
"""
randomly initialize num_fit BKT models,
use Adam to optimize the MSE loss function,
the training set is used to estimate the parameters,
the best estimation is the one with the highest
ROC AUC score on the training set,
the prediction for the test set
and the best estimated parameters are returned.
:param x: training set, sized (num_action, batch)
:param mask: training set mask, sized (num_action, batch)
:param num_epochs: for each BKT, the number of epochs used to train the model
:param lr: learning rate for optimizer
:param num_fit: number of random initialized BKTs to estimate parameters
:param forgetting: whether to enable forgetting in BKT
:param restore_model: whether to restore model if model exists
:param test_x: test set, sized (num_action, batch)
:param test_mask: test set mask, sized (num_action, batch)
:param title: the name of the model (a.k.a. the chart title)
:return: the prediction for the test set, along with the model parameters
"""
counter = 0
best_bkt = None
best_score = 0
best_loss = None
if title:
model_path = os.path.join(args.dir, 'bkt - ' + title + (' - f' if forgetting else '') + '.pth')
else:
model_path = None
if model_path and restore_model and os.path.exists(model_path):
best_bkt = BKT(forgetting=forgetting)
best_bkt.load_state_dict(torch.load(model_path))
else:
while counter != num_fit:
counter += 1
bkt = BKT(forgetting=forgetting)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(bkt.parameters(), lr=lr, betas=betas)
epoch = 0
loss_list = []
while epoch != num_epochs:
epoch += 1
bkt.train()
with torch.enable_grad():
optimizer.zero_grad()
y = bkt(x)
loss = loss_fn(y * mask, x)
loss.backward()
nn.utils.clip_grad_norm_(bkt.parameters(), max_norm=2.)
optimizer.step()
loss_list.append(loss.item())
print(counter, epoch, loss_list[-1])
bkt.eval()
with torch.no_grad():
y = bkt(x) * mask
y_true = x.masked_select(mask != 0).numpy()
y_pred = y.masked_select(mask != 0).numpy()
try:
score = roc_auc_score(y_true, y_pred)
if score > best_score:
best_bkt = bkt
best_score = score
best_loss = loss_list
except ValueError as e:
print('during fitting model %d:' % counter)
print(e)
print('refitting...')
counter -= 1
if model_path:
torch.save(best_bkt.state_dict(), model_path)
best_bkt.eval()
with torch.no_grad():
if test_x is not None and test_mask is not None:
y = best_bkt(test_x)
else:
y = best_bkt(x)
# if best_loss:
# plt.plot(best_loss)
# if title:
# plt.title(title)
# plt.show()
return y * test_mask if test_x is not None and test_mask is not None else y * mask, {
'prior': torch.sigmoid(best_bkt.L0).item(),
'learn': torch.sigmoid(best_bkt.T).item(),
'forget': torch.sigmoid(best_bkt.F).item() if forgetting else 0.,
'guess': torch.sigmoid(best_bkt.G).item(),
'slip': torch.sigmoid(best_bkt.S).item()
}
with open(args.problems, 'r') as file:
problems = json.load(file)
problem_id_2_tag_ids = {problem['id']: problem['tags'] for problem in problems}
tags = set()
for problem in problems:
tags |= set(problem['tags'])
tags = list(sorted(list(tags)))
with open(args.submissions, 'r') as file:
user_submissions = json.load(file)
def prepare_data(tag, training, group):
ret_data = []
ret_max_length = 0
for user_data in user_submissions:
user_group = user_data['group']
if training and user_group == group \
or not training and user_group != group:
continue
submissions = user_data['submissions']
record = []
for sub in submissions:
if tag in problem_id_2_tag_ids[sub['problem']]:
record.append(sub['verdict'])
if len(record):
ret_data.append(record)
ret_max_length = max(ret_max_length, len(record))
return ret_data, ret_max_length
def convert(data, data_max_length):
batch_size = len(data)
ret_x = np.zeros((data_max_length, batch_size))
ret_mask = np.zeros((data_max_length, batch_size))
for idx in range(batch_size):
for i in range(len(data[idx])):
ret_x[i][idx] = data[idx][i]
ret_mask[i][idx] = 1
return torch.tensor(ret_x), torch.tensor(ret_mask)
def run(group):
y_true = np.zeros(0)
y_pred = np.zeros(0)
for tag in tags:
train, train_max_length = prepare_data(tag, training=True, group=group)
test, test_max_length = prepare_data(tag, training=False, group=group)
if train_max_length and test_max_length:
print("data set for '%d' has been prepared" % tag)
train_x, train_mask = convert(train, train_max_length)
test_x, test_mask = convert(test, test_max_length)
print(train_x.shape, test_x.shape)
print('fitting')
test_y, params = fit(x=train_x, mask=train_mask,
test_x=test_x, test_mask=test_mask,
title=str(tag) + ' - ' + str(group))
# pprint(params)
y_true_part = test_x.masked_select(test_mask != 0).numpy()
y_pred_part = test_y.masked_select(test_mask != 0).numpy()
y_true = np.concatenate([y_true, y_true_part])
y_pred = np.concatenate([y_pred, y_pred_part])
print("ROC AUC on '%d': %.10f" % (tag, roc_auc_score(y_true_part, y_pred_part)))
auc = roc_auc_score(y_true, y_pred)
rmse = sqrt(mean_squared_error(y_true, y_pred))
mae = mean_absolute_error(y_true, y_pred)
print('ROC AUC: {}'.format(auc))
print('RMSE: {}'.format(rmse))
print('MAE: {}'.format(mae))
return auc, rmse, mae
def main():
k = args.k
auc = np.zeros(k)
rmse = np.zeros(k)
mae = np.zeros(k)
for i in range(k):
print('group %d:' % i)
auc[i], rmse[i], mae[i] = run(i)
print('-' * 30)
print('ROC AUC: {} (+/- {})'.format(auc.mean(), auc.std()))
print('RMSE: {} (+/- {})'.format(rmse.mean(), rmse.std()))
print('MAE: {} (+/- {})'.format(mae.mean(), mae.std()))
if __name__ == '__main__':
main()
| 2.125 | 2 |
pynepsys/__init__.py | jpelzer/pynepsys | 0 | 12786923 | <reponame>jpelzer/pynepsys<filename>pynepsys/__init__.py
from pynepsys.pynepsys import Apex, Probe, Outlet
__version__ = "1.2.0"
| 1 | 1 |
backend/messages/serializers.py | HillalRoy/Studenthut | 1 | 12786924 | from .models import ClassMessage, Message
from rest_framework import serializers
from django.contrib.auth.models import User
class UserUserNameSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username']
class MessageSerializers(serializers.ModelSerializer):
sender = UserUserNameSerializer(read_only=True)
class Meta:
model = Message
fields = ['body', 'sender', 'parent_msg', 'sent_at']
read_only_fields = ['sent_at', 'sender']
class ClassMessegeSerializers(serializers.ModelSerializer):
msg = MessageSerializers()
class Meta:
model = ClassMessage
fields = ['msg', 'classes']
def create(self, validated_data):
# order = Order.objects.get(pk=validated_data.pop('event'))
# instance = Equipment.objects.create(**validated_data)
# Assignment.objects.create(Order=order, Equipment=instance)read_only=True)
msg_data = validated_data.pop('msg')
sender = validated_data.pop('msg__sender')
msg = Message.objects.create(
**msg_data, sender=sender)
msg.save()
instance = ClassMessage.objects.create(**validated_data, msg=msg)
return instance
| 2.1875 | 2 |
moyu_engine/config/main.py | MoYuStudio/MYSG01 | 0 | 12786925 |
import sys
import pygame
from pygame.locals import *
import moyu_engine.config.data.constants as C
import moyu_engine.config.system.assets_system
import moyu_engine.config.system.tilemap_system
import moyu_engine.config.system.move_system
import moyu_engine.config.window.main_window
def init():
pygame.init()
pygame.mixer.init()
SCREEN = pygame.display.set_mode(C.window['size'],pygame.RESIZABLE)
SCREEN_TITLE = pygame.display.set_caption(C.window['title'])
#pygame.display.set_icon(G.tl16)
CLOCK = pygame.time.Clock()
pygame.display.flip()
moyu_engine.config.system.assets_system.AssetsSystem.loader()
moyu_engine.config.system.tilemap_system.TilemapSystem.builder()
while True:
moyu_engine.config.system.move_system.MoveSystem.move()
moyu_engine.config.window.main_window.MainWindow.blit()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
CLOCK.tick(C.window['fps'])
def run():
init()
if __name__ == "__main__":
pass
| 2.34375 | 2 |
knn.py | AlparslanErol/KNN | 0 | 12786926 | <filename>knn.py
from csv import reader
from math import sqrt
import matplotlib.pyplot as plt
# Load a CSV file
def load_csv(filename):
dataset = list()
with open(filename, 'r') as file:
csv_reader = reader(file)
for row in csv_reader:
if not row:
continue
dataset.append(row)
return dataset
# Edit test and train dataset
def edit_data(dataset):
del dataset[0]
for val in dataset:
del val[0]
for i in range(len(dataset[0])-1):
str_column_to_float(dataset, i)
str_column_to_int(dataset, len(dataset[0])-1)
# Convert string column to float
def str_column_to_float(dataset, column):
for row in dataset:
row[column] = float(row[column].strip())
# Convert string column to integer
def str_column_to_int(dataset, column):
class_values = [row[column] for row in dataset]
unique = set(class_values)
lookup = dict()
for i, value in enumerate(unique):
lookup[value] = i
print('[%s] => %d' % (value, i))
for row in dataset:
row[column] = lookup[row[column]]
return lookup
# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
correct = 0
for i in range(len(actual)):
if actual[i] == predicted[i]:
correct += 1
return correct / float(len(actual)) * 100.0
# Evaluate the algorithm on the test set for each k value in k_list
def evaluate_algorithm(train_set, test_set, algorithm, k_list):
scores = list()
for k in k_list:
actual = list()
predicted = algorithm(train_set, test_set, k)
for val in test_set:
actual.append(val[-1])
accuracy = accuracy_metric(actual, predicted)
scores.append(accuracy)
return scores
# Calculate the Euclidean distance between two vectors
def euclidean_distance(row1, row2):
distance = 0.0
for i in range(len(row1)-1):
distance += (row1[i] - row2[i])**2
return sqrt(distance)
# Locate the most similar neighbors
def get_neighbors(train, test_row, num_neighbors):
distances = list()
for train_row in train:
dist = euclidean_distance(test_row, train_row)
distances.append((train_row, dist))
distances.sort(key=lambda tup: tup[1])
neighbors = list()
for i in range(num_neighbors):
neighbors.append(distances[i][0])
return neighbors
# Make a prediction with neighbors
def predict_classification(train, test_row, num_neighbors):
neighbors = get_neighbors(train, test_row, num_neighbors)
output_values = [row[-1] for row in neighbors]
prediction = max(set(output_values), key=output_values.count)
return prediction
# kNN Algorithm
def k_nearest_neighbors(train, test, num_neighbors):
predictions = list()
for row in test:
output = predict_classification(train, row, num_neighbors)
predictions.append(output)
return(predictions)
train_set = load_csv('iristrain.csv')
test_set = load_csv('iristest.csv')
edit_data(train_set)
edit_data(test_set)
#k_list = list()
#number = int(input("how many value you want in a list: "))
#for i in range(0,number):
# numbers = int(input("enter your choice number:"))
# k_list.append(numbers)
#
k_list = list(range(1,50,2))
for num, val in enumerate(k_list):
print("K-Value {}..: ".format(num+1),val)
# evaluate algorithm
scores = evaluate_algorithm(train_set, test_set, k_nearest_neighbors, k_list)
print('Scores: %s' % scores)
print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))
## define a new record
#row = [5.7,2.9,4.2,1.3]
#num_neighbors = 5
## predict the label
#label = predict_classification(train_set, row, num_neighbors)
#print('Data=%s, Predicted: %s' % (row, label))
# PLOT
# =============================================================================
plt.figure()
plt.bar(k_list,scores, label = "Scores for K-Values")
plt.ylim(90,100)
plt.ylabel('Accuracy Scores')
plt.xlabel('K-Values')
plt.title('KNN')
plt.legend()
plt.show()
# =============================================================================
| 3.734375 | 4 |
src/roughml/shared/decorators.py | billsioros/RoughML | 0 | 12786927 | import inspect
import logging
import time
from functools import wraps
logger = logging.getLogger(__name__)
def benchmark(method):
"""The following decorator aims at calculating the decorated function's
execution time and is used to benchmark our various approaches and assist
us in coming up with a comprehensive comparison of their efficiency.
"""
@wraps(method)
def wrapper(*args, **kwargs):
beg = time.time()
rv = method(*args, **kwargs)
end = time.time()
logger.info("%s returned after %7.3f seconds", method.__name__, end - beg)
return rv
return wrapper
def debug(method):
"""The following decorator serves at emitting details regarding the decorated
function's calls.
In more detai, the information emitted is:
- The function's name.
- Its positional and keyword arguements for the function call at hand.
- Any exception that the function `raises`.
In addition to that, the `debug` decorator passes a special boolean keyword arguement
by the name `debug`, if and only if it is included in the function signature.
You can then utilize this arguement inside the decorated function and emit additional
information.
"""
signature = inspect.signature(method)
defaults = {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
@wraps(method)
def wrapper(*args, **kwargs):
called_with = ""
if args:
called_with += ", ".join(str(x) for x in args)
called_with += ", "
called_with += ", ".join(
f"{x}={kwargs.get(x, defaults[x])}" for x in defaults.keys()
)
try:
rv = method(*args, **kwargs)
except Exception as e:
logger.debug(f"%s(%s) raised %s", method.__name__, called_with, e)
raise
logger.debug(f"%s(%s) returned %s", method.__name__, called_with, rv)
return rv
return wrapper
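# Hypothetical usage sketch for the decorators above (illustrative only, not part
# of the original module): stacking @benchmark and @debug logs the call arguments
# and the elapsed time whenever the function is invoked.
@benchmark
@debug
def _decorated_usage_sketch(values, factor=2):
    return [v * factor for v in values]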
| 3.8125 | 4 |
icfs/cloudapi/cloud.py | bhanupratapjain/icfs | 0 | 12786928 | <gh_stars>0
from icfs.cloudapi.google import GDrive
# @class_decorator(logger)
class Cloud:
def __init__(self, gdrive_settings, tmp, creds):
self.gdrive_settings = gdrive_settings
self.clients = {}
self.creds = creds
self.tmp = tmp
def restore_gdrive(self, client_id):
g_drive = GDrive(self.tmp, self.creds, self.gdrive_settings)
g_drive.restore(client_id)
self.clients[client_id] = g_drive
def add_gdrive(self):
g_drive = GDrive(self.tmp, self.creds, self.gdrive_settings)
client_id = g_drive.init_auth()
self.clients[client_id] = g_drive
return client_id
# Raises CloudIOError
def pull(self, filename, client_id):
self.clients[client_id].pull(filename)
# Raises CloudIOError
def push(self, filename, client_id):
self.clients[client_id].push(filename)
# Raises CloudIOError
def push_all(self, file_list, client_id):
self.clients[client_id].push_all(file_list)
# Raises CloudIOError
def remove(self, filename, client_id):
self.clients[client_id].remove(filename)
# Removes everything from the cloud
def remove_all(self, client_id):
self.clients[client_id].remove_all()
def about(self, client_id):
about = self.clients[client_id].about()
return {
"current_user_name": about['name'],
"root_folder_id": about['rootFolderId'],
"total_quota": about['quotaBytesTotal'],
"used_quota": about['quotaBytesUsed']
}
| 2.296875 | 2 |
analyze_stats.py | JPEWdev/oe-icecream-demo | 4 | 12786929 | #! /usr/bin/env python3
#
# Copyright 2019 Garmin Ltd. or its subsidiaries
#
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import glob
import re
from scipy import stats
import numpy
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(THIS_DIR, 'poky', 'scripts', 'lib'))
from buildstats import BuildStats, diff_buildstats, taskdiff_fields, BSVerDiff
ICECREAM_TASKS = ('do_compile', 'do_compile_kernelmodules', 'do_configure', 'do_install')
VALUES = ('cputime', 'walltime')
def sum_task_totals(bs):
d = {}
for recipe_data in bs.values():
for name, bs_task in recipe_data.tasks.items():
for val_type in VALUES:
val = getattr(bs_task, val_type)
key = (name, val_type)
if name not in ICECREAM_TASKS:
key = ('other', val_type)
d.setdefault(key, 0)
d[key] += val
key = ('overall', val_type)
d.setdefault(key, 0)
d[key] += val
return d
def get_elapsed(p):
elapsed = None
cpu = None
with open(os.path.join(p, 'build_stats'), 'r') as f:
for l in f:
m = re.match(r'Elapsed time: (?P<elapsed>[\d.]+) ', l)
if m is not None:
elapsed = float(m.group('elapsed'))
continue
m = re.match(r'CPU usage: (?P<cpu>[\d.]+)%', l)
if m is not None:
cpu = float(m.group('cpu')) / 100
if elapsed is None:
raise Exception('Elapsed time not found for %s' % p)
if cpu is None:
raise Exception('CPU usage not found for %s' % p)
return (elapsed, cpu)
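# Pooled standard deviation of two equal-sized samples: sqrt((s1^2 + s2^2) / 2).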
def pooled_stdev(a_std_dev, b_std_dev):
return numpy.sqrt((a_std_dev**2 + b_std_dev**2)/2)
def write_elapsed():
with open(os.path.join(THIS_DIR, 'stats', 'elapsed.csv'), 'w') as f:
f.write('Build,Elapsed without Icecream,Elapsed with Icecream,CPU usage without Icecream,CPU usage with Icecream\n')
elapsed_combined_without = []
elapsed_combined_with = []
cpu_combined_without = []
cpu_combined_with = []
for p in glob.glob(os.path.join(THIS_DIR, 'stats', 'build*')):
without_elapsed, without_cpu = get_elapsed(os.path.join(p, 'without-icecream'))
with_elapsed, with_cpu = get_elapsed(os.path.join(p, 'with-icecream'))
elapsed_combined_without.append(without_elapsed)
elapsed_combined_with.append(with_elapsed)
cpu_combined_without.append(without_cpu)
cpu_combined_with.append(with_cpu)
f.write('%s,%f,%f,%f,%f\n' % (os.path.basename(p), without_elapsed, with_elapsed,
without_cpu, with_cpu))
f.write('\n')
f.write(',Average without Icecream (s),Without Icecream std dev,Average with Icecream (s),With Icecream std dev,p-value,Percent Change,Percent Change std dev\n')
average_without = numpy.average(elapsed_combined_without)
average_with = numpy.average(elapsed_combined_with)
without_std_dev = numpy.std(elapsed_combined_without)
with_std_dev = numpy.std(elapsed_combined_with)
change = (average_with - average_without) / average_without
pooled_std_dev = pooled_stdev(without_std_dev, with_std_dev) / average_without
_, p = stats.ttest_rel(elapsed_combined_without, elapsed_combined_with)
f.write('Elapsed Time,%f,%f,%f,%f,%e,%.2f,%f\n' % (
average_without, without_std_dev,
average_with, with_std_dev, p,
change, pooled_std_dev))
f.write('\n')
f.write(',Average without Icecream,Without Icecream std dev,Average with Icecream,With Icecream std dev,p-value,Delta\n')
average_without = numpy.average(cpu_combined_without)
average_with = numpy.average(cpu_combined_with)
without_std_dev = numpy.std(cpu_combined_without)
with_std_dev = numpy.std(cpu_combined_with)
delta = average_with - average_without
_, p = stats.ttest_rel(cpu_combined_without, cpu_combined_with)
f.write('CPU Usage,%f,%f,%f,%f,%e,%.2f\n' % (
average_without, without_std_dev,
average_with, with_std_dev, p,
delta))
def write_tasks():
with open(os.path.join(THIS_DIR, 'stats', 'raw.csv'), 'w') as f:
combined_with = {}
combined_without = {}
f.write('Task,Attribute,Build,Without Icecream,With Icecream\n')
for p in glob.glob(os.path.join(THIS_DIR, 'stats', 'build*')):
without_stats = BuildStats.from_dir(os.path.join(p, 'without-icecream'))
with_stats = BuildStats.from_dir(os.path.join(p, 'with-icecream'))
without_d = sum_task_totals(without_stats)
with_d = sum_task_totals(with_stats)
for k in without_d.keys():
without_val = without_d[k]
with_val = with_d[k]
f.write("%s,%s,%s,%f,%f\n" % (k[0], k[1], os.path.basename(p), without_val, with_val))
combined_with.setdefault(k, []).append(with_val)
combined_without.setdefault(k, []).append(without_val)
with open(os.path.join(THIS_DIR, 'stats', 'totals.csv'), 'w') as f:
f.write('Task,Attribute,Without Icecream,Without Std dev,With Icecream,With Std dev,p-value,Percent Change,Percent Change Std Dev\n')
for k in combined_without.keys():
without_avg = numpy.average(combined_without[k])
with_avg = numpy.average(combined_with[k])
without_std_dev = numpy.std(combined_without[k])
with_std_dev = numpy.std(combined_with[k])
change = (with_avg - without_avg) / without_avg
pooled_std_dev = pooled_stdev(without_std_dev, with_std_dev) / without_avg
_, p = stats.ttest_rel(combined_without[k], combined_with[k])
f.write("%s,%s,%f,%f,%f,%f,%e,%.2f,%f\n" % (k[0], k[1], without_avg, without_std_dev, with_avg, with_std_dev, p, change, pooled_std_dev))
def main():
write_tasks()
write_elapsed()
if __name__ == "__main__":
main()
# exit on any error and unset variables
#set -u -e -o pipefail
#THIS_DIR="$(readlink -f $(dirname $0))"
#
#TASKS="do_configure do_compile do_install do_package_write_rpm"
#ATTRS="cputime walltime"
#
#echo "Task,Attribute,Build,Without Icecream,With Icecream" > $THIS_DIR/stats/stat.csv
#
#for d in $THIS_DIR/stats/build*; do
# for task in $TASKS; do
# for attr in $ATTRS; do
# VAL="$($THIS_DIR/poky/scripts/buildstats-diff --only-task $task --diff-attr $attr $d/without-icecream $d/with-icecream | tail -1)"
# echo "$task,$attr,$d,$(echo $VAL | sed 's/.*(\([0-9.]\+\)s).*(\([0-9.]\+\)s).*/\1,\2/g')" >> $THIS_DIR/stats/stat.csv
# done
# done
#done
| 2.078125 | 2 |
app.py | SlashNephy/chinachu-epgs-proxy | 1 | 12786930 | <filename>app.py
import os
import string
import requests
from flask import Flask, jsonify
HTTP_HOST = os.getenv("HTTP_HOST", "0.0.0.0")
HTTP_PORT = int(os.getenv("HTTP_PORT", "3000"))
EPGSTATION_HOST = os.getenv("EPGSTATION_HOST", "epgstation")
EPGSTATION_PORT = int(os.getenv("EPGSTATION_PORT", "8888"))
USE_HALF_WIDTH = int(os.getenv("USE_HALF_WIDTH", "0")) == 1
app = Flask(__name__)
def call_epgstation_api(path):
return requests.get(f"http://{EPGSTATION_HOST}:{EPGSTATION_PORT}/api{path}").json()
@app.route("/schedule.json", methods=["GET"])
def get_schedule():
    # Build a dict keyed by id so channels are easy to look up
channels = {
x["id"]: x
for x in call_epgstation_api("/channels")
}
return jsonify([
{
"type": channels[x["channel"]["id"]]["channelType"],
"channel": channels[x["channel"]["id"]]["channel"],
"name": channels[x["channel"]["id"]]["halfWidthName" if USE_HALF_WIDTH else "name"],
"id": to_base_36(x["channel"]["id"]),
"sid": channels[x["channel"]["id"]]["serviceId"],
"nid": channels[x["channel"]["id"]]["networkId"],
"hasLogoData": channels[x["channel"]["id"]]["hasLogoData"],
"n": channels[x["channel"]["id"]]["remoteControlKeyId"],
"programs": [
{
"id": to_base_36(y["id"]),
"category": {
0x0: "news",
0x1: "sports",
0x2: "information",
0x3: "drama",
0x4: "music",
0x5: "variety",
0x6: "cinema",
0x7: "anime",
0x8: "documentary",
0x9: "theater",
0xA: "hobby",
0xB: "welfare",
0xC: "etc",
0xD: "etc",
0xE: "etc",
0xF: "etc"
}[y.get("genre1", 0xF)],
"title": y["name"],
"fullTitle": y["name"], # TODO: Support flags such as [字]
"detail": y.get("description", ""),
"start": y["startAt"],
"end": y["endAt"],
"seconds": (y["endAt"] - y["startAt"]) / 1000,
"description": y.get("description", ""),
"extra": {
"_": y["extended"] # TODO
} if "extended" in y else {},
"channel": {
"type": channels[y["channelId"]]["channelType"],
"channel": channels[y["channelId"]]["channel"],
"name": channels[y["channelId"]]["halfWidthName" if USE_HALF_WIDTH else "name"],
"id": to_base_36(y["channelId"]),
"sid": channels[y["channelId"]]["serviceId"],
"nid": channels[y["channelId"]]["networkId"],
"hasLogoData": channels[y["channelId"]]["hasLogoData"],
"n": channels[y["channelId"]]["remoteControlKeyId"]
},
"subTitle": "", # Unsupported
"episode": None, # Unsupported
"flags": [] # Unsupported
}
for y in x["programs"]
]
}
for x in call_epgstation_api(f"/schedules?startAt=0&endAt=100000000000000&isHalfWidth={USE_HALF_WIDTH}&GR=true&BS=true&CS=true&SKY=true")
])
@app.route("/recording.json", methods=["GET"])
def get_recording():
    # Build a dict keyed by id so channels are easy to look up
channels = {
x["id"]: x
for x in call_epgstation_api("/channels")
}
return jsonify([
{
"id": to_base_36(x["programId"]),
"category": {
0x0: "news",
0x1: "sports",
0x2: "information",
0x3: "drama",
0x4: "music",
0x5: "variety",
0x6: "cinema",
0x7: "anime",
0x8: "documentary",
0x9: "theater",
0xA: "hobby",
0xB: "welfare",
0xC: "etc",
0xD: "etc",
0xE: "etc",
0xF: "etc"
}[x["genre1"]],
"title": x["name"],
"fullTitle": x["name"], # TODO: Support flags such as [字]
"detail": x.get("description", ""),
"start": x["startAt"],
"end": x["endAt"],
"seconds": (x["endAt"] - x["startAt"]) / 1000,
"description": x.get("description", ""),
"extra": {
"_": x["extended"] # TODO
} if "extended" in x else {},
"channel": {
"type": channels[x["channelId"]]["channelType"],
"channel": channels[x["channelId"]]["channel"],
"name": channels[x["channelId"]]["halfWidthName" if USE_HALF_WIDTH else "name"],
"id": to_base_36(x["channelId"]),
"sid": channels[x["channelId"]]["serviceId"],
"nid": channels[x["channelId"]]["networkId"],
"hasLogoData": channels[x["channelId"]]["hasLogoData"],
"n": channels[x["channelId"]]["remoteControlKeyId"]
},
"subTitle": "", # Unsupported
"episode": None, # Unsupported
"flags": [], # Unsupported
"isManualReserved": "ruleId" not in x,
"priority": 0, # Unsupported
"tuner": {
"name": "EPGStation",
"command": "",
"isScrambling": False
}, # Unsupported
"command": "", # Unsupported
"pid": 0, # Unsupported
"recorded": x["videoFiles"][0]["filename"]
}
for x in call_epgstation_api(f"/recording?offset=0&limit=10000&isHalfWidth={USE_HALF_WIDTH}")["records"]
])
@app.route("/reserves.json", methods=["GET"])
def get_reserves():
    # Build a dict keyed by id so channels are easy to look up
channels = {
x["id"]: x
for x in call_epgstation_api("/channels")
}
return jsonify([
{
"id": to_base_36(x["programId"]),
"category": {
0x0: "news",
0x1: "sports",
0x2: "information",
0x3: "drama",
0x4: "music",
0x5: "variety",
0x6: "cinema",
0x7: "anime",
0x8: "documentary",
0x9: "theater",
0xA: "hobby",
0xB: "welfare",
0xC: "etc",
0xD: "etc",
0xE: "etc",
0xF: "etc"
}[x["genre1"]],
"title": x["name"],
"fullTitle": x["name"], # TODO: Support flags such as [字]
"detail": x.get("description", ""),
"start": x["startAt"],
"end": x["endAt"],
"seconds": (x["endAt"] - x["startAt"]) / 1000,
"description": x.get("description", ""),
"extra": {
" ": x["extended"] # TODO
} if "extended" in x else {},
"channel": {
"type": channels[x["channelId"]]["channelType"],
"channel": channels[x["channelId"]]["channel"],
"name": channels[x["channelId"]]["halfWidthName" if USE_HALF_WIDTH else "name"],
"id": to_base_36(x["channelId"]),
"sid": channels[x["channelId"]]["serviceId"],
"nid": channels[x["channelId"]]["networkId"],
"hasLogoData": channels[x["channelId"]]["hasLogoData"],
"n": channels[x["channelId"]]["remoteControlKeyId"]
},
"subTitle": "", # Unsupported
"episode": None, # Unsupported
"flags": [], # Unsupported
"isConflict": x["isConflict"],
"recordedFormat": "" # Unsupported
}
for x in call_epgstation_api(f"/reserves?offset=0&limit=10000&isHalfWidth={USE_HALF_WIDTH}")["reserves"]
])
def to_base_36(value):
n = 36
characters = string.digits + string.ascii_lowercase
result = ""
tmp = value
while tmp >= n:
idx = tmp%n
result = characters[idx] + result
tmp = int(tmp / n)
idx = tmp%n
result = characters[idx] + result
return result
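# Worked example for to_base_36 (illustration only): to_base_36(1234) == "ya",
# since 1234 == 34 * 36 + 10, characters[34] == "y" and characters[10] == "a".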
if __name__ == "__main__":
app.run(host=HTTP_HOST, port=HTTP_PORT)
| 2.859375 | 3 |
inject.py | LouisRoss/spiking-datasets | 0 | 12786931 | <reponame>LouisRoss/spiking-datasets
import sys
import json
from support.realtime_manager import RealtimeManager,BufferManager
def parse_args():
filename = ''
repeats = 250
    period = 250 # milliseconds
multiples = 1
step = 0
if len(sys.argv) < 2:
print(f'Usage: {sys.argv[0]} <filename> [repeats] [period(ms)] [multiples] [step(indexes)]')
return False, filename, repeats, period, multiples, step
if len(sys.argv) > 1:
filename = sys.argv[1]
if len(sys.argv) > 2:
repeats = int(sys.argv[2])
if len(sys.argv) > 3:
period = int(sys.argv[3])
if len(sys.argv) > 4:
multiples = int(sys.argv[4])
if len(sys.argv) > 5:
step = int(sys.argv[5])
print(f'Expanding file {filename} {multiples:d} times at offsets of {step:d}. Sending the resulting expansion {repeats:d} times, repeating every {period:d} milliseconds')
return True, filename, repeats, period, multiples, step
def load_pattern(filename):
# A return variable
spikepattern = []
# Load the specified json file.
f = open(filename)
settings = json.load(f)
# Get the spike pattern array from the file if present.
if 'spikepattern' in settings:
spikepattern = settings['spikepattern']
# If a neuron assignments dictionary exists, use it to translate neuron symbols to indexes.
if 'neuronassignments' in settings:
neuronassignments = settings['neuronassignments']
for spike in spikepattern:
if spike[1] in neuronassignments:
spike[1] = neuronassignments[spike[1]]
print(spikepattern)
return spikepattern
def step_multiple(singleSpikePattern, step, multiple):
""" Given a single instance of the spike pattern,
plus the step distance between patterns and a multiple count,
        copy the pattern to the 'multiple' number of
places, separated by 'step' indices.
"""
fullSpikePattern = []
for i in range(multiple):
offset = i * step
for spikePattern in singleSpikePattern:
fullSpikePattern.append([spikePattern[0], spikePattern[1] + offset])
print(fullSpikePattern)
return fullSpikePattern
if __name__ == "__main__":
success, filename, repeats, period, multiple, step = parse_args()
if not success:
sys.exit()
spikepattern = load_pattern(filename)
fullSpikePattern = step_multiple(spikepattern, step, multiple)
with RealtimeManager('Research1') as realtime:
realtime.send_spikes_repeat(fullSpikePattern, period / 1000, repeats) | 2.78125 | 3 |
calcloudML/tests/test_example.py | alphasentaurii/calcloud-ai | 1 | 12786932 | <gh_stars>1-10
import pytest
import os
from calcloudML.makefigs import load_data
from calcloudML.makefigs import predictor
def test_data_import():
print(os.getcwd())
assert True
# df = load_data.get_single_dataset("data/hst_data.csv")
# instruments = list(df["instr_key"].unique())
# assert len(instruments) == 4
def test_model_import():
assert True
# clf = predictor.get_model("models/mem_clf")
# assert len(clf.layers) > 0
# def test_primes():
# assert primes(10) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# def test_imax_too_big():
# with pytest.raises(ValueError):
# primes(10001)
# def test_no_cython():
# with pytest.raises(NotImplementedError):
# do_primes(2, usecython=True)
# def test_cli(capsys):
# main(args=['-tp', '2'])
# captured = capsys.readouterr()
# assert captured.out.startswith('Found 2 prime numbers')
# assert len(captured.err) == 0
| 2.234375 | 2 |
midprice_profit_label/label_by_profit/jump_label_new_algo.py | anakinanakin/neural-network-on-finance-data | 1 | 12786933 | # use python3
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
from matplotlib import patches
from matplotlib.pyplot import figure
from datetime import timedelta, date
def date_range(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
def time2int(time_str: str) -> int:
"""Transform '01:57:00' to (int)157"""
return int(time_str[:2] + time_str[3:5])
def time2str(time_int: int) -> str:
"""Transform 157 to '01:57:00'"""
padded_str = str(time_int).zfill(4) # 157 becomes "0157"
return padded_str[:2] + ":" + padded_str[2:4] + ":00"
def narrow_adjust(closing_prices, leftmost_min_index, leftmost_max_index, curr_min, curr_max, window_lborder,
window_rborder):
best_min_index = leftmost_min_index
best_max_index = leftmost_max_index
if leftmost_min_index < leftmost_max_index:
while (closing_prices[best_min_index + 1] == curr_min):
best_min_index += 1
while (closing_prices[best_max_index - 1] == curr_max):
best_max_index -= 1
else:
while (closing_prices[best_min_index - 1] == curr_min):
best_min_index -= 1
while (closing_prices[best_max_index + 1] == curr_max):
best_max_index += 1
return best_min_index, best_max_index
def plot_graph(single_date,
closing_prices,
min_max_pairs,
min_close_price,
max_close_price,
hyperparams,
dark_mode=False):
if dark_mode:
plt.style.use('dark_background')
figure(figsize=(48, 10), dpi=100)
ax = plt.subplot(1, 1, 1)
for pair in min_max_pairs:
# print(pair)
# Green when price surges, red when price drops
if dark_mode:
curr_color = (.2, .45, .2) if pair[0] < pair[1] else (.4, .2, .2)
else:
curr_color = (.7, 1, .7) if pair[0] < pair[1] else (1, .7, .7)
ax.add_patch(
patches.Rectangle((min(pair[0], pair[1]), min_close_price),
abs(pair[0] - pair[1]),
max_close_price - min_close_price + 3,
color=curr_color))
if dark_mode:
plt.plot(closing_prices, color="#99ccff")
else:
plt.plot(closing_prices)
plt.legend(['Closing price'], fontsize=20)
plt.title(f'New Algorithm ({single_date.strftime("%Y-%m-%d")})\n' +
f'No. of green/red stripes: {len(min_max_pairs)}, ' + f'Window size: {hyperparams[0]}, ' +
f'Slope threshold: {hyperparams[1]}, ' + f'Jump size threshold: {hyperparams[2]}',
fontsize=30)
plt.xlabel('Minutes since 00:00:00', fontsize=25)
plt.xticks(fontsize=18)
plt.ylabel('Closing price', fontsize=25)
plt.yticks(fontsize=18)
plt.savefig("figures_new_algo/" + single_date.strftime("%Y-%m-%d") +
f'_{hyperparams[0]}__{hyperparams[1]}__{hyperparams[2]}_' + ('_(dark)' if dark_mode else '_(light)') +
'.png')
plt.clf()
def main(window_size, slope_threshold, jump_size_threshold):
# window_size = 5 # hyperparameter window size
# slope_threshold = 0.1 # hyperparameter slope threshold
# jump_size_threshold = 1.0 # hyperparameter jump size threshold
hyperparams = (window_size, slope_threshold, jump_size_threshold)
start_date = date(2010, 3, 24)
end_date = date(2010, 3, 27)
for single_date in date_range(start_date, end_date):
df = pd.read_csv(single_date.strftime("%Y-%m-%d") + '.csv')
df.sort_values(by='dt') # don't need?
times = df['tm'].values.tolist() # the time (hr:min:sec) column
closing_prices = df['close'].values.tolist() # the closing price column
max_close_price = max(closing_prices)
min_close_price = min(closing_prices)
start_time: int = time2int(times[0])
end_time: int = time2int(times[-1])
window_lborder: int = start_time
window_rborder: int = start_time + window_size # upperbound to be excluded
min_max_pairs = [] # list of start-end index pairs whose area between should be colored red/green
while window_lborder < end_time:
window_rborder = min(window_rborder, end_time)
curr_slice = closing_prices[window_lborder:window_rborder]
if len(curr_slice) == 0:
break
curr_min: float = min(curr_slice)
curr_max: float = max(curr_slice)
if curr_min == curr_max:
window_lborder = window_rborder
window_rborder += window_size
continue
leftmost_min_index: int = closing_prices.index(curr_min, window_lborder, window_rborder)
leftmost_max_index: int = closing_prices.index(curr_max, window_lborder, window_rborder)
best_min_index, best_max_index = narrow_adjust(closing_prices, leftmost_min_index, leftmost_max_index,
curr_min, curr_max, window_lborder, window_rborder)
if ((curr_max - curr_min) / abs(best_min_index - best_max_index) > slope_threshold) and (
(curr_max - curr_min) >= jump_size_threshold):
min_max_pairs.append([best_min_index, best_max_index])
window_lborder = max(best_min_index, best_max_index)
window_rborder = window_lborder + window_size
else:
window_lborder = window_rborder
window_rborder += window_size
plot_graph(single_date,
closing_prices,
min_max_pairs,
min_close_price,
max_close_price,
hyperparams,
dark_mode=True)
if __name__ == '__main__':
count = 0
for i in range(1, 16): # slope
for j in range(8, 26): # jump size
main(5, i / 10, j / 10)
count += 1
print(f">>>>>>{count*100/(15*18):.2f}% Done...\n")
| 2.953125 | 3 |
strip_visibility.py | lingochamp/tensorflow | 0 | 12786934 | <gh_stars>0
"""This script replaces any internal visibility with //visibility:public."""
from subprocess import call
import glob
import os
import os.path
import re
def _build_files(workspace_dir):
"""Return all BUILD files under the workspace dir.
This excludes symbolic links.
"""
build_files = []
for file in glob.glob(workspace_dir + "/**/BUILD", recursive=True):
# Glob will follow symbolic links, which creates a lot of trouble.
# Exclude files that are not within the current workspace dir.
if not file.startswith(workspace_dir):
continue
if file.startswith(os.path.join(workspace_dir, "bazel-")):
continue
build_files.append(file)
return build_files
if __name__ == "__main__":
workspace_dir = os.getcwd()
while not os.path.exists(os.path.join(workspace_dir, "WORKSPACE")):
workspace_dir = os.path.dirname(workspace_dir)
if workspace_dir == "/":
print("Script must be called within Bazel workspace.")
exit(1)
print("Workspace is ", os.path.join(workspace_dir, "WORKSPACE"))
build_files = _build_files(workspace_dir)
reg = re.compile("visibility = \[.*?\]", re.DOTALL)
# Call buildifier on all BUILD files.
# REQUIRES: /usr/local/bin/buildifier presents.
for file in build_files:
print("check", file)
# Read in the file
with open(file, "r") as f :
data = f.read()
# Replace the target string
data = reg.sub("visibility = [\"//visibility:public\"]", data)
# Write the file out again
with open(file, "w") as f:
f.write(data)
call(["/usr/local/bin/buildifier", "-v", "--mode=fix", file])
| 2.765625 | 3 |
seisflow/scripts/xsede/xsede_perform_psf_test.py | ziyixi/seisflow | 2 | 12786935 | """
xsede_perform_psf_test.py: get the preconditioned summed kernel for doing the PSF test.
It is essentially a copy of xsede_perform_structure_inversion.py, except that job3 is not submitted and part of job2 is folded into job1.
Some flags may therefore be unused.
"""
import sys
from os.path import join
import click
import sh
from ...slurm.submit_job import submit_job
from ...tasks.xsede.forward import forward_task
from ..shared.build_structure import Build_structure
from .xsede_perform_source_inversion import (calculate_misfit_windows,
calculate_stations_adjoint,
change_simulation_type,
collect_sync_files,
cp_stations_adjoint2structure,
ln_adjoint_source_to_structure)
from .xsede_process_kernel import \
construct_structure as construct_process_kernel_structure
from .xsede_process_kernel import do_preconditioned_summation
@click.command()
@click.option('--base_directory', required=True, type=str, help="the base inversion directory")
@click.option('--cmts_directory', required=True, type=str, help="the cmts directory")
@click.option('--ref_directory', required=True, type=str, help="the reference specfem directory")
@click.option('--windows_directory', required=True, type=str, help="the windows directory")
@click.option('--data_asdf_directory', required=True, type=str, help="the processed data directory")
@click.option('--data_info_directory', required=True, type=str, help="the data info directory")
@click.option('--last_step_model_update_directory', required=True, type=str, help="the last step smoothed kernel directory")
@click.option('--stations_path', required=True, type=str, help="the stations path")
@click.option('--sem_utils_directory', required=True, type=str, help="the sem_utils directory")
@click.option('--source_mask_directory', required=False, default="", type=str, help="the source mask directory")
@click.option('--n_total', required=True, type=int, help="the total number of events")
@click.option('--n_each', required=True, type=int, help="number of events to run in each iteration")
@click.option('--n_iter', required=True, type=int, help="the number of iterations to run")
@click.option('--nproc', required=True, type=int, help="the number of processes used for each event")
@click.option('--n_node', required=True, type=int, help="the number of nodes used in simulation")
@click.option('--partition', required=True, type=str, help="the partion name, eg: skx-normal")
@click.option('--time_forward', required=True, type=str, help="the time used in step 1")
@click.option('--account', required=True, type=str, help="the stampede2 account")
@click.option('--periods', required=True, type=str, help="periods in filtering: minp1,maxp1/minp2,maxp2/...")
@click.option('--waveform_length', required=True, type=int, help="the length of the waveform to cut")
@click.option('--sampling_rate', required=True, type=int, help="the sampling rate to use")
@click.option('--taper_tmin_tmaxs', required=True, type=str, help="the taper time bands: minp1,maxp1/minp2,maxp2/...")
def main(base_directory, cmts_directory, ref_directory, windows_directory, data_asdf_directory, data_info_directory, last_step_model_update_directory,
stations_path, sem_utils_directory, source_mask_directory,
n_total, n_each, n_iter, nproc, n_node, partition, time_forward, account,
periods, waveform_length, sampling_rate, taper_tmin_tmaxs):
"""
perform the structure inversion for the second iteration and later.
"""
time = time_forward
# * we have to build the structure to perform the structure inversion.
build_inversion_structure(base_directory, cmts_directory, ref_directory)
# * ======================================================================================================================
# * here we have to init the slurm script, no need to load modules here
result = "date; \n"
pyexec = sys.executable
current_path = str(sh.pwd())[:-1] # pylint: disable=not-callable
# * change the flags to -F
result += change_simulation_type(pyexec,
join(base_directory, 'simulation'), "forward_save")
# * submit the forward simulation job
forward_simulation_command = forward_task(base=join(base_directory, "simulation"),
N_total=n_total, N_each=n_each, N_iter=n_iter, nproc=nproc, run_mesh=True)
result += forward_simulation_command
result += f"cd {current_path}; \n"
# * collect the sync from the forward simulation
result += collect_sync_files(
pyexec, join(base_directory, 'output'), join(base_directory, 'raw_sync'))
# * process the sync
n_cores_each_event = nproc*n_each//n_total
# ! note here mvapich2 may have the problem of "time out". No better solution, try to use 24 cores here.
if(n_cores_each_event > 24):
n_cores_each_event = 24
result += process_sync(pyexec, n_total, join(base_directory,
"raw_sync"), join(base_directory, "processed_sync"), periods, waveform_length, sampling_rate, taper_tmin_tmaxs)
result += f"cd {current_path}; \n"
# * calculate the misfit windows
body_periods, surface_periods = periods.split("/")
body_periods_splitter = body_periods.split(",")
surface_periods_splitter = surface_periods.split(",")
min_periods = f"{body_periods_splitter[0]},{surface_periods_splitter[0]}"
max_periods = f"{body_periods_splitter[1]},{surface_periods_splitter[1]}"
result += calculate_misfit_windows(pyexec, n_total,
windows_directory, join(
base_directory, "misfit_windows"), min_periods, max_periods,
data_asdf_directory, join(base_directory, "processed_sync"), data_info_directory)
# * calculate the adjoint source, and ln it to the sem directory
result += calculate_adjoint_source(pyexec, n_total,
join(base_directory, "misfit_windows"), stations_path, join(
base_directory, "raw_sync"),
join(
base_directory, "processed_sync"), data_asdf_directory,
join(base_directory, "adjoint_source"), body_periods, surface_periods)
result += ln_adjoint_source_to_structure(pyexec,
join(base_directory, "adjoint_source"), join(base_directory, "simulation"))
# * generate STATIONS_ADJOINT and cp it to the simulation directory
result += calculate_stations_adjoint(pyexec, stations_path,
join(base_directory, "misfit_windows"), join(base_directory, "stations_adjoint"))
result += cp_stations_adjoint2structure(pyexec,
join(base_directory, "stations_adjoint"), join(base_directory, "simulation"))
# * change the simulation type to the type 3
result += change_simulation_type(pyexec,
join(base_directory, 'simulation'), "structure")
# * do the adjoint simulation
adjoint_simulation_command = forward_task(base=join(base_directory, "simulation"),
N_total=n_total, N_each=n_each, N_iter=n_iter, nproc=nproc, run_mesh=False)
result += adjoint_simulation_command
result += f"cd {current_path}; \n"
# * construct the processing kernel directory
kernel_process_directory = join(base_directory, "process_kernel")
input_model_directory = join(ref_directory, "DATA", "GLL")
construct_process_kernel_structure(
join(base_directory,
"database"), ref_directory, sem_utils_directory, kernel_process_directory,
input_model_directory, last_step_model_update=last_step_model_update_directory)
# * replace the source mask
result += replace_source_mask(pyexec, join(base_directory,
'simulation'), source_mask_directory)
# * do the summation
result += do_preconditioned_summation(kernel_process_directory)
# * here we submit the first job
submit_job("psf_test", result, n_node, n_each *
nproc, partition, time, account, "stampede2")
def build_inversion_structure(base_directory, cmts_directory, ref_directory):
"""
build_inversion_structure: build the structure to contain all the essencial directories used in the inversion and the simulation directory.
"""
sh.mkdir("-p", base_directory)
# * copy cmts_directory
sh.cp("-r", cmts_directory, join(base_directory, "cmts"))
# * init the simulation directory
output_path = join(base_directory, "output")
sh.mkdir("-p", output_path)
database_path = join(base_directory, "database")
sh.mkdir("-p", database_path)
simulation_path = join(base_directory, "simulation")
sh.mkdir("-p", simulation_path)
run_script = Build_structure(
base=simulation_path, cmtfiles=join(base_directory, "cmts"), ref=ref_directory,
output=output_path, database=database_path)
run_script.run()
# * make the directory for the sync of the forward simulation
sh.mkdir("-p", join(base_directory, "raw_sync"))
sh.mkdir("-p", join(base_directory, "processed_sync"))
# * mkdir for misfit windows
sh.mkdir("-p", join(base_directory, "misfit_windows"))
# * mkdir for adjoint source
sh.mkdir("-p", join(base_directory, "adjoint_source"))
sh.mkdir("-p", join(base_directory, "stations_adjoint"))
# * mkdir for kernel processing
sh.mkdir("-p", join(base_directory, "process_kernel"))
# * mkdir to collect the perturbed sync
sh.mkdir("-p", join(base_directory, "perturbed_sync"))
sh.mkdir("-p", join(base_directory, "processed_perturbed_sync"))
def calculate_adjoint_source(py, nproc, misfit_windows_directory, stations_path, raw_sync_directory, sync_directory,
data_directory, output_directory, body_band, surface_band):
"""
Calculatet the adjoint source for the structure inversion.
"""
script = f"ibrun -n {nproc} {py} -m seisflow.scripts.structure_inversion.mpi_calculate_adjoint_source_zerolagcc_multiple_events --misfit_windows_directory {misfit_windows_directory} --stations_path {stations_path} --raw_sync_directory {raw_sync_directory} --sync_directory {sync_directory} --data_directory {data_directory} --output_directory {output_directory} --body_band {body_band} --surface_band {surface_band}; \n"
return script
def replace_gll_link(py, simulation_directory, new_gll_directory):
"""
replace all gll links.
"""
script = f"{py} -m seisflow.scripts.structure_inversion.replace_gll_link --simulation_directory {simulation_directory} --new_gll_directory {new_gll_directory}; \n"
return script
def replace_source_mask(py, base_directory, source_mask_directory):
"""
replace source masks.
"""
script = f"{py} -m seisflow.scripts.structure_inversion.replace_source_mask --base_directory {base_directory} --source_mask_directory {source_mask_directory}; \n"
return script
def process_sync(py, nproc, sync_directory, output_directory, periods, waveform_length, sampling_rate, taper_tmin_tmaxs):
"""
process the sync.
"""
script = f"ibrun -n {nproc} {py} -m seisflow.scripts.asdf.mpi_process_sync_series --sync_directory {sync_directory} --output_directory {output_directory} --periods {periods} --waveform_length {waveform_length} --sampling_rate {sampling_rate} --taper_tmin_tmaxs {taper_tmin_tmaxs}; \n"
return script
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
| 2.046875 | 2 |
instrument/output/__init__.py | wearpants/instrument | 7 | 12786936 | import sys
def _do_print(name, count, elapsed, file):
if name is not None:
print("%s: %d items in %.2f seconds"%(name, count, elapsed), file=file)
else:
print("%d items in %.2f seconds"%(count, elapsed), file=file)
def print_metric(name, count, elapsed):
"""A metric function that prints to standard output
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds
"""
_do_print(name, count, elapsed, file=sys.stdout)
def stderr_metric(name, count, elapsed):
"""A metric function that prints to standard error
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds
"""
_do_print(name, count, elapsed, file=sys.stderr)
def make_multi_metric(*metrics):
"""Make a new metric function that calls the supplied metrics
:arg functions metrics: metric functions
:rtype: function
"""
def multi_metric(name, count, elapsed):
"""Calls multiple metrics (closure)"""
for m in metrics:
m(name, count, elapsed)
return multi_metric
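# Hypothetical usage sketch (illustrative only, not part of the original module):
# combine the stdout and stderr metrics into a single callback.
_multi_metric_sketch = make_multi_metric(print_metric, stderr_metric)
# Calling _multi_metric_sketch("my_metric", 10, 1.5) would print to both streams.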
| 3.609375 | 4 |
api/tests/unit/telemetry/test_unit_telemetry_serializers.py | mevinbabuc/flagsmith | 1,259 | 12786937 | <reponame>mevinbabuc/flagsmith<filename>api/tests/unit/telemetry/test_unit_telemetry_serializers.py
from unittest import mock
from django.test import override_settings
from telemetry.serializers import TelemetrySerializer
from tests.unit.telemetry.helpers import get_example_telemetry_data
@override_settings(INFLUXDB_TOKEN="<PASSWORD>")
@mock.patch("telemetry.serializers.get_ip_address_from_request")
@mock.patch("telemetry.serializers.InfluxDBWrapper")
def test_telemetry_serializer_save(MockInfluxDBWrapper, mock_get_ip_address):
# Given
data = get_example_telemetry_data()
serializer = TelemetrySerializer(data=data, context={"request": mock.MagicMock()})
mock_wrapper = mock.MagicMock()
MockInfluxDBWrapper.return_value = mock_wrapper
ip_address = "127.0.0.1"
mock_get_ip_address.return_value = ip_address
# When
serializer.is_valid() # must be called to access validated data
serializer.save()
# Then
mock_wrapper.add_data_point.assert_called_once_with(
"heartbeat", 1, tags={**data, "ip_address": ip_address}
)
mock_wrapper.write.assert_called_once()
| 2.1875 | 2 |
fetch_cord/computer/gpu/Gpu_interface.py | TabulateJarl8/FetchCord | 286 | 12786938 | # from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import List, TypeVar, Dict
from ..Peripheral_interface import Peripherical_interface
class Gpu_interface(Peripherical_interface, metaclass=ABCMeta):
_vendor: str
_model: str
@property
def vendor(self) -> str:
return self._vendor
@vendor.setter
def vendor(self, value: str):
self._vendor = value
@property
def model(self) -> str:
return self._model
@model.setter
@abstractmethod
def model(self, value: str):
raise NotImplementedError
@property
def temp(self) -> float:
        self._temp = self.get_temp()
        return self._temp
@temp.setter
def temp(self, value: float):
self._temp = value
def __init__(self, os, vendor, model):
super().__init__(os)
self.vendor = vendor
self.model = model
@abstractmethod
def get_temp(self) -> float:
raise NotImplementedError
GpuType = TypeVar("GpuType", bound="Gpu_interface")
def get_gpuid(gpu_ids: Dict[str, str], gpus: List[GpuType]):
vendors = []
for i in range(len(gpus)):
if gpus[i].vendor not in vendors:
vendors.append(gpus[i].vendor)
gpuvendor = "".join(vendors).lower()
if gpuvendor in gpu_ids:
return gpu_ids[gpuvendor]
else:
print("Unknown GPU, contact us on github to resolve this.")
return "unknown"
| 2.84375 | 3 |
sublime_codec.py | furikake/sublime-codec | 16 | 12786939 | # -*- coding: utf-8 -*-
import base64
import hashlib
import sublime, sublime_plugin
import sys
PYTHON = sys.version_info[0]
if 3 == PYTHON:
# Python 3 and ST3
from urllib import parse
from . import codec_base62
from . import codec_base64
from . import codec_xml
from . import codec_json
from . import codec_quopri
from . import codec_hex
from . import codec_idn
else:
# Python 2 and ST2
import urllib
import codec_base62
import codec_base64
import codec_xml
import codec_json
import codec_quopri
import codec_hex
import codec_idn
SETTINGS_FILE = "Codec.sublime-settings"
"""
Pick up all the selections which are not empty.
If there is no non-empty selection, return the whole buffer as a single region.
"""
def selected_regions(view):
sels = [sel for sel in view.sel() if not sel.empty()]
if not sels:
sels = [sublime.Region(0, view.size())]
else:
sels = view.sel()
return sels
"""
Sublime Text 3 Base64 Codec
Assumes UTF-8 encoding
日本語 encodes to base64 as 5pel5pys6Kqe
subjects?abcd encodes to url safe base64 as c3ViamVjdHM_YWJjZA==
>>> view.run_command('base64_encode', {'encode_type': 'b64encode'})
"""
class Base64EncodeCommand(sublime_plugin.TextCommand):
ENCODE_TYPE = {
'b64decode': codec_base64.b64decode,
'urlsafe_b64decode': base64.urlsafe_b64decode,
}
def run(self, edit, encode_type='b64encode'):
fix_base32_padding = sublime.load_settings(SETTINGS_FILE).get("base32_fix_padding", False)
print("Codec: fix base32 padding? %s" % str(fix_base32_padding))
fix_base64_padding = sublime.load_settings(SETTINGS_FILE).get("base64_fix_padding", False)
print("Codec: fix base64 padding? %s" % str(fix_base64_padding))
for region in selected_regions(self.view):
if not region.empty():
original_string = self.view.substr(region)
# print("string: " + original_string)
if 'b64encode' == encode_type:
encoded_string = base64.b64encode(original_string.encode("UTF-8"))
elif 'b64decode' == encode_type:
encoded_string = codec_base64.b64decode(original_string.encode("UTF-8"), add_padding=fix_base64_padding)
elif 'urlsafe_b64encode' == encode_type:
encoded_string = base64.urlsafe_b64encode(original_string.encode("UTF-8"))
elif 'urlsafe_b64decode' == encode_type:
encoded_string = codec_base64.urlsafe_b64decode(original_string.encode("UTF-8"), add_padding=fix_base64_padding)
elif 'b32encode' == encode_type:
encoded_string = base64.b32encode(original_string.encode("UTF-8"))
elif 'b32decode' == encode_type:
encoded_string = codec_base64.b32decode(original_string.encode("UTF-8"), add_padding=fix_base32_padding)
elif 'b16encode' == encode_type:
encoded_string = base64.b16encode(original_string.encode("UTF-8"))
elif 'b16decode' == encode_type:
encoded_string = base64.b16decode(original_string.encode("UTF-8"))
else:
print("unsupported operation %s" % (encode_type,))
break
# print("string encoded: " + str(encoded_string.decode("UTF-8")))
self.view.replace(edit, region, encoded_string.decode("UTF-8"))
"""
Sublime Text 3 URL Encoding (Percentage Encoding) Codec
日本語 encodes to %E6%97%A5%E6%9C%AC%E8%AA%9E
"something with a space" encodes to "something%20with%20a%20space"
>>> view.run_command('url_encode', {'encode_type': 'quote'})
"""
class UrlEncodeCommand(sublime_plugin.TextCommand):
if 2 == PYTHON:
ENCODE_TYPE = {
'quote': urllib.quote,
'unquote': urllib.unquote,
'quote_plus': urllib.quote_plus,
'unquote_plus': urllib.unquote_plus
}
else:
ENCODE_TYPE = {
'quote': parse.quote,
'unquote': parse.unquote,
'quote_plus': parse.quote_plus,
'unquote_plus': parse.unquote_plus
}
def run(self, edit, encode_type='quote'):
safe_characters = str(sublime.load_settings(SETTINGS_FILE).get("url_encoding_safe", "/"))
print("Codec: safe url characters? %s" % str(safe_characters))
urlencode_method = UrlEncodeCommand.ENCODE_TYPE[encode_type]
# print("using url encode method: " + str(urlencode_method))
for region in selected_regions(self.view):
if not region.empty():
original_string = self.view.substr(region)
# print("string: " + original_string.encode("UTF-8"))
# print("string encoded: " + encoded_string)
if 2 == PYTHON:
try:
encoded_string = urlencode_method(original_string.encode("UTF-8"), safe=safe_characters)
except TypeError:
                        # FIXME - time to separate quote and unquote to avoid this kind of error.
encoded_string = urlencode_method(original_string.encode("UTF-8"))
self.view.replace(edit, region, encoded_string.decode("UTF-8"))
else:
try:
encoded_string = urlencode_method(original_string, safe=safe_characters)
except TypeError:
                        # FIXME - time to separate quote and unquote to avoid this kind of error.
encoded_string = urlencode_method(original_string)
self.view.replace(edit, region, encoded_string)
"""
Sublime Text 3 Secure Hash Codec
日本語 hashes to SHA-256 as 77710aedc74ecfa33685e33a6c7df5cc83004da1bdcef7fb280f5c2b2e97e0a5
>>> view.run_command('secure_hash', {'secure_hash_type': 'sha256'})
"""
class SecureHashCommand(sublime_plugin.TextCommand):
SECURE_HASH_TYPE = {
'md5': 'md5',
'sha1': 'sha1',
'sha224': 'sha224',
'sha256': 'sha256',
'sha384': 'sha384',
'sha512': 'sha512'
}
def run(self, edit, secure_hash_type='sha256'):
secure_hash_type = SecureHashCommand.SECURE_HASH_TYPE[secure_hash_type]
# print("using secure hash algorithm: " + secure_hash_type)
for region in selected_regions(self.view):
if not region.empty():
original_string = self.view.substr(region)
# print("string: " + original_string)
hash_obj = hashlib.new(secure_hash_type)
hash_obj.update(original_string.encode("UTF-8"))
encoded_string = hash_obj.hexdigest()
# print("string encoded: " + str(encoded_string))
self.view.replace(edit, region, str(encoded_string))
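# The command above replaces each selection with the hexadecimal digest; the stdlib
# equivalent for a single string is:
#   import hashlib
#   hashlib.new("sha256", "abc".encode("UTF-8")).hexdigest()
#   # -> 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'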
"""
Sublime Text 3 Secure Hash Codec
doSomething(); hashes to SHA-256 as RFWPLDbv2BY+rCkDzsE+0fr8ylGr2R2faWMhq4lfEQc=
>>> view.run_command('binary_secure_hash', {'secure_hash_type': 'sha256'})
"""
class BinarySecureHashCommand(sublime_plugin.TextCommand):
SECURE_HASH_TYPE = {
'sha256': 'sha256',
'sha384': 'sha384',
'sha512': 'sha512'
}
def run(self, edit, secure_hash_type='sha256'):
        # look up the algorithm in this class's own (restricted) table
        secure_hash_type = BinarySecureHashCommand.SECURE_HASH_TYPE[secure_hash_type]
# print("using secure hash algorithm: " + secure_hash_type)
for region in selected_regions(self.view):
if not region.empty():
original_string = self.view.substr(region)
# print("string: " + original_string)
hash_obj = hashlib.new(secure_hash_type)
hash_obj.update(original_string.encode("UTF-8"))
encoded_string = base64.b64encode(hash_obj.digest()).decode('UTF-8')
# print("string encoded: " + str(encoded_string))
self.view.replace(edit, region, str(encoded_string))
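# Unlike SecureHashCommand, this variant emits the raw digest re-encoded as base64,
# the form used for example by Subresource Integrity (SRI) hashes:
#   import base64, hashlib
#   base64.b64encode(hashlib.sha256(b"doSomething();").digest()).decode("UTF-8")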
"""
Escapes and unescapes the 5 standard XML predefined entities
<hello>T'was a dark & "stormy" night</hello>
escapes to
&lt;hello&gt;T&apos;was a dark &amp; &quot;stormy&quot; night&lt;/hello&gt;
>>> view.run_command('xml', {'encode_type': 'escape'})
"""
class XmlCommand(sublime_plugin.TextCommand):
def run(self, edit, encode_type='escape'):
method = self.get_method(encode_type)
for region in selected_regions(self.view):
if not region.empty():
original_string = self.view.substr(region)
new_string = method(original_string)
self.view.replace(edit, region, new_string)
def get_method(self, encode_type):
if 'escape' == encode_type:
return codec_xml.escape
elif 'unescape' == encode_type:
return codec_xml.unescape
else:
raise NotImplementedError("unknown encoding type %s" % (str(encode_type),))
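# codec_xml is assumed to wrap xml.sax.saxutils; escaping all five predefined entities
# with the stdlib looks like:
#   from xml.sax.saxutils import escape
#   escape("a < b & 'c'", {"'": "&apos;", '"': "&quot;"})   # -> "a &lt; b &amp; &apos;c&apos;"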
"""
Encodes and decodes Quoted-Printable strings
This is a really long line to test whether "quoted-printable" works correctly when using 日本語 and 英語
encodes to
This is a really long line to test whether "quoted-printable" works correct=
ly when using =E6=97=A5=E6=9C=AC=E8=AA=9E and =E8=8B=B1=E8=AA=9E
>>> view.run_command('quoted_printable', {'encode_type': 'encode'})
"""
class QuotedPrintableCommand(sublime_plugin.TextCommand):
def run(self, edit, encode_type='encode'):
method = self.get_method(encode_type)
for region in selected_regions(self.view):
if not region.empty():
original_string = self.view.substr(region)
encoded_string = method(original_string.encode("UTF-8"))
self.view.replace(edit, region, encoded_string.decode("UTF-8"))
def get_method(self, encode_type):
if 'encode' == encode_type:
return codec_quopri.encodestring
elif 'decode' == encode_type:
return codec_quopri.decodestring
else:
raise NotImplementedError("unknown encoding type %s" % (str(encode_type),))
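# codec_quopri is assumed to delegate to the stdlib quopri module, which works on bytes
# and soft-wraps long lines at 76 characters:
#   import quopri
#   quopri.encodestring("日本語".encode("UTF-8"))        # -> b'=E6=97=A5=E6=9C=AC=E8=AA=9E'
#   quopri.decodestring(b"=E6=97=A5").decode("UTF-8")    # -> '日'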
"""
Encodes and decodes JSON
T'was a dark & "stormy" night in 日本
encodes to
"T'was a dark & \"stormy\" night in 日本"
>>> view.run_command('json', {'encode_type': 'encode'})
"""
class JsonCommand(sublime_plugin.TextCommand):
def run(self, edit, encode_type='encode'):
method = self.get_method(encode_type)
for region in selected_regions(self.view):
if not region.empty():
original_string = self.view.substr(region)
new_string = method(original_string)
self.view.replace(edit, region, new_string)
def get_method(self, encode_type):
if 'encode' == encode_type:
return codec_json.encode
elif 'encode_ensure_ascii' == encode_type:
return codec_json.encode_ensure_ascii
elif 'decode' == encode_type:
return codec_json.decode
else:
raise NotImplementedError("unknown encoding type %s" % (str(encode_type),))
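# codec_json presumably builds on the stdlib json module; the two encode flavours map
# onto the ensure_ascii flag:
#   import json
#   json.dumps("night in 日本", ensure_ascii=False)   # -> '"night in 日本"'
#   json.dumps("night in 日本")                       # -> '"night in \u65e5\u672c"'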
"""
Encodes and decodes C-style hex representations of bytes
Hello, my good friend!
encodes to
\\x48\\x65\\x6c\\x6c\\x6f\\x2c\\x20\\x6d\\x79\\x20\\x67\\x6f\\x6f\\x64\\x20\\x66\\x72\\x69\\x65\\x6e\\x64\\x21
>>> view.run_command('c_hex', {'encode_type': 'encode'})
"""
class HexCommand(sublime_plugin.TextCommand):
def run(self, edit, encode_type='encode'):
method = self.get_method(encode_type)
for region in selected_regions(self.view):
if not region.empty():
original_string = self.view.substr(region)
new_string = method(original_string)
self.view.replace(edit, region, new_string)
def get_method(self, encode_type):
if 'encode' == encode_type:
return codec_hex.encode_hex
elif 'decode' == encode_type:
return codec_hex.decode_hex
else:
raise NotImplementedError("unknown encoding type %s" % (str(encode_type),))
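# codec_hex presumably renders each byte as a C-style \xNN escape; a stdlib-only
# sketch of the round trip (Python 3):
#   data = "Hi!".encode("UTF-8")
#   "".join("\\x%02x" % b for b in data)      # -> \x48\x69\x21 when printed
#   bytes.fromhex("486921").decode("UTF-8")   # -> 'Hi!'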
class IdnCommand(sublime_plugin.TextCommand):
def run(self, edit, encode_type='punycode_encode'):
method = self.get_method(encode_type)
for region in selected_regions(self.view):
if not region.empty():
original_string = self.view.substr(region)
new_string = method(original_string)
self.view.replace(edit, region, new_string)
def get_method(self, encode_type):
if 'punycode_encode' == encode_type:
return codec_idn.punycode_encode
elif 'punycode_decode' == encode_type:
return codec_idn.punycode_decode
elif 'idna_encode' == encode_type:
return codec_idn.idna_encode
elif 'idna_decode' == encode_type:
return codec_idn.idna_decode
elif 'idna2008_encode' == encode_type:
return codec_idn.idna2008_encode
elif 'idna2008_decode' == encode_type:
return codec_idn.idna2008_decode
elif 'idna2008uts46_encode' == encode_type:
return codec_idn.idna2008uts46_encode
elif 'idna2008uts46_decode' == encode_type:
return codec_idn.idna2008uts46_decode
elif 'idna2008transitional_encode' == encode_type:
return codec_idn.idna2008transitional_encode
elif 'idna2008transitional_decode' == encode_type:
return codec_idn.idna2008transitional_decode
else:
raise NotImplementedError("unknown encoding type %s" % (str(encode_type),))
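# codec_idn wraps several IDNA flavours (the idna2008* variants presumably via the
# third-party `idna` package); the two stdlib codecs behave like:
#   "bücher".encode("punycode")        # -> b'bcher-kva'
#   "bücher.example".encode("idna")    # -> b'xn--bcher-kva.example'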
"""
Sublime Text 3 Base62 Codec
"""
class Base62EncodeCommand(sublime_plugin.TextCommand):
def run(self, edit, encode_type='b62encode'):
for region in selected_regions(self.view):
if not region.empty():
original_string = self.view.substr(region)
if 'b62encode_int' == encode_type:
encoded_string = codec_base62.b62encode_int(original_string.encode("UTF-8"))
elif 'b62decode_int' == encode_type:
encoded_string = codec_base62.b62decode_int(original_string.encode("UTF-8"))
elif 'b62encode_inv_int' == encode_type:
encoded_string = codec_base62.b62encode_inv_int(original_string.encode("UTF-8"))
elif 'b62decode_inv_int' == encode_type:
encoded_string = codec_base62.b62decode_inv_int(original_string.encode("UTF-8"))
elif 'b62encode_hex' == encode_type:
encoded_string = codec_base62.b62encode_hex(original_string.encode("UTF-8"))
elif 'b62decode_hex' == encode_type:
encoded_string = codec_base62.b62decode_hex(original_string.encode("UTF-8"))
elif 'b62encode_inv_hex' == encode_type:
encoded_string = codec_base62.b62encode_inv_hex(original_string.encode("UTF-8"))
elif 'b62decode_inv_hex' == encode_type:
encoded_string = codec_base62.b62decode_inv_hex(original_string.encode("UTF-8"))
else:
print("unsupported operation %s" % (encode_type,))
break
self.view.replace(edit, region, encoded_string.decode("UTF-8"))
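# codec_base62's exact API is specific to this plugin, but the underlying integer
# encoding is the usual repeated divmod over a 62-character alphabet (the *_inv_*
# variants presumably just use an inverted alphabet):
#   ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
#   def b62encode_int_sketch(n):
#       out = ""
#       while n:
#           n, r = divmod(n, 62)
#           out = ALPHABET[r] + out
#       return out or ALPHABET[0]
#   b62encode_int_sketch(61)    # -> 'z'
#   b62encode_int_sketch(62)    # -> '10'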
| 2.546875 | 3 |
WebMirror/management/rss_parser_funcs/feed_parse_extractCgtranslationsMe.py | fake-name/ReadableWebProxy | 193 | 12786940 | <filename>WebMirror/management/rss_parser_funcs/feed_parse_extractCgtranslationsMe.py
def extractCgtranslationsMe(item):
'''
Parser for 'cgtranslations.me'
'''
if 'Manga' in item['tags']:
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if ('Gifting (Fanfic)' in item['tags'] and 'LN Chapters' in item['tags']) or \
item['tags'] == ['Gifting (Fanfic)']:
return buildReleaseMessageWithType(item, 'Gifting this World with Wonderful Blessings!', vol, chp, frag=frag, postfix=postfix)
if 'Gifting (Fanfic)' in item['tags'] and 'explosion' in item['tags']:
return buildReleaseMessageWithType(item, 'Kono Subarashii Sekai ni Bakuen wo!', vol, chp, frag=frag, postfix=postfix)
if ('KonoSuba' in item['tags'] and 'LN Chapters' in item['tags']):
return buildReleaseMessageWithType(item, 'KonoSuba', vol, chp, frag=frag, postfix=postfix)
return False | 2.390625 | 2 |
engine/op_math.py | ludius0/mercury_engine | 0 | 12786941 | <filename>engine/op_math.py
# libs
import math
# scripts
from .func_base import Func, setattr_value
class Add(Func):
"""
>>> Value(1).add(1)
>>> Value(2)
"""
@staticmethod
def forward(ctx, x, y):
return x + y
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
setattr_value(Add)
class Mul(Func):
"""
>>> Value(1).mul(2)
>>> Value(2)
"""
@staticmethod
def forward(ctx, x, y):
ctx.saved_values.extend([x, y])
return x * y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_values
return y * grad_output, x * grad_output
setattr_value(Mul)
class Pow(Func):
"""
>>> Value(2).pow(0)
>>> Value(1)
"""
@staticmethod
def forward(ctx, x, y):
ctx.saved_values.extend([x, y])
return x ** y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_values
grad1 = y * (x**(y - 1.0)) * grad_output
grad2 = (x**y) * math.log(x) * grad_output if x > 0 else grad_output
return grad1, grad2
setattr_value(Pow)
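# A minimal usage sketch (an assumption based on the docstrings above): setattr_value()
# registers each Func as a method on Value (see .func_base), so the ops compose like:
#   >>> Value(3).mul(2).add(1)    # forward pass: 3 * 2 + 1
#   >>> Value(7)
# During the backward pass, Add.backward passes the incoming gradient through unchanged,
# while Mul.backward scales it by the other operand (y * grad, x * grad).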
class Exp(Func):
"""
>>> Value(1).exp()
>>> Value(2.71828182846)
"""
@staticmethod
def forward(ctx, x):
out = math.exp(x)
ctx.saved_values.extend([out])
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_values
return grad_output * out
setattr_value(Exp) | 2.9375 | 3 |
sesion7.py | organizacion-sesion-3-anabel-palasi/sesion7-tarea-individual | 0 | 12786942 | from __future__ import print_function
import sys
# Calculates the sum
def suma(num1, num2):
    return num1 + num2
# Calculates the subtraction
def resta(num1, num2):
    return num1 - num2
# Calculates the multiplication
def multiplicacion(num1, num2):
    return num1 * num2
# Calculates the square root
def raizcuadrada(numero):
    return numero ** 0.5
# Calculates the modulo (remainder)
def modulo(num1, num2):
    return num1 % num2
# Calculates x raised to the power y
def elevado(num1, num2):
    return num1 ** num2
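# A minimal usage sketch of the functions above (assuming they are implemented as shown):
if __name__ == "__main__":
    print(suma(2, 3))        # 5
    print(raizcuadrada(16))  # 4.0
    print(elevado(2, 10))    # 1024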
| 2.40625 | 2 |
ebl/tests/fragmentarium/test_genre_route.py | BuildJet/ebl-api | 4 | 12786943 | import falcon
from ebl.fragmentarium.domain.genres import genres
def test_get_genre(client):
get_result = client.simulate_get("/genres")
assert get_result.status == falcon.HTTP_OK
assert get_result.headers["Access-Control-Allow-Origin"] == "*"
assert get_result.json == list(map(list, genres))
| 1.773438 | 2 |
probe/modules/antivirus/eset/eset_file_security.py | krisshol/bach-kmno | 248 | 12786944 | <filename>probe/modules/antivirus/eset/eset_file_security.py
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
from pathlib import Path
from datetime import datetime
from modules.antivirus.base import AntivirusUnix
log = logging.getLogger(__name__)
class EsetFileSecurity(AntivirusUnix):
name = "ESET File Security (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super().__init__(*args, **kwargs)
# Modify retun codes (see --help for details)
self._scan_retcodes[self.ScanResult.INFECTED] = lambda x: x in [1, 50]
self._scan_retcodes[self.ScanResult.ERROR] = lambda x: x in [100]
# scan tool variables
self.scan_args = (
"--clean-mode=NONE", # do not remove infected files
"--no-log-all" # do not log clean files
)
self.scan_patterns = [
re.compile('name="(?P<file>.*)", threat="(?P<name>.*)", '
'action=.*', re.IGNORECASE),
]
self.scan_path = Path("/opt/eset/esets/sbin/esets_scan")
# ==========================================
# Antivirus methods (need to be overriden)
# ==========================================
def get_version(self):
"""return the version of the antivirus"""
return self._run_and_parse(
'--version',
regexp='(?P<version>\d+(\.\d+)+)',
group='version')
def get_database(self):
"""return list of files in the database"""
search_paths = [
Path('/var/opt/eset/esets/lib/'),
]
database_patterns = [
'*.dat', # determined using strace on linux
]
return self.locate(database_patterns, search_paths, syspath=False)
def get_virus_database_version(self):
"""Return the Virus Database version"""
fdata = Path("/var/opt/eset/esets/lib/data/data.txt")
data = fdata.read_text()
matches = re.search('VerFileETAG_update\.eset\.com=(?P<version>.*)',
data, re.IGNORECASE)
if not matches:
raise RuntimeError(
"Cannot read dbversion in {}".format(fdata.absolute()))
version = matches.group('version').strip()
matches = re.search('LastUpdate=(?P<date>\d*)', data, re.IGNORECASE)
if not matches:
raise RuntimeError(
"Cannot read db date in {}".format(fdata.absolute()))
date = matches.group('date').strip()
date = datetime.fromtimestamp(int(date)).strftime('%Y-%m-%d')
return version + ' (' + date + ')'
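# A hedged illustration of how scan_patterns is meant to pick apart esets_scan output
# (the log line below is an assumed example, not captured from the real tool):
#   import re
#   pattern = re.compile('name="(?P<file>.*)", threat="(?P<name>.*)", action=.*', re.IGNORECASE)
#   m = pattern.match('name="/tmp/sample.bin", threat="Eicar test file", action="retained"')
#   m.group("file"), m.group("name")   # -> ('/tmp/sample.bin', 'Eicar test file')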
| 2.09375 | 2 |
sovrin_client/__metadata__.py | evernym/sovrin-client | 1 | 12786945 | """
sovrin-client package metadata
"""
__version_info__ = (0, 2)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "<NAME>."
__license__ = "Apache 2.0"
__all__ = ['__version_info__', '__version__', '__author__', '__license__']
# TODO: Shouldn't we update these dependencies?
__dependencies__ = {
"anoncreds": ">=0.1.11",
"sovrin_common": ">=0.0.4"
}
| 1.421875 | 1 |
indexers/AnnoyIndexer.py | Nashrah31/SimDB | 2 | 12786946 | from interfaces.ANNIndexer import ANNIndexer
import annoy
# Usage: indexer = AnnoyIndexer(content_vectors, vector_length=100, n_trees=10)
class AnnoyIndexer(ANNIndexer):
def __init__(self, content_vectors, vector_length=100, n_trees=10):
print("initializing annoy wrapper")
self.vector_length = vector_length
self.n_trees = n_trees
self.index = annoy.AnnoyIndex(vector_length)
self.content_vectors = content_vectors
def build_index(self, path=None):
print("building index")
print("len of docvecs", self.content_vectors.size())
vectors_map = self.content_vectors.get_vectors_map()
for key in vectors_map:
try:
self.index.add_item(key, vectors_map[key])
except Exception as e:
print("problem adding to index for id : " + str(key), e)
# vectors.apply(lambda df_item: self.index.add_item(df_item.name, df_item['vector']))
print(self.index.build(self.n_trees))
print("items in index - ", self.index.get_n_items())
def save(self, path):
self.index.save(path)
def load(self, path):
self.index.load(path)
def find_NN_by_id(self, query_id, n=10):
return self.index.get_nns_by_item(query_id, n)
def find_NN_by_vector(self, query_vector, n=10):
return self.index.get_nns_by_vector(query_vector, n)
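# A minimal sketch of the raw annoy calls this wrapper builds on (note that recent
# annoy releases require an explicit metric, e.g. AnnoyIndex(100, "angular")):
#   import annoy
#   index = annoy.AnnoyIndex(100, "angular")
#   index.add_item(0, [0.1] * 100)
#   index.build(10)                  # 10 trees
#   index.get_nns_by_item(0, 5)      # ids of the 5 nearest neighbours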
| 2.796875 | 3 |
examples/car_crash_analysis/crash_analysis_with_explicit_contact.py | jdlaubrie/florence | 65 | 12786947 | <filename>examples/car_crash_analysis/crash_analysis_with_explicit_contact.py
import os
import numpy as np
from Florence import *
def crash_analysis():
""" Car crash analysis in a simplified 2D car geometry with hyperelastic
explicit dynamics solver using explicit penalty contact formulation
"""
mesh = Mesh()
mesh.ReadGmsh(os.path.join(PWD(__file__),"Car2D.msh"),element_type="quad")
mesh.points /=1000.
mu = 1.0e6
mu1 = mu
mu2 = 0.
v = 0.495
lamb = 2.*mu*v/(1-2.*v)
material = MooneyRivlin(2, mu1=mu1, mu2=mu2, lamb=lamb, rho=8000.)
def DirichletFunc(mesh, time_step):
boundary_data = np.zeros((mesh.points.shape[0],2, time_step))+np.NAN
X_0 = np.isclose(mesh.points[:,0],0.)
boundary_data[:,1,:] = 0.
return boundary_data
def NeumannFuncDyn(mesh, time_step):
boundary_data = np.zeros((mesh.points.shape[0],2, time_step))+np.NAN
mag = 5e5
n = 3000
stage0 = np.ones(n)*mag
stage1 = np.zeros(time_step-n)
full_stage = np.concatenate((stage0,stage1))
boundary_data[:,0,:] = full_stage
return boundary_data
time_step = 6000
boundary_condition = BoundaryCondition()
boundary_condition.SetDirichletCriteria(DirichletFunc, mesh, time_step)
boundary_condition.SetNeumannCriteria(NeumannFuncDyn, mesh, time_step)
formulation = DisplacementFormulation(mesh)
contact_formulation = ExplicitPenaltyContactFormulation(mesh,
np.array([-1.,0.]), # unit vector specifying the direction of normal contact
180., # normal gap [distance of rigid body from the deformable object]
1e7 # penalty parameter kappa
)
fem_solver = FEMSolver(total_time=9,
number_of_load_increments=time_step,
analysis_type="dynamic",
analysis_subtype="explicit",
mass_type="lumped",
analysis_nature="nonlinear",
optimise=True,
memory_store_frequency=20)
solution = fem_solver.Solve(formulation=formulation, material=material, mesh=mesh,
boundary_condition=boundary_condition, contact_formulation=contact_formulation)
# check validity
solution_vectors = solution.GetSolutionVectors()
assert np.linalg.norm(solution_vectors) > 21610
assert np.linalg.norm(solution_vectors) < 21630
# Write results to vtk file
# solution.WriteVTK("crash_analysis_results", quantity=0)
if __name__ == "__main__":
crash_analysis() | 2.8125 | 3 |
AIoT/Vitis-AI/VART/example/adas_detection_py/threads/yolov3_thread.py | kaka-lin/ML-Notes | 0 | 12786948 | import time
import threading
import numpy as np
from common import preprocess_one_image_fn, draw_outputs, load_classes, generate_colors
from yolo_utils import yolo_eval
from priority_queue import PriorityQueue
class YOLOv3Thread(threading.Thread):
def __init__(self, runner: "Runner", deque_input, lock_input,
deque_output, lock_output, thread_name):
super(YOLOv3Thread, self).__init__(name=thread_name)
self.runner = runner
self.deque_input = deque_input
self.lock_input = lock_input
self.deque_output = deque_output
self.lock_output = lock_output
self.class_names = load_classes('./model_data/adas_classes.txt')
self.colors = generate_colors(self.class_names)
def set_input_image(self, input_run, frame, size):
w, h = size
img = preprocess_one_image_fn(frame, w, h)
input_run[0, ...] = img.reshape((h, w, 3))
def run(self):
# Get input/output tensors and dims
inputTensors = self.runner.get_input_tensors()
outputTensors = self.runner.get_output_tensors()
input_ndim = tuple(inputTensors[0].dims) # (1, 256, 512, 3)
result0_ndim = tuple(outputTensors[0].dims) # (1, 8, 16, 40)
result1_ndim = tuple(outputTensors[1].dims) # (1, 16, 32, 40)
result2_ndim = tuple(outputTensors[2].dims) # (1, 32, 64, 40)
result3_ndim = tuple(outputTensors[3].dims) # (1, 64, 126, 40)
# input/output data define
input_data = [np.empty(input_ndim, dtype=np.float32, order="C")]
result0 = np.empty(result0_ndim, dtype=np.float32, order="C")
result1 = np.empty(result1_ndim, dtype=np.float32, order="C")
result2 = np.empty(result2_ndim, dtype=np.float32, order="C")
result3 = np.empty(result3_ndim, dtype=np.float32, order="C")
results = [result0, result1, result2, result3]
# get input width, height for preprocess
input_shape = (input_ndim[2], input_ndim[1])
while True:
self.lock_input.acquire()
            # input queue is empty: release the lock and try again
if not self.deque_input:
self.lock_input.release()
continue
else:
# get input frame from input frames queue
data_from_deque = self.deque_input[0]
self.deque_input.popleft()
self.lock_input.release()
# Init input image to input buffers
img = data_from_deque['img']
idx = data_from_deque['idx']
start_time = data_from_deque['time']
self.set_input_image(input_data[0], img, input_shape)
# invoke the running of DPU for yolov3
"""Benchmark DPU FPS performance over Vitis AI APIs `execute_async()` and `wait()`"""
# (self: vart.Runner, arg0: List[buffer], arg1: List[buffer]) -> Tuple[int, int]
job_id = self.runner.execute_async(input_data, results)
self.runner.wait(job_id)
self.post_process(img, results, input_shape)
self.lock_output.acquire()
img_info = PriorityQueue(idx, img, start_time)
self.deque_output.append(img_info)
self.deque_output.sort()
self.lock_output.release()
def post_process(self, image, results, input_ndim):
"""Xilinx ADAS detction model: YOLOv3
Name: yolov3_adas_pruned_0_9
Input shape: (256, 512, 3)
Classe: 3
Anchor: 5, for detail please see `yolo_utils.py`
Outputs: 4
outputs_node: {
"layer81_conv",
"layer93_conv",
"layer105_conv",
"layer117_conv",
}
"""
image_shape = (image.shape[1], image.shape[0]) # (w, h)
scores, boxes, classes = yolo_eval(
results,
image_shape=image_shape,
input_ndim=input_ndim,
classes=3,
score_threshold=0.5,
iou_threshold=0.7)
# print("detection:")
# for i in range(scores.shape[0]):
# print("\t{}, {}, {}".format(
# self.class_names[int(classes[i])], scores[i], boxes[i]
# ))
image = draw_outputs(image, (scores, boxes, classes),
self.class_names, self.colors)
| 2.578125 | 3 |
app/module/spotify_to_youtube/__init__.py | MisakaMikoto0502/zeroday | 0 | 12786949 | <reponame>MisakaMikoto0502/zeroday
from .convert import SpotifyConverter
| 0.96875 | 1 |
application/app.py | Miseq/python_text_image_webscrapper | 1 | 12786950 | <gh_stars>1-10
from flask import Flask, make_response, jsonify
import validators
from flask import request
from flask.views import MethodView
from file_manager import FileManager
app = Flask(__name__)
downloader = FileManager()
class GettingAll(MethodView):
@staticmethod
def get():
answer = "This app dosen't have a Frontend. In order tu communicate with it you can use REST API " \
"through comand line, like 'curl', or by third party programs like free 'Postman'. You can also use " \
"simple build-in command line user interface, just run 'user_interface.py' while this app is working."\
"\nFor manual REST request you need to specify two fields in json body:" \
"\nFirst - 'url': '<url_from_which_you_want_to_download>'" \
"\nSecond - 'option': 'all' OR 'text' OR 'images', which tells app what you want to download"
return make_response(jsonify(answer), 200)
@staticmethod
def post():
r = request.get_json()
downloader.scraper.url = r.get('url')
if validators.url(downloader.scraper.url):
download_option = r.get('option')
if download_option == "all":
output = downloader.save_to_zip(download_images=True, download_text=True)
elif download_option == "text":
output = downloader.save_to_zip(download_text=True)
elif download_option == "images":
output = downloader.save_to_zip(download_images=True)
else:
output = "Nie podano rodzaju pobiernaia, wpisz jedno: 'option': 'all'/'text'/'images'"
else:
output = "URL jest bledny!"
return make_response(jsonify(output), 200)
def put(self):
        # same as POST
return self.post()
app.add_url_rule("/", view_func=GettingAll.as_view("GettingAll"))
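# Example requests against this API (assuming the app is running on localhost:5000):
#   curl http://localhost:5000/
#   curl -X POST http://localhost:5000/ \
#        -H "Content-Type: application/json" \
#        -d '{"url": "https://example.com", "option": "text"}'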
if __name__ == '__main__':
app.run('0.0.0.0', 5000, debug=False)
| 3.140625 | 3 |