max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
src/utils/data_utils.py | sahaana/ember-API | 7 | 12788951 | <filename>src/utils/data_utils.py
import numpy as np
import pandas as pd
from typing import List, Union, Dict, Optional, Tuple
def sample_excluding(n: int,
exclude: List[int]) -> int:
x = np.random.randint(n)
while x in exclude:
x = np.random.randint(n)
return x
def sequential_tt_split(n: int,
                        n_train: int,
                        n_test: int) -> Tuple[np.ndarray, np.ndarray]:
    if n_train + n_test > n:
        print("Invalid train/test split: n_train + n_test exceeds n")
        return np.array([]), np.array([])
    indices = np.arange(n)  # np.arange(n - 1) dropped the last sample index
if n_test == 0:
return indices, np.array([])
train_idx = indices[:n_train]
test_idx = indices[-n_test:]
return train_idx, test_idx
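# Illustrative usage sketch (not part of the original module); the sizes below are
# arbitrary assumptions:
# train_idx, test_idx = sequential_tt_split(n=100, n_train=80, n_test=20)
# negative = sample_excluding(100, exclude=list(train_idx))  # index outside the train split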
| 3.09375 | 3 |
lib/surface/data_fusion/operations/wait.py | kustodian/google-cloud-sdk | 2 | 12788952 | # -*- coding: utf-8 -*- #
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to wait for operation completion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.data_fusion import datafusion as df
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.data_fusion import operation_poller
from googlecloudsdk.command_lib.data_fusion import resource_args
class Wait(base.SilentCommand):
"""Wait for asynchronous operation to complete.
## EXAMPLES
To wait for operation 'my-operation' in project 'my-project' and location
'my-location', run:
$ {command} --project=my-project --location=my-location my-operation
"""
WAIT_CEILING_MS = 60 * 20 * 1000
@staticmethod
def Args(parser):
resource_args.AddOperationResourceArg(parser, 'The operation to wait for.')
def Run(self, args):
datafusion = df.Datafusion()
operation_ref = args.CONCEPTS.operation.Parse()
req = datafusion.messages.DatafusionProjectsLocationsOperationsGetRequest(
name=operation_ref.RelativeName())
operation = datafusion.client.projects_locations_operations.Get(req)
waiter.WaitFor(
operation_poller.OperationPoller(),
operation.name,
'Waiting for [{}] to complete.'.format(operation.name),
wait_ceiling_ms=self.WAIT_CEILING_MS)
| 2.28125 | 2 |
src/classification/data_object.py | PeterJackNaylor/CellularHeatmaps | 0 | 12788953 | <filename>src/classification/data_object.py
import numpy as np
from glob import glob
from tqdm import tqdm
import pandas as pd
import os
from skimage.util import crop, pad
from sklearn.model_selection import StratifiedKFold
def t_name(name):
"""
    Processes the file name to extract the key (tissue id).
    """
    basename = os.path.basename(name).split('.')[0]
    # basename = basename.split('_')[-1]
    return basename
def load_folder(path):
"""
    Loads a folder of numpy arrays into a dictionary.
Parameters
----------
path: string,
path to folder from which to find 'npy'
Returns
-------
    A dictionary where the key is the tissue id and
    the value is the tissue heatmap.
"""
files = glob(path + "/*.npy")
loaded = {t_name(f): np.load(f) for f in tqdm(files)}
# tt = [loaded[t_name(f)].shape for f in files]
# import pdb; pdb.set_trace()
# checks sizes how they go down
return loaded
def load_labels(label_path, label_interest):
"""
Loads the label table.
Parameters
----------
label_path: string,
path to label csv table
label_interest: string,
name of variable to predict. Has to be in the table loaded by
the parameter label_path.
Returns
-------
A tuple of series where the first is the variable of interest,
    the second is the outer test folds, and the third the variable to stratify
    the inner folds by.
"""
table = pd.read_csv(label_path)
table = table.set_index(['Biopsy'])
y = table[label_interest]
stratefied_variable = table[label_interest]
folds = table["fold"]
return y, folds, stratefied_variable
def crop_pad_around(image, size):
"""
Pads or crops an image so that the image is of a given size.
Parameters
----------
image: numpy array,
3 channel numpy array to crop/pad.
size: tuple of integers,
size to achieve.
Returns
-------
    A padded or cropped version of image so that it
    has the requested size.
"""
x, y, z = image.shape
x_pad_width = size[0] - x if size[0] > x else 0
y_pad_width = size[1] - y if size[1] > y else 0
if x_pad_width > 0 or y_pad_width > 0:
        # pad each spatial dimension by its own deficit (the original mixed the two widths)
        pad_width = [(0, x_pad_width), (0, y_pad_width)]
        pad_width += [(0, 0)]
image = np.pad(image, pad_width, mode='constant')
x, y, z = image.shape
shapes = [x, y, z]
x_crop_width = x - size[0] if x > size[0] else 0
y_crop_width = y - size[1] if y > size[1] else 0
if x_crop_width > 0 or y_crop_width > 0:
crops = []
for i, c in enumerate([x_crop_width, y_crop_width]):
crop_v = np.random.randint(0, c) if c != 0 else 0
crops.append((crop_v, shapes[i] - size[i] - crop_v))
crops.append((0,0))
image = crop(image, crops, copy=True)
return image
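# Illustrative sketch (not from the original file): crop_pad_around zero-pads when the
# input is smaller than the target and randomly crops when it is larger. Shapes are
# arbitrary assumptions.
# out = crop_pad_around(np.zeros((100, 300, 3)), (224, 224, 3))   # out.shape == (224, 224, 3)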
class DataGenImage():
'Generates data for Keras datagenerator'
def __init__(self, path, label_file, label_interest, categorize=False, classes=2):
'Initialization'
self.mapper = load_folder(path)
# vector y, fold, and stratefied
self.y, self.f, self.sv = load_labels(label_file, label_interest)
self.classes = classes
self.folds_focus = False
def __getitem__(self, index):
"""
        Given a biopsy id, returns the heatmap image and its label.
Parameters
----------
index: string,
string existing in the mapper dictionnary
Returns
-------
        A tuple of (biopsy heatmap, label).
"""
        return self.mapper[index], self.y.loc[index]  # .loc replaces the removed pandas .ix accessor
def cropped(self, index, size):
"""
        Like __getitem__, but returns the heatmap cropped/padded to a given size
        when given a biopsy id.
Parameters
----------
index: string,
string existing in the mapper dictionnary
Returns
-------
A biopsy heatmap.
"""
image, label = self.__getitem__(index)
image = crop_pad_around(image, size)
return image, label
def return_keys(self):
"""
Returns list of keys.
"""
return list(self.mapper.keys())
def return_weights(self):
"""
Returns class associated weights.
"""
class_weight = {}
train, val = self.index_folds[0]
        n = self.y.loc[train].shape[0] + self.y.loc[val].shape[0]
for i in range(self.classes):
            size_i = (self.y.loc[train] == i).astype(int).sum() + (self.y.loc[val] == i).astype(int).sum()
class_weight[i] = (1 - size_i / n)
return class_weight
def __len__(self):
'Denotes the number of batches per epoch'
return int(len(self.mapper))
def return_fold(self, split, number):
"""
        Returns the indices associated with a given split and, if necessary
        (for train and validation), a given fold number.
Parameters
----------
split: string,
could be either 'train', 'validation', 'test'
number: int,
split number, ignored if test, can't be above the number of
folds...
Returns
-------
Index which is a list of integers
"""
if self.folds_focus:
if split == "train":
train, val = self.index_folds[number]
return train
elif split == "validation":
train, val = self.index_folds[number]
return val
else:
return self.test_folds
else:
print("cant do, need to focus folds with create_inner_fold")
def create_inner_fold(self, nber_splits, test_fold):
"""
        Creates inner stratified folds and focuses the dataset
        on a given test fold number. Important to do before
        running: it allows loading the model once for multiple
        data configurations.
Parameters
----------
nber_splits: int,
number of inner folds for the training
test_fold: int,
fold number to remove before doing the inner fold.
"""
self.test_folds = self.f[self.f == test_fold].index
for_train = self.f[self.f != test_fold].index
skf = StratifiedKFold(n_splits=nber_splits, shuffle=True)
stratefied_variable = self.sv[self.f != test_fold]
obj = skf.split(for_train, stratefied_variable)
self.index_folds = [(for_train[train_index], for_train[val_index]) for train_index, val_index in obj]
self.folds_focus = True
def main():
import matplotlib.pylab as plt
path = "/mnt/data3/pnaylor/ProjectFabien/outputs/heat_maps_small_8/comp3"
labels_path = "/mnt/data3/pnaylor/ProjectFabien/outputs/multi_class.csv"
dgi = DataGenImage(path, labels_path, "RCB_class")
index = '500169'
x, y = dgi.cropped(index, (224, 224, 3))
dgi.return_fold("validation", 4)
dgi.create_inner_fold(5, 9)
dgi.return_fold("validation", 4)
import pdb; pdb.set_trace()
def test_crop():
import matplotlib.pylab as plt
path = "/mnt/data3/pnaylor/ProjectFabien/outputs/heat_maps_small_8/comp3"
labels_path = "/mnt/data3/pnaylor/ProjectFabien/outputs/multi_class.csv"
dgi = DataGenImage(path, labels_path, "RCB_class")
dgi.create_inner_fold(5, 9)
peaps = dgi.return_fold("train", 4)
for peap in peaps:
x, y = dgi.cropped(str(peap), (224, 224, 3))
print(x.shape)
peaps = dgi.return_fold("validation", 4)
for peap in peaps:
x, y = dgi.cropped(str(peap), (224, 224, 3))
print(x.shape)
peaps = dgi.return_fold("test", 4)
for peap in peaps:
x, y = dgi.cropped(str(peap), (224, 224, 3))
print(x.shape)
import pdb; pdb.set_trace()
def test_weight():
import matplotlib.pylab as plt
path = "/mnt/data3/pnaylor/ProjectFabien/outputs/heat_maps_small_8/comp3"
labels_path = "/mnt/data3/pnaylor/ProjectFabien/outputs/multi_class.csv"
dgi = DataGenImage(path, labels_path, "tumour_cells")
dgi.create_inner_fold(5, 9)
w = dgi.return_weights()
print(w)
if __name__ == '__main__':
# test_weight()
test_crop()
main()
| 2.734375 | 3 |
ddf_utils/chef/procedure/groupby.py | semio/ddf_utils | 2 | 12788954 | # -*- coding: utf-8 -*-
"""groupby procedure for recipes"""
import fnmatch
import logging
from typing import List
from .. helpers import debuggable, mkfunc
from .. model.ingredient import DataPointIngredient
from .. model.chef import Chef
logger = logging.getLogger('groupby')
@debuggable
def groupby(chef: Chef, ingredients: List[DataPointIngredient], result, **options) -> DataPointIngredient:
"""group ingredient data by column(s) and run aggregate function
.. highlight:: yaml
Procedure format:
::
procedure: groupby
ingredients: # list of ingredient id
- ingredient_id
result: str # new ingredient id
options:
groupby: str or list # column(s) to group
aggregate: dict # function block
transform: dict # function block
filter: dict # function block
The function block should have below format:
::
aggregate:
column1: func_name1
column2: func_name2
or
::
        aggregate:
column1:
function: func_name
param1: foo
param2: baz
    Wildcards are supported in the column names, so ``aggregate: {"*": "sum"}`` will run on every indicator in
the ingredient
Keyword Args
------------
groupby : `str` or `list`
the column(s) to group, can be a list or a string
insert_key : `dict`
manually insert keys in to result. This is useful when we want to add back the
aggregated column and set them to one value. For example ``geo: global`` inserts
the ``geo`` column with all values are "global"
aggregate
transform
    filter : `dict`, optional
the function to run. only one of `aggregate`, `transform` and `filter` should be supplied.
Note
----
- Only one of ``aggregate``, ``transform`` or ``filter`` can be used in one procedure.
- Any columns not mentioned in groupby or functions are dropped.
"""
assert len(ingredients) == 1, "procedure only support 1 ingredient for now."
# ingredient = chef.dag.get_node(ingredients[0]).evaluate()
ingredient = ingredients[0]
logger.info("groupby: " + ingredient.id)
data = ingredient.get_data()
by = options.pop('groupby')
if 'insert_key' in options:
insert_key = options.pop('insert_key')
else:
insert_key = dict()
# only one of aggregate/transform/filter should be in options.
assert len(list(options.keys())) == 1
comp_type = list(options.keys())[0]
assert comp_type in ['aggregate', 'transform', 'filter']
if comp_type == 'aggregate': # only aggregate should change the key of ingredient
if isinstance(by, list):
newkey = ','.join(by)
else:
newkey = by
by = [by]
logger.debug("changing the key to: " + str(newkey))
else:
        newkey = ingredient.key
        if not isinstance(by, list):  # keep an existing list as-is instead of nesting it
            by = [by]
newdata = dict()
for name_tmpl, func in options[comp_type].items():
func = mkfunc(func)
indicator_names = fnmatch.filter(data.keys(), name_tmpl)
for k in indicator_names:
df = data[k].compute()
if comp_type == 'aggregate':
newdata[k] = (df.groupby(by=by).agg({k: func})
.reset_index().dropna())
if comp_type == 'transform':
df = df.set_index(ingredient.key)
levels = [df.index.names.index(x) for x in by]
newdata[k] = (df.groupby(level=levels)[k].transform(func)
.reset_index().dropna())
if comp_type == 'filter':
df = df.set_index(ingredient.key)
levels = [df.index.names.index(x) for x in by]
newdata[k] = (df.groupby(level=levels)[k].filter(func)
.reset_index().dropna())
for col, val in insert_key.items():
newdata[k][col] = val
newkey = newkey + ',' + col
return DataPointIngredient.from_procedure_result(result, newkey, data_computed=newdata)
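# Illustrative sketch (not part of the recipe module): how a "*" wildcard in the
# function block expands to indicator columns via fnmatch, as used above.
# import fnmatch
# fnmatch.filter(['pop', 'gdp', 'gdp_pc'], 'gdp*')   # -> ['gdp', 'gdp_pc']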
| 3.015625 | 3 |
Learning to Monitor Machine Health with Convolutional Bi-Directional LSTM Networks/main_test.py | cingtiye/Convolutional-Bi-Directional-LSTM-Networks-and-feature-based-gated-recurrent-unit-networks | 17 | 12788955 | # -*- coding: utf-8 -*-
import pickle
import numpy as np
from Conv_Bidrect_LSTM import CBLSTM
import tensorflow as tf
def load_data(normal_stat=False):
if normal_stat:
filepath = "./data/data_normal.p"
else:
filepath = "./data/data_seq.p"
with open(filepath, mode='rb') as f:
x = pickle.load(f, encoding='latin1')
    return x[0], x[1], x[2], x[3]  # return train_x, train_y, test_x, test_y
if __name__ == '__main__':
train_x, train_y, test_x, test_y = load_data()
print(train_x.shape)
print(train_y.shape)
l = 20 # time steps
d = 70 # data length
k = 50 # filter number
m = 4 # filter size
s = 2 # pool size
batch_size = 30 # batch size
train_x = train_x.reshape([-1, l, d, 1])
test_x = test_x.reshape([-1, l, d, 1])
model = CBLSTM(MODEL_TYPE = 'Regression',
FILTER_NUM = k,
FILTER_SIZE = m,
POOL_SIZE = s,
INPUT_LENGTH = d,
TIME_STEP = l,
CELL_UNITS = [50, 100],
FULL_UNITS = [100, 200],
KEEP_PROB = 0.5,
OUTPUT_NUM = 1, )
model.train_model(train_x = train_x,
train_y = train_y,
test_x = test_x,
test_y = test_y,
batch_size = batch_size,
num_epochs = 100,
num_threads = 4, )
| 2.578125 | 3 |
tests/unit/console/parsers/test_getinfo_parser.py | antonku/ncssl_api_client | 8 | 12788956 | <gh_stars>1-10
import unittest
import argparse
from ncssl_api_client.console.parsers.getinfo_parser import GetInfoParser
class GetInfoParserTest(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.subparsers = self.parser.add_subparsers(help='Available commands:', dest='command')
GetInfoParser().add_parser(self.subparsers)
def test_success_flow(self):
arguments = self.parser.parse_args(['getinfo', '-id', '00000', '-rc'])
self.assertEqual(arguments.command, 'getinfo')
self.assertEqual(arguments.ReturnCertificate, True)
self.assertEqual(arguments.CertificateID, '00000')
def test_error_insufficient_args(self):
with self.assertRaises(SystemExit):
self.parser.parse_args(['getinfo'])
| 2.90625 | 3 |
mc_luigi/tools/__init__.py | constantinpape/mc_luigi | 0 | 12788957 | from .tools import config_logger, run_decorator, get_replace_slices
from .numpy_tools import get_unique_rows, find_matching_row_indices, find_matching_indices, replace_from_dict, cartesian
| 1.328125 | 1 |
letterCombinations.py | xiaochuan-cd/leetcode | 0 | 12788958 |
tb = ['abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']
class Solution:
def recursive(self, st, res):
if not st:
return res
if not res:
res = ['']
res2 = []
cs = st.pop()
for c in cs:
res2 += [c+x for x in res]
return self.recursive(st, res2)
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
st = [tb[int(x)-2] for x in str(digits)]
res = []
return self.recursive(st, res)
if __name__ == "__main__":
print(Solution().letterCombinations(23456789))
| 3.421875 | 3 |
exphydro/lumped/ExphydroParameters.py | sopanpatil/exp-hydro | 11 | 12788959 | #!/usr/bin/env python
# Programmer(s): <NAME>.
# This file is part of the 'exphydro.lumped' package.
from hydroutils import Parameter
######################################################################
class ExphydroParameters(object):
def __init__(self):
""" Each parameter set contains a random realisation of all six
EXP-HYDRO parameters as well as default values of Nash-Sutcliffe
and Kling-Gupta efficiencies
"""
self.f = Parameter(0, 0.1)
self.smax = Parameter(100.0, 1500.0)
self.qmax = Parameter(10.0, 50.0)
self.ddf = Parameter(0.0, 5.0)
self.mint = Parameter(-3.0, 0.0)
self.maxt = Parameter(0.0, 3.0)
self.objval = -9999 # This is the objective function value
# ----------------------------------------------------------------
def assignvalues(self, f, smax, qmax, ddf, mint, maxt):
""" This method is used to manually assign parameter values,
which are given by the user as input arguments.
"""
self.f.value = f
self.smax.value = smax
self.qmax.value = qmax
self.ddf.value = ddf
self.mint.value = mint
self.maxt.value = maxt
# ----------------------------------------------------------------
def updateparameters(self, param1, param2, w):
""" This method is used for PSO algorithm.
Each parameter in the model has to do the following
two things:
(1) Update its velocity
(2) Update its value
"""
# Update parameter velocities
self.f.updatevelocity(param1.f, param2.f, w)
self.ddf.updatevelocity(param1.ddf, param2.ddf, w)
self.smax.updatevelocity(param1.smax, param2.smax, w)
self.qmax.updatevelocity(param1.qmax, param2.qmax, w)
self.mint.updatevelocity(param1.mint, param2.mint, w)
self.maxt.updatevelocity(param1.maxt, param2.maxt, w)
# Update parameter values
self.f.updatevalue()
self.ddf.updatevalue()
self.smax.updatevalue()
self.qmax.updatevalue()
self.mint.updatevalue()
self.maxt.updatevalue()
######################################################################
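# Illustrative usage sketch (not part of the original file): manually assigning a
# parameter set with the class's own method; the values below are arbitrary assumptions.
# params = ExphydroParameters()
# params.assignvalues(f=0.05, smax=800.0, qmax=20.0, ddf=2.5, mint=-1.0, maxt=1.5)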
| 3.015625 | 3 |
deepscm/experiments/medical/ukbb/sem_vi/conditional_sem.py | mobarakol/deepscm | 183 | 12788960 | import torch
import pyro
from pyro.nn import pyro_method
from pyro.distributions import Normal, Bernoulli, TransformedDistribution
from pyro.distributions.conditional import ConditionalTransformedDistribution
from deepscm.distributions.transforms.affine import ConditionalAffineTransform
from pyro.nn import DenseNN
from deepscm.experiments.medical.ukbb.sem_vi.base_sem_experiment import BaseVISEM, MODEL_REGISTRY
class ConditionalVISEM(BaseVISEM):
context_dim = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
# ventricle_volume flow
ventricle_volume_net = DenseNN(2, [8, 16], param_dims=[1, 1], nonlinearity=torch.nn.LeakyReLU(.1))
self.ventricle_volume_flow_components = ConditionalAffineTransform(context_nn=ventricle_volume_net, event_dim=0)
self.ventricle_volume_flow_transforms = [self.ventricle_volume_flow_components, self.ventricle_volume_flow_constraint_transforms]
# brain_volume flow
brain_volume_net = DenseNN(2, [8, 16], param_dims=[1, 1], nonlinearity=torch.nn.LeakyReLU(.1))
self.brain_volume_flow_components = ConditionalAffineTransform(context_nn=brain_volume_net, event_dim=0)
self.brain_volume_flow_transforms = [self.brain_volume_flow_components, self.brain_volume_flow_constraint_transforms]
@pyro_method
def pgm_model(self):
sex_dist = Bernoulli(logits=self.sex_logits).to_event(1)
_ = self.sex_logits
sex = pyro.sample('sex', sex_dist)
age_base_dist = Normal(self.age_base_loc, self.age_base_scale).to_event(1)
age_dist = TransformedDistribution(age_base_dist, self.age_flow_transforms)
age = pyro.sample('age', age_dist)
age_ = self.age_flow_constraint_transforms.inv(age)
        # pseudo call to age_flow_components to register with pyro
_ = self.age_flow_components
brain_context = torch.cat([sex, age_], 1)
brain_volume_base_dist = Normal(self.brain_volume_base_loc, self.brain_volume_base_scale).to_event(1)
brain_volume_dist = ConditionalTransformedDistribution(brain_volume_base_dist, self.brain_volume_flow_transforms).condition(brain_context)
brain_volume = pyro.sample('brain_volume', brain_volume_dist)
        # pseudo call to brain_volume_flow_components to register with pyro
_ = self.brain_volume_flow_components
brain_volume_ = self.brain_volume_flow_constraint_transforms.inv(brain_volume)
ventricle_context = torch.cat([age_, brain_volume_], 1)
ventricle_volume_base_dist = Normal(self.ventricle_volume_base_loc, self.ventricle_volume_base_scale).to_event(1)
ventricle_volume_dist = ConditionalTransformedDistribution(ventricle_volume_base_dist, self.ventricle_volume_flow_transforms).condition(ventricle_context) # noqa: E501
ventricle_volume = pyro.sample('ventricle_volume', ventricle_volume_dist)
        # pseudo call to ventricle_volume_flow_components to register with pyro
_ = self.ventricle_volume_flow_components
return age, sex, ventricle_volume, brain_volume
@pyro_method
def model(self):
age, sex, ventricle_volume, brain_volume = self.pgm_model()
ventricle_volume_ = self.ventricle_volume_flow_constraint_transforms.inv(ventricle_volume)
brain_volume_ = self.brain_volume_flow_constraint_transforms.inv(brain_volume)
z = pyro.sample('z', Normal(self.z_loc, self.z_scale).to_event(1))
latent = torch.cat([z, ventricle_volume_, brain_volume_], 1)
x_dist = self._get_transformed_x_dist(latent)
x = pyro.sample('x', x_dist)
return x, z, age, sex, ventricle_volume, brain_volume
@pyro_method
def guide(self, x, age, sex, ventricle_volume, brain_volume):
with pyro.plate('observations', x.shape[0]):
hidden = self.encoder(x)
ventricle_volume_ = self.ventricle_volume_flow_constraint_transforms.inv(ventricle_volume)
brain_volume_ = self.brain_volume_flow_constraint_transforms.inv(brain_volume)
hidden = torch.cat([hidden, ventricle_volume_, brain_volume_], 1)
latent_dist = self.latent_encoder.predict(hidden)
z = pyro.sample('z', latent_dist)
return z
MODEL_REGISTRY[ConditionalVISEM.__name__] = ConditionalVISEM
| 2.0625 | 2 |
src/destination/abstract_classes/__init__.py | tomfran/lastfm-project | 1 | 12788961 | <gh_stars>1-10
from .abstract_destination import AbstractDestination
| 1.085938 | 1 |
configs/train_config.py | Open-Speech-EkStep/speech_music_classification | 0 | 12788962 | <gh_stars>0
from dataclasses import dataclass
@dataclass
class SpectConfig:
samp_rate : int = 16000 # Sampling rate for extracting spectrogram features
n_fft : int = 512 # Length of the windowed signal after padding
win_dur : float = 0.025 # Window size in seconds
win_stride : float = 0.01 # Window stride in seconds
@dataclass
class TrainConfig:
speech_folder_path : str = ''
songs_folder_path : str = ''
lr : float = 0.0001
batch_size : int = 16
num_epochs : int = 50 | 2.34375 | 2 |
Edurights/UI/urls.py | AnshShrivastava/EduRights | 0 | 12788963 | <filename>Edurights/UI/urls.py
from django.urls import path
from . import views
urlpatterns = [
path("",views.home,name="Home"),
path("home",views.home,name="Home"),
path("test",views.test,name="test"),
path("newrequest",views.newrequest,name="Requests"),
path("about",views.about_us,name="About Us"),
path("contact",views.leads,name="Contact Us"),
path("institutes",views.institutes,name="Institutes"),
path("vloggers",views.vloggers,name="Vloggers"),
path("vlogs",views.vlogs,name="Vlogs"),
path("vloglist",views.vloglist,name="Vlogs"),
path("layout",views.layout,name="layout"),
# path('search1',views.search.as_view(),name='Search'),
path('college', views.college, name="College"),
path('thank', views.thank, name="thank"),
path('addreview', views.addreview, name="add")
] | 1.945313 | 2 |
chmp/src/chmp/bayes/__init__.py | chmp/misc-exp | 6 | 12788964 | """Helpers for Bayesian Modelling.
"""
import inspect
class NoOpContext:
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_value, traceback):
pass
class Model(NoOpContext):
"""Definition of a model
Usage::
with bayes.Model() as model:
@model.observe
def _(s):
s.x = tf.placeholder(dtype=floatx, shape=[None, 1], name='x')
s.y = tf.placeholder(dtype=floatx, shape=[None], name='y')
@model.define
def _(s, lam=0.5):
s.p.w = tf.distributions.Normal(loc=0.0, scale=1.0 / lam)
s.p.y = tf.distributions.Normal(loc=tf.squeeze(s.x @ s.w[:, None]), scale=1.0)
@model.inference
def _(s):
_, n_features = get_shape(s.x)
s.q.w = tf.distributions.Normal(
loc=tf.get_variable('w_loc', shape=[n_features], dtype=floatx),
scale=tf.nn.softplus(tf.get_variable('w_scale', shape=[n_features], dtype=floatx)),
)
"""
def __init__(self):
self._scope = {"observed": {}}
self._observed = None
self._definition = None
self._inference = None
self._built = False
def observe(self, func):
self._observed = func
return func
def define(self, func):
self._definition = func
return func
def inference(self, func):
self._inference = func
return func
# TODO: deprecate
def __getitem__(self, key):
self._ensure_observed()
if isinstance(key, tuple):
return tuple(self._scope["observed"][k] for k in key)
return self._scope["observed"][key]
def get(self, *args, **kwargs):
kwargs.setdefault("ensure_loss", "loss" in args)
scope = self.build(**kwargs)
res = []
for k in args:
if k == "loss":
res.append(scope["loss"])
elif k in scope["observed"]:
res.append(scope["observed"][k])
elif k in scope["latent"]:
res.append(scope["latent"][k])
else:
raise KeyError(f"cannot get {k}")
return res[0] if len(res) == 1 else tuple(res)
def build(self, scope=None, latent_strategy=None, ensure_loss=True):
import tensorflow as tf
if scope is None:
scope = {}
if latent_strategy is None:
latent_strategy = sample_latent
self._ensure_observed()
scope = dict(self._scope, **scope)
scope = Scope(scope, latent_strategy=latent_strategy)
with tf.variable_scope("inference", reuse=tf.AUTO_REUSE):
self._inference(scope)
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
scope._scope["loss"] = self._definition(scope)
if ensure_loss and scope._scope["loss"] is None:
scope._scope["loss"] = _build_kl_loss(scope._scope)
return scope.get()
def _ensure_observed(self):
if self._built:
return
if self._observed is None:
self._built = True
return
with DictWrapper(self._scope["observed"]) as s:
self._observed(s)
self._built = True
class DictWrapper(NoOpContext):
def __init__(self, d):
super().__setattr__("_target", d)
def __setattr__(self, k, v):
self._target[k] = v
def __getattr__(self, k):
try:
return self._target[k]
except KeyError:
raise AttributeError(k)
class Scope:
def __init__(self, scope, latent_strategy=None):
if latent_strategy is None:
latent_strategy = sample_latent
self._scope = dict(scope)
self._scope.setdefault("q", {})
self._scope.setdefault("p", {})
self._scope.setdefault("latent", {})
self._latent_strategy = latent_strategy
def __getattr__(self, key):
scope = self._scope
if key in scope["latent"]:
return scope["latent"][key]
if key in scope["observed"]:
return scope["observed"][key]
if key in scope["q"]:
self._latent_strategy(scope, key)
return scope["latent"][key]
raise AttributeError(key)
def get(self):
return self._scope
@property
def p(self):
return DictWrapper(self._scope["p"])
@property
def q(self):
return DictWrapper(self._scope["q"])
def build(model, *defs, latent_strategy=None):
scope = model.build(latent_strategy=latent_strategy)
if not defs:
return scope
res = []
for f in defs:
spec = inspect.getfullargspec(f)
# TODO: raise error for unsupported features
args = [_lookup_dist(scope, arg) for arg in spec.args]
res.append(f(*args))
if len(defs) == 1:
return res[0]
return tuple(res)
def _lookup_dist(scope, k):
return scope["p"][k] if k in scope["observed"] else scope["q"][k]
def sample_prior(scope, key):
scope["latent"][key] = scope["p"][key].sample()
def sample_latent(scope, key):
scope["latent"][key] = scope["q"][key].sample()
def sample_latent_no_grad(scope, key):
import tensorflow as tf
scope["latent"][key] = tf.stop_gradient(scope["q"][key].sample())
def average_latent(scope, key):
scope["latent"][key] = scope["q"][key].mean()
def build_reparam_loss(model):
import tensorflow as tf
# TODO: raise warning if non-re-parametrizable
scope = (
model if isinstance(model, dict) else model.build(latent_strategy=sample_latent)
)
loss = tf.reduce_mean(scope["loss"])
return loss, loss
def build_score_loss(model, var_list=None):
import tensorflow as tf
scope = (
model
if isinstance(model, dict)
else model.build(latent_strategy=sample_latent_no_grad)
)
if var_list is None:
var_list = tf.trainable_variables()
grad_q = 0
for k, q in scope["q"].items():
v = scope["latent"][k]
grad_q += q.log_prob(v)
return (
tf.reduce_mean(scope["loss"]),
tf.reduce_mean(scope["loss"] + tf.stop_gradient(scope["loss"]) * grad_q),
)
def relax_bernoulli(p, temperature=1.0):
"""Create a relaxed sample from a Bernoulli distribution.
:param tf.distributions.Bernoulli p:
the bernoulli distribution from which to sample
:param float temperature:
the temperature used for relaxed quantities
:returns:
a triple of sampled variable, relaxed variable and relaxed variable
conditioned on the non-relaxed variable.
"""
import tensorflow as tf
u = tf.random_uniform(tf.shape(p.probs))
z = tf.log(p.probs / (1.0 - p.probs)) + tf.log(u / (1.0 - u))
b = tf.cast(z > 0, dtype=z.dtype)
b = tf.stop_gradient(b)
b_relaxed = tf.sigmoid(z / temperature)
nu = tf.random_uniform(tf.shape(b))
nu_cond = (nu * (1 - p.probs)) * (1 - b) + (1 - p.probs * nu) * b
z_cond = tf.log(p.probs / (1.0 - p.probs)) + tf.log(nu_cond / (1.0 - nu_cond))
b_cond_relaxed = tf.sigmoid(z_cond / temperature)
return b, b_relaxed, b_cond_relaxed
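# Hedged usage sketch (assumes the TF1-style tf.distributions API used in this module;
# shapes and probabilities are arbitrary assumptions):
# p = tf.distributions.Bernoulli(probs=tf.fill([8], 0.3))
# b, b_relaxed, b_cond_relaxed = relax_bernoulli(p, temperature=0.5)
# b is a hard {0, 1} sample; b_relaxed and b_cond_relaxed are its relaxed counterparts.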
def relax_categorical(p, temperature=1.0):
"""Create a relaxed sample from a OneHotCategorical distribution.
    :param Union[tf.distributions.Multinomial,tf.contrib.distributions.OneHotCategorical] p:
the categorical distribution from which to sample. If specified as a
Multinomial, the total count has to be 1.
:param float temperature:
the temperature used for relaxed quantities
:returns:
a triple of sampled variable, relaxed variable and relaxed variable
conditioned on the non-relaxed variable.
"""
import tensorflow as tf
if isinstance(p, tf.distributions.Multinomial):
control_deps = [
tf.assert_equal(p.total_count, 1.0, message="can only relax categoricals")
]
event_size = tf.shape(p.probs)[-1]
else:
control_deps = []
event_size = p.event_size
z = tf.log(p.probs) - tf.log(-tf.log(tf.random_uniform(tf.shape(p.probs))))
b = tf.argmax(z, axis=-1)
b = tf.one_hot(b, event_size)
with tf.control_dependencies(control_deps):
b = tf.stop_gradient(b)
b_relaxed = tf.nn.softmax(z / temperature)
alpha = (1.0 - p.probs) / p.probs
theta_b = tf.reduce_sum(p.probs * b, keep_dims=True, axis=-1)
u_i_exp = 1 - b
u_b_exp = b + (1 - b) * p.probs / theta_b
u_b = tf.random_uniform(tf.shape(p.probs)) ** (1.0 / (1.0 + alpha))
u_b = tf.reduce_sum(u_b * b, keep_dims=True, axis=-1)
u_i = tf.random_uniform(tf.shape(p.probs))
u_cond = (u_i ** u_i_exp) * (u_b ** u_b_exp)
z_cond = tf.log(p.probs) - tf.log(-tf.log(u_cond))
b_cond_relaxed = tf.nn.softmax(z_cond / temperature)
return b, b_relaxed, b_cond_relaxed
def build_relax_loss(model):
"""Build the RELAX loss.
Described in <NAME> al., "Backpropagation through the Void:
Optimizing control variates for black-box gradient estimation", 2017,
found at ``https://arxiv.org/abs/1711.00123``.
:param Model model:
the model to build the REBAR loss for
:returns:
a pair of loss and train loss
"""
import tensorflow as tf
scope = model.build(latent_strategy=relax_latent_strategy)
scope_cond_relaxed = dict(
scope, latent=scope["latent_cond_relaxed"].copy(), p={}, loss=None
)
scope_cond_relaxed = model.build(
scope=scope_cond_relaxed, latent_strategy=raise_latent_strategy
)
scope_relaxed = dict(scope, latent=scope["latent_relaxed"].copy(), p={}, loss=None)
scope_relaxed = model.build(
scope=scope_relaxed, latent_strategy=raise_latent_strategy
)
grad_q = 0
for k, q in scope["q"].items():
v = scope["latent"][k]
grad_q += q.log_prob(v)
loss = (
scope["loss"]
+ tf.stop_gradient(scope["loss"] - scope_cond_relaxed["loss"]) * grad_q
+ scope_relaxed["loss"]
- scope_cond_relaxed["loss"]
)
return tf.reduce_mean(scope["loss"]), tf.reduce_mean(loss)
def relax_latent_strategy(scope, key):
import tensorflow as tf
p = scope["q"][key]
if isinstance(p, tf.distributions.Bernoulli):
v, v_relaxed, v_cond_relaxed = relax_bernoulli(p)
elif isinstance(p, tf.distributions.Multinomial):
v, v_relaxed, v_cond_relaxed = relax_categorical(p)
elif isinstance(p, tf.contrib.distributions.OneHotCategorical):
v, v_relaxed, v_cond_relaxed = relax_categorical(p)
elif isinstance(p, tf.distributions.Categorical):
raise NotImplementedError(
"use Multinomial with total_count = 1 or OneHotCategorical"
)
else:
v = v_relaxed = v_cond_relaxed = p.sample()
v = tf.stop_gradient(v)
scope.setdefault("latent", {})[key] = v
scope.setdefault("latent_relaxed", {})[key] = v_relaxed
scope.setdefault("latent_cond_relaxed", {})[key] = v_cond_relaxed
def raise_latent_strategy(scope, key):
"""Raise for non-existing latent variables"""
raise RuntimeError(f"latent variable {key} does not exit")
def _build_kl_loss(scope):
loss = 0
for k, p in scope["p"].items():
if k in scope["latent"]:
v = scope["latent"][k]
else:
v = scope["observed"][k]
loss += p.log_prob(v)
for q in scope["q"].values():
loss += q.entropy()
return -loss
| 2.984375 | 3 |
tdb/app.py | hotdrink7/tdb | 0 | 12788965 | from base64 import b64encode
from ipykernel.comm import Comm
from IPython import get_ipython
import io
from io import BytesIO
import urllib.request, urllib.parse, urllib.error
_comm=None
def is_notebook():
iPython=get_ipython()
if iPython is None or not iPython.config:
return False
return 'IPKernelApp' in iPython.config
def connect():
"""
establish connection to frontend notebook
"""
if not is_notebook():
print('Python session is not running in a Notebook Kernel')
return
global _comm
kernel=get_ipython().kernel
kernel.comm_manager.register_target('tdb',handle_comm_opened)
# initiate connection to frontend.
_comm=Comm(target_name='tdb',data={})
# bind recv handler
_comm.on_msg(None)
def send_action(action, params=None):
"""
helper method for sending actions
"""
data={"msg_type":"action", "action":action}
if params is not None:
data['params']=params
_comm.send(data)
def send_fig(fig,name):
"""
sends figure to frontend
"""
imgdata = BytesIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0) # rewind the data
uri = 'data:image/png;base64,' + urllib.parse.quote(b64encode(imgdata.getvalue()))
send_action("update_plot",params={"src":uri, "name":name})
# handler messages
def handle_comm_opened(msg):
# this won't appear in the notebook
print('comm opened')
print(msg) | 2.421875 | 2 |
pyrt/ray.py | sdeu/pyrt | 0 | 12788966 | <filename>pyrt/ray.py
from dataclasses import dataclass
from pyrt.point import Point3
from pyrt.vec3 import Vec3
@dataclass
class Ray:
origin: Point3
direction: Vec3
def point_at(self, t):
return self.origin + (t * self.direction)
def __str__(self):
return f'{self.origin} + t*{self.direction}'
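# Illustrative sketch (not part of the original module); assumes Point3 and Vec3
# accept three scalar components:
# r = Ray(origin=Point3(0, 0, 0), direction=Vec3(1, 0, 0))
# p = r.point_at(2.0)   # the point two units along the ray direction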
| 2.96875 | 3 |
coveo-stew/coveo_stew/utils.py | coveooss/coveo-python-oss | 7 | 12788967 | from pathlib import Path
from typing import MutableMapping, Any
from coveo_styles.styles import ExitWithFailure
import toml
from toml import TomlDecodeError
def load_toml_from_path(toml_path: Path) -> MutableMapping[str, Any]:
"""Loads a toml from path or raise ExitWithFailure on failure."""
return _load_toml_from_content(toml_path.read_text(), toml_path)
def _load_toml_from_content(toml_content: str, toml_path: Path) -> MutableMapping[str, Any]:
try:
return toml.loads(toml_content)
except TomlDecodeError as ex:
lineno, colno = ex.lineno, ex.colno # type: ignore
raise ExitWithFailure(suggestions=f"{toml_path}:{lineno}:{colno} parse error") from ex
| 2.328125 | 2 |
chess/parser.py | victor-rene/vacc | 0 | 12788968 | from vector import Vector
from movement import movements
def dest_rank(text):
i = len(text) -1
while i >= 0:
if text[i].isdigit():
return ord(text[i]) - 48 - 1
else: i -= 1
raise Exception("No number found in " + text + ".")
def dest_file(text):
i = len(text) - 1
while i >= 0:
if text[i].islower():
return ord(text[i]) - 96 - 1
else: i -= 1
raise Exception("No lowercase char found in " + text + ".")
def convert_file(c):
return ord(c) - 96 - 1
def convert_rank(c):
return ord(c) - 48 - 1
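# Worked example (illustrative, not from the original file): for the square "e4",
# dest_file("e4") -> 4 and dest_rank("e4") -> 3 (both zero-based: file 'a' = 0, rank '1' = 0).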
def search_pieces(board, type, file=None, rank=None):
squares = []
file_range = []
rank_range = []
if file != None:
file_range = [file]
else: file_range = range(8)
if rank != None:
rank_range = [rank]
else: rank_range = range(8)
for rank in rank_range:
for file in file_range:
if board.squares[file][rank] == type:
squares.append([file, rank]);
return squares
def clear_path(board, orig, dest, vector):
square = orig[:]
i = 1
while i < 8: # safety measure to prevent infinite loop
square[0] += vector.dx
square[1] += vector.dy
if square[0] == dest[0] and square[1] == dest[1]:
return True
if board.squares[square[0]][square[1]] == ' ':
i += 1
else: return False
def origin_hint(move):
offset = 1 if 'x' in move else 0 # capture
if len(move) < (4 + offset) or move[2 + offset].isdigit():
return None
else: return move[1]
# http://en.wikipedia.org/wiki/Portable_Game_Notation
def read_algebraic(board, move):
input = None
if move[0] == 'O': # castle
if board.side_to_move == -1:
if move == "O-O": # short
board.squares[5][7] = 'r'
board.squares[6][7] = 'k'
board.squares[7][7] = ' '
board.squares[4][7] = ' '
input = ([4, 7], [6, 7])
elif move == "O-O-O": # long
board.squares[3][7] = 'r'
board.squares[2][7] = 'k'
board.squares[0][7] = ' '
board.squares[4][7] = ' '
input = ([4, 7], [2, 7])
else:
if move == "O-O": # short
board.squares[5][0] = 'R'
board.squares[6][0] = 'K'
board.squares[7][0] = ' '
board.squares[4][0] = ' '
input = ([4, 0], [6, 0])
elif move == "O-O-O": # long
board.squares[3][0] = 'R'
board.squares[2][0] = 'K'
board.squares[0][0] = ' '
board.squares[4][0] = ' '
input = ([4, 0], [2, 0])
else: # not castle
# if 'x' in move: # capture
# captured = self.squares[destfile][destrank]
# if captured == None:
# raise Exception("No piece to capture on " + dest.ToString() + ".")
# self.squares[destfile][destrank] = ' '
destrank = dest_rank(move)
destfile = dest_file(move)
origfile = None
origrank = None
if move[0].islower(): # pawn move
pawns = None
origfile = convert_file(move[0])
if board.side_to_move == 1:
pawns = search_pieces(board, 'P', file=origfile)
else: pawns = search_pieces(board, 'p', file=origfile)
if move[1] == 'x': # capture
origrank = destrank - board.side_to_move
board.squares[destfile][destrank] = board.squares[origfile][origrank]
board.squares[origfile][origrank] = ' '
input = ([origfile, origrank], [destfile, destrank])
else: # not a capture
if len(pawns) == 1: # only one pawn in file
origfile = pawns[0][0]
origrank = pawns[0][1]
board.squares[destfile][destrank] = board.squares[origfile][origrank]
board.squares[origfile][origrank] = ' '
input = ([origfile, origrank], [destfile, destrank])
else: # find pawn closest to destination
i = 1
while i < 8:
origrank = destrank - i * board.side_to_move
if board.squares[origfile][origrank] != ' ':
break
i += 1
board.squares[destfile][destrank] = board.squares[origfile][origrank]
board.squares[origfile][origrank] = ' '
input = ([origfile, origrank], [destfile, destrank])
if move.find('=') != -1: # promotion
pos = move.index('=')
if board.side_to_move == 1:
board.squares[destfile][destrank] = move[pos+1]
else: board.squares[destfile][destrank] = move[pos+1].lower()
else: # piece move
if board.side_to_move == 1:
squares = search_pieces(board, move[0])
else: squares = search_pieces(board, move[0].lower())
if len(squares) == 1: # only one piece
origfile = squares[0][0]
origrank = squares[0][1]
else: # find origin square
orig = None
hint = origin_hint(move)
if not hint: # only one candidate piece
for file, rank in squares:
vector = Vector.create(file, rank, destfile, destrank)
squares = search_pieces(board, move[0])
piece = board.squares[file][rank]
if movements[piece].is_sliding:
vector.normalize()
for v in movements[piece].vectors:
# print move, board.side_to_move, '.', v.dx, v.dy, '|', vector.dx, vector.dy
if v == vector and clear_path(board, [file, rank], [destfile, destrank], v):
orig = [file, rank]
break
else: # several candidates pieces
if hint.isdigit(): # hint is rank
for square in squares:
if square[1] == convert_rank(hint):
orig = square
break
else: # hint is file
for square in squares:
if square[0] == convert_file(hint):
orig = square
break
origfile = orig[0]
origrank = orig[1]
board.squares[destfile][destrank] = board.squares[origfile][origrank]
board.squares[origfile][origrank] = ' '
input = ([origfile, origrank], [destfile, destrank])
board.switch_turn()
# print move, board.to_fen()
return input
| 3.625 | 4 |
samochat/__init__.py | SamoChat/samochat-python | 1 | 12788969 | <reponame>SamoChat/samochat-python
# Authorization
from samochat.auth import OAuthHandler
# API data
from samochat.client import SamochatData
# global variables
client_id = None
client_secret = None
__base_url__ = "https://api.samochat.net/" | 1.578125 | 2 |
yard/skills/00-web/django_demo/dss/urls.py | paser4se/bbxyard | 1 | 12788970 | <filename>yard/skills/00-web/django_demo/dss/urls.py<gh_stars>1-10
from com.adapter.urls import path, include
import dss.views
urlpatterns = [
path('do', dss.views.do),
]
| 1.414063 | 1 |
lorenz/tests/test_dataset.py | eryl/lorenz-timeseries | 0 | 12788971 | import unittest
import os
import matplotlib.pyplot as plt
import numpy as np
from lorenz.lorenz import make_dataset, plot_3d
import lorenz.dataset
class TestDataset(unittest.TestCase):
def setUp(self):
self.path = os.path.join(os.path.split(os.path.split(os.path.dirname(__file__))[0])[0], 'data', 'lorenz.h5')
def test_random_iterator_1d(self):
rng = np.random.RandomState(1729)
dataset = lorenz.dataset.Dataset(self.path, view='1d')
for b in dataset.random_iterator(4, 100):
plt.plot(np.linspace(0,1,b.shape[1]), b[:,:,0].T)
plt.show()
#plot_3d(b)
def test_random_iterator_3d(self):
rng = np.random.RandomState(1729)
dataset = lorenz.dataset.Dataset(self.path, view='3d')
for b in dataset.random_iterator(4, 100):
plot_3d(b)
| 2.9375 | 3 |
keras_maskrcnn/utils/overlap.py | akashdeepjassal/keras-maskrcnn | 432 | 12788972 | <filename>keras_maskrcnn/utils/overlap.py
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
def compute_overlap(a, b):
"""
Args
a: (N, H, W) ndarray of float
b: (K, H, W) ndarray of float
Returns
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
intersection = np.zeros((a.shape[0], b.shape[0]))
union = np.zeros((a.shape[0], b.shape[0]))
for index, mask in enumerate(a):
intersection[index, :] = np.sum(np.count_nonzero(b & mask, axis=1), axis=1)
union[index, :] = np.sum(np.count_nonzero(b + mask, axis=1), axis=1)
return intersection / union
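# Illustrative sketch (not part of the original file): overlap between two binary masks;
# the arrays below are arbitrary assumptions.
# a = np.array([[[1, 1], [0, 0]]], dtype=bool)   # one (2, 2) mask
# b = np.array([[[1, 0], [0, 0]]], dtype=bool)   # one (2, 2) query mask
# compute_overlap(a, b)   # -> [[0.5]]  (intersection 1 pixel, union 2 pixels)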
| 2.65625 | 3 |
docker/output/Neo_Gene_EC_Map.py | PhilPalmer/onemetagenome | 1 | 12788973 | #!/usr/bin/env python
import os
import os.path
import sys
import shutil
Swiss_prot_map = sys.argv[1]
Prot_file = sys.argv[2]
Gene_EC_map = sys.argv[3]
mapping_dict = {}
with open(Swiss_prot_map, "r") as mapping:
for line in mapping:
line_as_list = line.strip("\n").split("\t")
mapping_dict[line_as_list[0]] = set(line_as_list[2:])
EC2Gene = {}
def db_hits(dmnd_out, EC_map):
with open(dmnd_out, "r") as diamond:
for line in diamond:
line_as_list = line.strip("\n").split("\t")
for EC in mapping_dict:
if line_as_list[1] in mapping_dict[EC]:
try:
EC_map[EC].append(line_as_list[0])
except:
EC_map[EC] = [line_as_list[0]]
db_hits(Prot_file, EC2Gene)
gene_count = 0
with open(Gene_EC_map, "w") as ec_out:
for EC in EC2Gene:
for Gene in EC2Gene[EC]:
print EC+"\n"
ec_out.write(Gene + "\t" + EC +"\n")
gene_count += 1
print str(gene_count) + " proteins were mapped with mmseqs to " + str(len(EC2Gene)) + " unique enzyme functions."
| 2.953125 | 3 |
q2.py | Utd04/Kannada-Digit-Classification-using-Neural-Networks | 0 | 12788974 | <reponame>Utd04/Kannada-Digit-Classification-using-Neural-Networks
import sys
import pandas as pd
import numpy as np
import math
import time
import random
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
def sigmoid_activation(a):
a1 = np.multiply(a >= 0, a)
a2 = np.multiply(a < 0, a)
return np.add(1/(1+np.exp(-a1)), np.divide(np.exp(a2), (1+np.exp(a2)))) - 0.5
def sigmoid_derivative(a):
return np.multiply(sigmoid_activation(a), 1-sigmoid_activation(a))
def relu_activation(a):
return np.multiply(a > 0, a)
def relu_derivative(a):
return np.multiply(a > 0, np.ones(a.shape, dtype=float))
def RandomInit(layerinfo):
np.random.seed(1)
modelParameters = {}
for l in range(1, len(layerinfo)):
# can use np.random.rand() too
sizeOfMatrix = (layerinfo[l],layerinfo[l-1])
modelParameters["Weight"+str(l)] = np.random.normal(0, 1, sizeOfMatrix)*np.sqrt(2.0/layerinfo[l-1])
sizeOfMatrix = (layerinfo[l], 1)
modelParameters["Bias"+str(l)] = np.zeros(sizeOfMatrix, dtype=float)
return modelParameters
def prediction(modelParameters, data_x, activationFunction):
forward_pass = {}
x = np.transpose(data_x)
for i in range((int)(len(modelParameters)/2)):
x = np.dot(modelParameters["Weight"+str(i+1)], x) + modelParameters["Bias"+str(i+1)]
forward_pass["z"+str(i+1)] = x
x = sigmoid_activation(x)
forward_pass["a"+str(i+1)] = x
output = np.exp(forward_pass["a"+str((int)(len(modelParameters)/2))])
summer = np.sum(output, axis=0)
output = np.divide(output, summer)
return np.argmax(output, axis=0)
def onehot(y):
ret = []
for i in range(10):
if(y==i):
ret.append(1)
else:
ret.append(0)
return np.array(ret)
def gereralNeuralNetwork(trXpath,trYpath,teXpath,outputfile,batchSize,LayerStringInputArgument,activationFunction,adaptive):
hiddenLayer = (LayerStringInputArgument.split())
temporarylist = []
for i in hiddenLayer:
temporarylist.append((int(i)))
hiddenLayer = temporarylist
inputBoxes = 784
ouputBoxes = 10
learning_rate = 0.1
currerror = 10
Eps = 0.1
tolerance = 0.0001
CostStorage = []
if(adaptive):
learning_rate= 1
print("adaptive")
xTraining = np.load(trXpath)
yTraining = np.load(trYpath)
spvar = xTraining.shape
newx = np.zeros((spvar[0], spvar[1]*spvar[2]), dtype=int)
for i in range(spvar[0]):
newlist = []
for j in range(spvar[1]):
for k in range(spvar[2]):
newlist.append(xTraining[i][j][k])
newx[i] = np.array(newlist)
xTraining = newx
newy = np.zeros((yTraining.shape[0],10))
for i in range(yTraining.shape[0]):
newy[i] = onehot(yTraining[i])
yTraining = newy
# CONVERTING THE TEST X
xTest = np.load(teXpath)
spvar = xTest.shape
newx = np.zeros((spvar[0], spvar[1]*spvar[2]), dtype=int)
for i in range(spvar[0]):
newlist = []
for j in range(spvar[1]):
for k in range(spvar[2]):
newlist.append(xTest[i][j][k])
newx[i] = np.array(newlist)
xTest = newx
# TEST PURPOSE
# yTest = np.load("y_test.npy")
# newy = np.zeros((yTest.shape[0],10))
# for i in range(yTest.shape[0]):
# newy[i] = onehot(yTest[i])
# yTest = newy
xMatrix = np.asmatrix(xTraining)
yMatrix = np.asmatrix(yTraining)
xtestMatrix = np.asmatrix(xTest)
# ytestMatrix = np.asmatrix(yTest)
maxEpochs = 150
epochs = 0
m = len(xMatrix)
totalBatches = (int)(m/batchSize)
Boxes = [inputBoxes]
Boxes.extend(hiddenLayer)
Boxes.append(ouputBoxes)
modelParameters = RandomInit(Boxes)
while(True):
if(Eps > currerror):
break
if(epochs> maxEpochs):
break
currerror = 0
for batchIndex in range(totalBatches):
begin = batchIndex*batchSize
end = 0
if batchIndex == totalBatches-1:
end = m
else:
end = begin+batchSize
currentX = xMatrix[begin:end,:]
currentY = yMatrix[begin:end,:]
forwardValues = {}
layerCount =(len(modelParameters)//2)
xt = np.transpose(currentX)
forwardValues["a0"] = xt
for i in range(layerCount-1):
tempMat = modelParameters["Weight"+str(i+1)]
temp2= np.dot(tempMat, xt)
xt = temp2 + modelParameters["Bias"+str(i+1)]
forwardValues["z"+str(i+1)] = xt
if(activationFunction =="rlu"):
xt = relu_activation(xt)
else:
xt = sigmoid_activation(xt)
forwardValues["a"+str(i+1)] = xt
tempMat2 = modelParameters["Weight"+str(layerCount)]
temp3 = np.dot(tempMat2, xt)
xt = temp3 + modelParameters["Bias"+str(layerCount)]
forwardValues["z"+str(layerCount)] = xt
xt = sigmoid_activation(xt)
forwardValues["a"+str(layerCount)] = xt
trueOutput = np.transpose(currentY)
value0 = forwardValues["a"+str((len(modelParameters)//2))]
helpval = (forwardValues["a"+str((len(modelParameters)//2))] == 0)
value1 = np.multiply(1,helpval)
val = np.add(value0, value1)
loss0 = np.multiply(trueOutput, np.log(val))
value0 = 1 - forwardValues["a"+str((int)(len(modelParameters)/2))]
helpval = (forwardValues["a"+str((len(modelParameters)//2))] == 1)
value1 = np.multiply(1, helpval)
val = np.add(value0, value1)
loss1 = np.multiply(1-trueOutput, np.log(val))
loss = np.add(loss0, loss1)
loss = -1*loss
averageLoss = np.mean(loss, axis=1)
transposemat = np.transpose(averageLoss)
magnitude = np.dot(transposemat, averageLoss)[0, 0]
magnitude = np.sqrt(magnitude)
derivativeStorage= {}
finalParameters = {}
shapeVal = trueOutput.shape[1]
# # derivative for last layer of network
help0 = forwardValues["a"+str((len(modelParameters)//2))]
lastLayerDet = help0- trueOutput
derivativeStorage["der"+str((int)(len(modelParameters)/2))] = lastLayerDet
for i in range((len(modelParameters)//2) - 1, 0, -1):
if(activationFunction=="rlu"):
t0 = relu_derivative(forwardValues["z"+str(i)])
else:
t0 = sigmoid_derivative(forwardValues["z"+str(i)])
t2= derivativeStorage["der"+str(i+1)]
t1 = np.transpose(modelParameters["Weight"+str(i+1)])
f0 = np.dot(t1,t2)
lastLayerDet = np.multiply(f0, t0)
derivativeStorage["der"+str(i)] = lastLayerDet
for i in range(1, (len(modelParameters)//2) + 1):
t0 = forwardValues["a"+str(i-1)]
t1 = np.transpose(t0)
t2 = derivativeStorage["der"+str(i)]
f0 = np.dot(t2, t1)
val1 = (learning_rate/shapeVal)*f0
par1 = modelParameters["Weight"+str(i)]
finalParameters["Weight"+str(i)] = par1 - val1
p0 = np.sum(t2, axis=1)
val2 = (learning_rate/shapeVal)*p0
par2 = modelParameters["Bias"+str(i)]
finalParameters["Bias"+str(i)] = par2 - val2
modelParameters = finalParameters
currerror += ((float(magnitude))/(end-begin+1))
CostStorage.append(currerror)
epochs = epochs + 1
if(adaptive):
learning_rate = learning_rate/(math.sqrt(epochs))
# print("TRAINING DATA")
# trainingDataPerdiction =(prediction(modelParameters, xMatrix, activationFunction))
# trainingDataPerdiction = np.transpose(trainingDataPerdiction)
# a = (accuracy_score(np.argmax(yMatrix, axis=1), trainingDataPerdiction))
# print("TEST DATA")
testDataPrediction =(prediction(modelParameters, xtestMatrix, activationFunction))
testDataPrediction = np.transpose(testDataPrediction)
np.savetxt(outputfile, testDataPrediction, fmt="%d", delimiter="\n")
def partd(trXpath,trYpath,teXpath,outputfile):
xTraining = np.load(trXpath)
yTraining = np.load(trYpath)
xTest = np.load(teXpath)
spvar = xTraining.shape
newx = np.zeros((spvar[0], spvar[1]*spvar[2]), dtype=int)
for i in range(spvar[0]):
newlist = []
for j in range(spvar[1]):
for k in range(spvar[2]):
newlist.append(xTraining[i][j][k])
newx[i] = np.array(newlist)
xTraining = newx
clf = MLPClassifier(hidden_layer_sizes=(100,100),solver='sgd')
clf.fit(xTraining,yTraining)
# trainingDataPerdiction = clf.predict(xTraining)
# print(accuracy_score(yTraining, trainingDataPerdiction))
# predict over the test set...
spvar = xTest.shape
newx = np.zeros((spvar[0], spvar[1]*spvar[2]), dtype=int)
for i in range(spvar[0]):
newlist = []
for j in range(spvar[1]):
for k in range(spvar[2]):
                newlist.append(xTest[i][j][k])  # was xTraining: flatten the test images here
newx[i] = np.array(newlist)
xTest = newx
    DataPerdiction = clf.predict(xTest)  # was clf.predict(xTraining): predict on the test set
def main():
trXpath = sys.argv[1]
trYpath = sys.argv[2]
teXpath = sys.argv[3]
outputfile = sys.argv[4]
batchSize = int(sys.argv[5])
LayerStringInputArgument = sys.argv[6]
activationFunction = sys.argv[7]
adaptive = False
gereralNeuralNetwork(trXpath,trYpath,teXpath,outputfile,batchSize,LayerStringInputArgument,activationFunction,adaptive)
# partb(trXpath,trYpath,teXpath,outputfile,batchSize,LayerStringInputArgument,activationFunction)
# partd(trXpath,trYpath,teXpath,outputfile)
# code to plot the graphs
# layermat = ["1", "10", "50", "100", "500"]
# trainArr = [0.0]*5
# testArr = [0.0]*5
# for i in range(5):
# print(i)
# (a,b)= gereralNeuralNetwork(trXpath,trYpath,teXpath,outputfile,batchSize,layermat[i],activationFunction,adaptive)
# trainArr[i] = a
# testArr[i] = b
# print(trainArr)
# print(testArr)
# plt.title("Accuracy vs Hidden Layer Units")
# plt.plot(layermat, trainArr, label = 'Training')
# plt.plot(layermat, testArr, label = 'Testing')
# plt.xlabel("Hidden Layer Units")
# plt.ylabel('Accuracy')
# plt.legend()
# plt.show()
if __name__ == "__main__":
main()
| 3.5 | 4 |
tests/cases/update_with.py | trivoldus28/gunpowder | 43 | 12788975 | <gh_stars>10-100
import numpy as np
from gunpowder import (
BatchProvider,
BatchFilter,
Array,
ArraySpec,
ArrayKey,
Graph,
GraphSpec,
GraphKey,
Batch,
BatchRequest,
Roi,
PipelineRequestError,
build,
)
import pytest
class ArrayTestSource(BatchProvider):
def __init__(self, key, spec):
default_spec = ArraySpec(
voxel_size=(1,) * spec.roi.dims(),
interpolatable=False,
nonspatial=False,
dtype=np.uint8,
)
default_spec.update_with(spec)
spec = default_spec
self.key = key
self.array = Array(
np.zeros(spec.roi.get_shape() / spec.voxel_size, dtype=spec.dtype,),
spec=spec,
)
def setup(self):
self.provides(self.key, self.array.spec)
def provide(self, request):
batch = Batch()
roi = request[self.key].roi
batch[self.key] = self.array.crop(roi)
return batch
class RequiresSpec(BatchFilter):
def __init__(self, key, required_spec):
self.key = key
self.required_spec = required_spec
def setup(self):
self.updates(self.key, self.spec[self.key].copy())
def prepare(self, request):
deps = BatchRequest()
deps[self.key] = self.required_spec
return deps
def process(self, batch, request):
return batch
@pytest.mark.parametrize("request_dtype", [np.uint8, np.int64, np.float32])
def test_require_dtype(request_dtype):
dtypes = [
np.uint8,
np.uint16,
np.uint32,
np.int32,
np.int64,
np.float32,
np.float64,
]
array = ArrayKey("ARRAY")
roi = Roi((0, 0), (10, 10))
for dtype in dtypes:
source = ArrayTestSource(array, ArraySpec(roi=roi, dtype=dtype))
pipeline = source + RequiresSpec(array, ArraySpec(roi=roi, dtype=request_dtype))
with build(pipeline):
batch_request = BatchRequest()
batch_request[array] = ArraySpec(roi)
if dtype == request_dtype:
pipeline.request_batch(batch_request)
else:
with pytest.raises(PipelineRequestError):
pipeline.request_batch(batch_request)
@pytest.mark.parametrize("request_voxel_size", [(1, 1), (2, 2)])
def test_require_voxel_size(request_voxel_size):
voxel_sizes = [
(1, 1),
(4, 4),
(6, 6),
]
array = ArrayKey("ARRAY")
roi = Roi((0, 0), (12, 12))
for voxel_size in voxel_sizes:
source = ArrayTestSource(array, ArraySpec(roi=roi, voxel_size=voxel_size))
pipeline = source + RequiresSpec(
array, ArraySpec(roi=roi, voxel_size=request_voxel_size)
)
with build(pipeline):
batch_request = BatchRequest()
batch_request[array] = ArraySpec(roi)
if voxel_size == request_voxel_size:
pipeline.request_batch(batch_request)
else:
with pytest.raises(PipelineRequestError):
pipeline.request_batch(batch_request)
class GraphTestSource(BatchProvider):
def __init__(self, key, spec):
default_spec = GraphSpec(directed=True)
default_spec.update_with(spec)
spec = default_spec
self.key = key
self.graph = Graph([], [], spec=spec,)
def setup(self):
self.provides(self.key, self.graph.spec)
def provide(self, request):
batch = Batch()
roi = request[self.key].roi
batch[self.key] = self.graph.crop(roi)
return batch
@pytest.mark.parametrize("requested_directed", [True, False])
def test_require_directed(requested_directed):
directed_options = [True, False]
graph = GraphKey("GRAPH")
roi = Roi((0, 0), (10, 10))
for provided_directed in directed_options:
source = GraphTestSource(graph, GraphSpec(roi=roi, directed=provided_directed))
pipeline = source + RequiresSpec(
graph, GraphSpec(roi=roi, directed=requested_directed)
)
with build(pipeline):
batch_request = BatchRequest()
batch_request[graph] = GraphSpec(roi)
if provided_directed == requested_directed:
pipeline.request_batch(batch_request)
else:
with pytest.raises(PipelineRequestError):
pipeline.request_batch(batch_request)
| 2.203125 | 2 |
Python/43. MultiplyStrings.py | nizD/LeetCode-Solutions | 263 | 12788976 | """
Leetcode's Medium challenge #43 - Multiply Strings (Solution)
<https://leetcode.com/problems/multiply-strings/>
Description:
Given two non-negative integers num1 and num2
represented as strings, return the product of num1 and num2,
also represented as a string.
EXAMPLE:
Input: num1 = "2", num2 = "3"
Output: "6"
Author: <Curiouspaul1>
github: https://github.com/Curiouspaul1
"""
def int_(s):
"""
Converts strings to int, raises exception
for non-int literals
"""
reslt = 0
for i in s:
if ord(i) in range(48,58): # checks that string character is something in [0-9]
reslt = reslt*10 + (ord(i) - ord('0'))
else:
raise ValueError
return reslt
class Solution:
def multiply(self, num1: str, num2: str) -> str:
if len(num1) >= 110 or len(num2) >= 110: # constraints from leetcode
return 0
try:
num1, num2 = int_(num1), int_(num2)
result = num1 * num2
return str(result)
except ValueError:
print("Invalid Entry")
return 0 | 3.921875 | 4 |
src/engr16x/piTalk/computer.py | engr16x/engr16x-library | 0 | 12788977 | # Library file on the Computer.
# Must be in the same directory as any file using it's functions.
import socket
import struct
import sys
from threading import Thread, Event
from binascii import crc_hqx
class CompTalk:
def __init__(self, host):
# Variables that define the communication
self.buffer = 1024
self.CRCValue = 0x61
# The __init__ mainly searches for and establishes the connection
port = 12345 # Arbitrary, will be reassigned by the connection.
print('Attempting to connect using ', host)
try:
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.bind((host, port))
except:
sys.exit('Client IP Address was not valid. Check that the correct IP address was entered')
try:
print('Waiting for connection from host')
soc.listen(1)
self.conn, addr = soc.accept()
except:
            print('Connection request timed out.')
print('Connected by ', addr[0])
print('Press [ctrl + C] on Pi to stop\n')
self.dataStream = []
def _flatten( self, array):
# Takes a multi-dimensional array and flattens it to 1 dimension
return sum( ([x] if not isinstance(x, list) else self._flatten(x) for x in array), [] )
def _convert2list( self, list):
# Unpacks and structures the sent data into a list of the correct number of rows/columns/dimensions
dim = []
# Extract the dimensions of the array
# Format: [Number of Dimensions, Length of Dim1, Length of Dim2, ...Length of DimN, ...Data to be unpacked...]
dimLength = list[0]
for i in range(dimLength):
# Add 1 to skip the first element which defines dim length
dim.append(list[i + 1])
values = list[dimLength+1:]
        # Define an iterator and structure the remaining data based on the dimensions extracted
self._iterator = 0
return self._recursiveBuild( dim, values)
def _recursiveBuild( self, dimensions, data):
final = []
        # If there are still remaining dimensions, continue unpacking
if (len(dimensions) > 1):
for i in range(dimensions[0]):
final.append(self._recursiveBuild( dimensions[1:], data))
# If you have all the dimensions, begin building the data
else:
self._iterator += dimensions[0]
return data[self._iterator-dimensions[0]:self._iterator]
# Once finished, return the resulting array
return final
def _unpackFmt( self, data):
# Unpacks the format string for a packet
fmtString = ""
numPackets = struct.unpack("I", data[:4])[0]
        # Wait to receive all of the packets
while(numPackets > 1):
d = self._recvAndCheck()
if not data: return 0
data = data + d
numPackets -= 1
# combine the data into one string
for i in range(4, len(data)):
fmtString = str(fmtString + chr(data[i]))
        # Commas denote new packets, so split based on those
return fmtString.split(',')
def _unpackData( self, formatStr, data):
        # Unpacks the received raw data based on the format string
dataSize = { 'i':4, 'f':4, 's':1, '?':1 }
numPackets = len(formatStr)
content = []
p = 0 # Packet number
d = 0
while(p < numPackets):
length = 0
firstElement = True
isList = False
isString = False
i = 0 # index in format string
            d = 0 # index in data received
# Iterate through all expected packets
while (i < len(formatStr[p])):
                # Since the string is analyzed 1 digit at a time, this accounts for 2+ digit numbers
if (formatStr[p][i] == '-'):
break
if (formatStr[p][i] == '0'):
break
if (formatStr[p][i].isdigit()):
length = 10 * length + int(formatStr[p][i])
isList = True
# If not a digit then a data type was identified and something needs to be unpacked
else:
if (length == 0):
length = 1
if (formatStr[p][i] == 's'):
isString = True
string = ''
# append all of the characters for this entry to 1 string variable
for temp in range(length):
string = str(string + chr(data[p][d]))
d += 1 # move to next data entry
if (isList and firstElement and (formatStr[p-1][-1] == '-')):
content[-1] = str(content[-1] + string)
else:
content.append(string)
else:
# Append the next length of data to the resulting content
for temp in range(length):
content.append( struct.unpack(formatStr[p][i], data[p][d:(d+dataSize[formatStr[p][i]])])[0])
d += dataSize[formatStr[p][i]]
length = 0
firstElement = False
i += 1
p += 1
if (isList):
final = self._convert2list(content)
elif isString:
final = ''
for t in content:
final += t
else:
final = content[0]
return final
def _recvAndCheck( self):
        # Checks the sync byte to make sure the packet was fully received.
# Send a response accordingly
d = self.conn.recv(self.buffer + 2)
if (struct.unpack('H', d[-2:])[0] == 0x55AA):
self.conn.sendall(b"Valid.")
return d[:-2]
else:
self.conn.sendall(b"Invalid.")
            raise packetException('Communication Error: Packet could not be validated')
def getData( self, showRawData=False):
        # Waits for and receives all data in a communication attempt
#try:
# Wait for the data
data = self._recvAndCheck()
# Get the format string
if not data: return 0
formatString = self._unpackFmt( data)
        # Receive the rest of the packets if any, as identified in the format string
payload = []
for i in range(len(formatString)):
d = self._recvAndCheck()
if not data: return 0
payload.append( d)
# Unpack the data
content = self._unpackData( formatString, payload)
# Print raw data if requested by the user
if (showRawData):
print("\nBuffer Size: ", self.buffer, "\nFormat: ")
try:
[print(f) for f in formatString]
except:
print(formatString)
print("Recieved:")
try:
[print(str(c)) for c in content]
except:
print(content)
return content
#except packetException:
# print('Listening for resent data...')
# self.getData( showRawData=showRawData)
def streamData( self, showRawData=False):
# Creates a continuously refreshing data stream
self.dataBuffer = []
self.dataStream = []
self.receiveEvt = Event()
self.streaming = True
self.listen = Thread(target=self._waitForStream)
self.listen.daemon = True
self.listen.start()
return 1
def _waitForStream( self):
# Waits for the next communication in a data stream
print('Listening for data...')
try:
while self.streaming:
d = self.getData()
# print(d)
self.dataStream.append(d)
        except KeyboardInterrupt:
            # returning here ends the listening thread
            return
        except BrokenPipeError:
            # connection closed by the Pi; end the listening thread
            return
class packetException(Exception):
pass | 2.84375 | 3 |
probability/queue_test.py | peterhogan/python | 0 | 12788978 | <reponame>peterhogan/python<filename>probability/queue_test.py<gh_stars>0
from time import sleep
server_wait = "0 |"
server_serv = "0x|"
print("Queue System")
cust = "x"
n = 1
while True:
    print(server_wait, n*cust, end="\r")
    sleep(0.5)
    n += 1  # let the queue of waiting customers grow each tick
| 2.921875 | 3 |
__init__.py | mikhailkin/dataset | 0 | 12788979 | <reponame>mikhailkin/dataset
import sys
import importlib
sys.modules[__package__] = importlib.import_module('.dataset', __package__)
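# The line above aliases this package to its inner `dataset` module, so that
# `import dataset` exposes that submodule's attributes directly.
# Illustrative sketch (assumed usage, not part of the original file; the
# available names depend on what dataset/dataset.py actually defines):
#
#   import dataset            # resolves to dataset.dataset via sys.modules
#   print(dataset.__name__)   # -> 'dataset.dataset'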
| 1.125 | 1 |
OJS/teste3.py | r-luis/Projeto-PUB | 0 | 12788980 | <gh_stars>0
from urllib.request import urlopen
from urllib.error import URLError
from urllib.error import HTTPError
from bs4 import BeautifulSoup
import requests
def baixadireto(url, nomearquivo):
"""
    Downloads the PDF file directly
    and saves it straight into the directory shown.
"""
nomearquivo = nomearquivo.replace("/", "-")
r = requests.get(url)
print(nomearquivo, '-', url)
with open('Arquivos/' + nomearquivo + '.pdf', 'wb') as pdf:
for chunk in r.iter_content(chunk_size=2048):
if chunk:
pdf.write(chunk)
def abrirSite(s):
    '''Function to open sites and load them straight into BeautifulSoup
    automatically, without having to repeat the same code.
    s = URL of the link to be passed to the function'''
try:
html = urlopen(s)
except HTTPError as e:
print(e)
except URLError as e:
print(e)
return BeautifulSoup(html, 'html.parser')
def metadadosColeta(linkinicial, metad, arquivo):
"""Essa função coleta os metadados de um artigo (testado no OJS 3.1.2.1, 2.4.8.0)
metad => a variável que contém todos os metadados
Ex: variável = bs.find_all('meta')
metadadosColeta(variável)
"""
for m in metad:
if 'content' in m.attrs:
try:
print(f"<{linkinicial}> <{m.attrs['name'].replace('DC', 'dc')}> '{m.attrs['content']}'")
arquivo.write(f"<{linkinicial}> <{m.attrs['name'].replace('DC', 'dc')}> '{m.attrs['content']}'\n")
except:
pass
nome = 'rebecin'
arq = open(nome + '.ttl', 'w', encoding='utf-8')
links = 'https://portal.abecin.org.br/rebecin/issue/view/33' # 3.1.2.4
bs = abrirSite(links)
secoes = bs.find_all('div', {'class': 'issue-toc-section'})
pdfs = bs.find_all('div', {'class': 'article-summary-galleys'})
# Collect the article links
link_artigos = []
for secao in secoes:
subs = secao.find_all('div', {'class': 'article-summary-title'})
for s in subs:
link_artigos.append(s.find('a')['href'])
# Collect the metadata from the journal's content
print('@prefix dc: <http://purl.org/dc/elements/1.1/> .')
for link in link_artigos:
artigo = abrirSite(link)
meta_artigo = artigo.find_all('meta')
metadadosColeta(link, meta_artigo, arq)
    # Collecting the references
ref = artigo.find('div', {'class': 'article-details-references-value'}).get_text().split('\n')
for f in ref:
if len(f.strip()) == 0:
pass
else:
print(f"<{link}> <dc.bibliographicCitation> '{f.strip()}'\n")
arq.write(f"<{link}> <dc.bibliographicCitation> '{f.strip()}'\n")
arq.close()
# Download the PDFs
for pdf in pdfs:
link_download = pdf.find('a')['href'].replace('view', 'download')
nome_arq = link_download.split('/download/')[1].replace('/', '-')
baixadireto(link_download, nome + '_' + nome_arq) | 3.234375 | 3 |
app.py | vitorkaio/py-class-mongo | 0 | 12788981 | <filename>app.py
# from controller.database import Database
from controller.user import User
from controller.perfil import Perfil
from datetime import datetime as Date
from bson.objectid import ObjectId
user = User()
perfil = Perfil()
newUser = {
'name': 'Alice',
'password': '<PASSWORD>',
'email': '<EMAIL>',
'create_data': Date.now(),
'perfis': [
ObjectId("5d52ffed4cd29474379dab54")
]
}
res = user.insertUser(newUser)
print(res)
users = user.getUsers()
for userItem in users:
print(userItem['name'])
for perfilItem in userItem['perfis']:
res = perfil.getPerfil(perfilItem)
print(res['name'])
print('\n')
| 2.953125 | 3 |
source/main.py | ubombar/Traffic-Simulator | 0 | 12788982 | <reponame>ubombar/Traffic-Simulator<gh_stars>0
if __name__ == "__main__":
raise NotImplementedError('The main program not implemented yet!') | 1.289063 | 1 |
opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py | gky360/opentelemetry-python | 0 | 12788983 | <gh_stars>0
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# type: ignore
"""
OpenTelemetry Base Instrumentor
"""
from abc import ABC, abstractmethod
from logging import getLogger
_LOG = getLogger(__name__)
class BaseInstrumentor(ABC):
"""An ABC for instrumentors"""
def __init__(self):
self._is_instrumented = False
@abstractmethod
def _instrument(self) -> None:
"""Instrument"""
@abstractmethod
def _uninstrument(self) -> None:
"""Uninstrument"""
def instrument(self) -> None:
"""Instrument"""
if not self._is_instrumented:
result = self._instrument()
self._is_instrumented = True
return result
_LOG.warning("Attempting to instrument while already instrumented")
return None
def uninstrument(self) -> None:
"""Uninstrument"""
if self._is_instrumented:
result = self._uninstrument()
self._is_instrumented = False
return result
_LOG.warning("Attempting to uninstrument while already uninstrumented")
return None
__all__ = ["BaseInstrumentor"]
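# Illustrative sketch (not part of the original module): a minimal subclass
# showing how _instrument/_uninstrument are meant to be overridden. The class
# name and log messages below are assumptions made for the example.
class _DemoInstrumentor(BaseInstrumentor):
    """Toy instrumentor that only logs when it is installed or removed."""

    def _instrument(self) -> None:
        _LOG.info("demo instrumentation installed")

    def _uninstrument(self) -> None:
        _LOG.info("demo instrumentation removed")


# Typical usage: instrument() is guarded, so a second call only logs a warning.
#   instrumentor = _DemoInstrumentor()
#   instrumentor.instrument()
#   instrumentor.instrument()    # warns: already instrumented
#   instrumentor.uninstrument()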
| 2.1875 | 2 |
extra_foam/gui/windows/tests/test_filestream_window.py | ebadkamil/EXtra-foam | 7 | 12788984 | <gh_stars>1-10
import unittest
from unittest.mock import MagicMock, patch
from PyQt5.QtTest import QSignalSpy
from PyQt5.QtWidgets import QMainWindow
from extra_foam.logger import logger_stream as logger
from extra_foam.gui import mkQApp
from extra_foam.gui.windows.file_stream_w import FileStreamWindow
app = mkQApp()
logger.setLevel('CRITICAL')
class TestFileStreamWindow(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.gui = QMainWindow() # dummy MainGUI
cls.gui.registerSatelliteWindow = MagicMock()
@classmethod
def tearDownClass(cls):
cls.gui.close()
def testWithParent(self):
from extra_foam.gui.mediator import Mediator
mediator = Mediator()
# Disconnect all slots connected to this signal. We do this to prevent
# any un-GC'ed objects connected to it from previous tests from
# executing their slots.
#
# This came up when TestMainGuiCtrl::testFomFilterCtrlWidget() executed
# immediately before testWithParent(). That test happens to create an
# entire Foam() instance, which creates a DataSourceWidget somewhere in
# the object tree, which connects to this signal. The slot,
# DataSourceWidget.updateMetaData(), ends up making a call to Redis. So
# when testWithParent() ran and emitted this signal, that slot was
# called because the DataSourceWidget hadn't been GC'ed yet. This
# particular test case doesn't spin up Redis, so the slot would fail and
# raise an exception.
try:
mediator.file_stream_initialized_sgn.disconnect()
except TypeError:
# This call fails if there are no connections
pass
spy = QSignalSpy(mediator.file_stream_initialized_sgn)
win = FileStreamWindow(parent=self.gui)
widget = win._ctrl_widget
self.assertEqual('*', widget.port_le.text())
self.assertTrue(widget.port_le.isReadOnly())
self.assertEqual(1, len(spy))
self.gui.registerSatelliteWindow.assert_called_once_with(win)
self.gui.registerSatelliteWindow.reset_mock()
mediator.connection_change_sgn.emit({
"tcp://127.0.0.1:1234": 0,
"tcp://127.0.0.1:1235": 1,
})
self.assertEqual(1234, win._port)
with patch.object(win._ctrl_widget, "close") as mocked_close:
with patch.object(self.gui, "unregisterSatelliteWindow", create=True):
win.close()
mocked_close.assert_called_once()
def testStandAlone(self):
with self.assertRaises(ValueError):
FileStreamWindow(port=454522)
win = FileStreamWindow(port=45449)
widget = win._ctrl_widget
self.assertEqual('45449', widget.port_le.text())
self.assertIsNone(win._mediator)
with patch("extra_foam.gui.windows.file_stream_w.Process.start") as mocked_start:
# test when win._rd_cal is None
spy = QSignalSpy(win.file_server_started_sgn)
widget.serve_start_btn.clicked.emit()
self.assertEqual(0, len(spy))
mocked_start.assert_not_called()
# test when win._rd_cal is not None
win._rd_cal = MagicMock()
widget.serve_start_btn.clicked.emit()
self.assertEqual(1, len(spy))
mocked_start.assert_called_once()
# test populate sources
with patch("extra_foam.gui.windows.file_stream_w.load_runs") as lr:
with patch("extra_foam.gui.windows.file_stream_w.gather_sources") as gs:
with patch.object(widget, "initProgressControl") as fpc:
with patch.object(widget, "fillSourceTables") as fst:
# test load_runs return (None, None)
win._rd_cal, win._rd_raw = object(), object()
gs.return_value = (set(), set(), set())
lr.return_value = (None, None)
widget.data_folder_le.setText("abc")
lr.assert_called_with("abc")
fpc.assert_called_once_with(-1, -1)
fpc.reset_mock()
fst.assert_called_once_with(None, None) # test _rd_cal and _rd_raw were reset
fst.reset_mock()
# test load_runs raises
lr.side_effect = ValueError
widget.data_folder_le.setText("efg")
fpc.assert_called_once_with(-1, -1)
fpc.reset_mock()
fst.assert_called_once_with(None, None)
fst.reset_mock()
with patch("extra_foam.gui.windows.file_stream_w.run_info",
return_value=(100, 1001, 1100)):
# test load_runs return
lr.side_effect = None
gs.return_value = ({"DET1": ["data.adc"]},
{"output1": ["x", "y"]},
{"motor1": ["actualPosition"], "motor2": ["actualCurrent"]})
widget.data_folder_le.setText("hij")
fpc.assert_called_once_with(1001, 1100)
self.assertEqual("DET1", widget._detector_src_tb.item(0, 0).text())
cell_widget = widget._detector_src_tb.cellWidget(0, 1)
self.assertEqual(1, cell_widget.count())
self.assertEqual("data.adc", cell_widget.currentText())
self.assertEqual("output1", widget._instrument_src_tb.item(0, 0).text())
cell_widget = widget._instrument_src_tb.cellWidget(0, 1)
self.assertEqual(2, cell_widget.count())
self.assertEqual("x", cell_widget.currentText())
self.assertEqual("motor1", widget._control_src_tb.item(0, 0).text())
cell_widget = widget._control_src_tb.cellWidget(0, 1)
self.assertEqual(1, cell_widget.count())
self.assertEqual("actualPosition", cell_widget.currentText())
self.assertEqual("motor2", widget._control_src_tb.item(1, 0).text())
cell_widget = widget._control_src_tb.cellWidget(1, 1)
self.assertEqual(1, cell_widget.count())
self.assertEqual("actualCurrent", cell_widget.currentText())
# None is selected.
self.assertEqual(([], [], []), widget.getSourceLists())
def testInitProgressControl(self):
win = FileStreamWindow(port=45452)
widget = win._ctrl_widget
with patch.object(widget.tid_progress_br, "reset") as mocked_reset:
widget.initProgressControl(1001, 1100)
self.assertEqual(1001, widget.tid_start_sld.minimum())
self.assertEqual(1100, widget.tid_start_sld.maximum())
self.assertEqual(1001, widget.tid_start_sld.value())
self.assertEqual("1001", widget.tid_start_lb.text())
self.assertEqual(1001, widget.tid_end_sld.minimum())
self.assertEqual(1100, widget.tid_end_sld.maximum())
self.assertEqual(1100, widget.tid_end_sld.value())
self.assertEqual("1100", widget.tid_end_lb.text())
self.assertEqual(1001, widget.tid_progress_br.minimum())
self.assertEqual(1100, widget.tid_progress_br.maximum())
self.assertEqual(1000, widget.tid_progress_br.value())
self.assertEqual(3, mocked_reset.call_count)
mocked_reset.reset_mock()
# test set individual sliders
widget.tid_start_sld.setValue(1050)
self.assertEqual(1050, widget.tid_progress_br.minimum())
self.assertEqual("1050", widget.tid_start_lb.text())
mocked_reset.assert_called_once()
mocked_reset.reset_mock()
# test last tid is set to be smaller than the first one
widget.tid_end_sld.setValue(1049)
self.assertEqual(1050, widget.tid_end_sld.value())
self.assertEqual(1050, widget.tid_progress_br.maximum())
self.assertEqual("1050", widget.tid_end_lb.text())
mocked_reset.assert_called_once()
mocked_reset.reset_mock()
# test reset
widget.initProgressControl(-1, -1)
self.assertEqual(-1, widget.tid_start_sld.minimum())
self.assertEqual(-1, widget.tid_start_sld.maximum())
self.assertEqual(-1, widget.tid_start_sld.value())
self.assertEqual("", widget.tid_start_lb.text())
self.assertEqual(-1, widget.tid_end_sld.minimum())
self.assertEqual(-1, widget.tid_end_sld.maximum())
self.assertEqual(-1, widget.tid_end_sld.value())
self.assertEqual("", widget.tid_end_lb.text())
self.assertEqual(-1, widget.tid_progress_br.minimum())
self.assertEqual(-1, widget.tid_progress_br.maximum())
self.assertEqual(-2, widget.tid_progress_br.value())
mocked_reset.assert_called()
| 1.960938 | 2 |
aioffmpeg/cmd_opts.py | ucrux/aioffmpeg | 4 | 12788985 | <reponame>ucrux/aioffmpeg<gh_stars>1-10
from aioffmpeg._cmd_raw_str import *
# h.264 encoder parameter options
class H264EncoderArgs():
"""
    Some parameters for h.264 encoding
; -profile:v
; -level
; -preset
; -crf
; -c:v
; -r
    If ffmpeg was built with the external libx264, write the command like this:
aioffmpeg -i input.mp4 -c:v libx264 -x264-params "profile=high:level=3.0" output.mp4
aioffmpeg -i input -c:v libx264 -profile:v main -preset:v fast -level 3.1 -x264opts crf=18
"""
"""
    c:v parameter: the encoder library used for h.264 video
"""
codec_v_libx264 = 'libx264'
codec_v_h264_nvenc = 'h264_nvenc'
codec_v_h264_qsv = 'h264_qsv'
"""
    profile parameter: the H.264 Baseline, Extended and Main profiles all target 8-bit samples in 4:2:0 (YUV) video sequences
    With the same settings, High profile (HP) can cut the bitrate by about 10% compared to Main profile (MP)
    Depending on the field of application, Baseline profile is mostly used for real-time communication, Main profile for streaming media, and High profile for broadcast and storage
"""
    profile_baseline = 'baseline' # Basic quality. Supports I/P frames, progressive scan only, CAVLC only
    profile_extended = 'extended' # Extended quality. Supports I/P/B/SP/SI frames, progressive scan only, CAVLC only
    profile_main = 'main' # Mainstream quality. Provides I/P/B frames, supports progressive and interlaced scan, plus CAVLC and CABAC
    profile_high = 'high' # High quality. Adds 8x8 intra prediction, custom quantization, lossless video coding and more YUV formats on top of Main profile
profile_high10 = 'high10'
profile_high422 = 'high422'
profile_high444 = 'high444'
"""
    The level parameter defines the maximum frame rate, bitrate and resolution that can be used
level max max max bitrate max bitrate high resolution
number macroblocks frame for profile baseline for profile @frame rate
per secs size extended main high high
"""
level_1 = '1' # 1485 99 64 kbit/s 80 kbit/s [email protected]
# [email protected]
level_1b = '1b' # 1485 99 128 kbit/s 160 kbit/s [email protected]
# [email protected]
level_1_1 = '1.1' # 3000 396 192 kbit/s 240 kbit/s [email protected]
# [email protected]
# [email protected]
level_1_2 = '1.2' # 6000 396 384 kbit/s 480 kbit/s [email protected]
# [email protected]
level_1_3 = '1.3' # 11880 396 768 kbit/s 960 kbit/s [email protected]
# [email protected]
level_2 = '2' # 11880 396 2 Mbit/s 2.5 Mbit/s [email protected]
# [email protected]
level_2_1 = '2.1' # 19880 792 4 Mbit/s 5 Mbit/s [email protected]
# [email protected]
level_2_2 = '2.2' # 20250 1620 4 Mbit/s 5 Mbit/s [email protected]
# [email protected]
# [email protected]
# [email protected]
level_3 = '3' # 40500 1620 10 Mbit/s 12.5 Mbit/s [email protected]
# [email protected]
# [email protected]
# [email protected]
level_3_1 = '3.1' # 108000 3600 14 Mbit/s 17.5 Mbit/s [email protected]
# [email protected]
# [email protected]
level_3_2 = '3.2' # 216000 5120 20 Mbit/s 25 Mbit/s [email protected]
# [email protected]
level_4 = '4' # 245760 8192 20 Mbit/s 25 Mbit/s [email protected]
# [email protected]
# [email protected]
level_4_1 = '4.1' # 245760 8192 50 Mbit/s 50 Mbit/s [email protected]
# [email protected]
# [email protected]
level_4_2 = '4.2' # 522240 8704 50 Mbit/s 50 Mbit/s [email protected]
# [email protected]
level_5 = '5' # 589824 22080 135 Mbit/s 168.75 Mbit/s [email protected]
# [email protected]
# [email protected]
# [email protected]
# [email protected]
level_5_1 = '5.1' # 983040 36864 240 Mbit/s 300 Mbit/s [email protected]
# [email protected]
# [email protected]
"""
    preset parameter
    Adjusts the encoding speed; the slower the preset, the higher the encoding quality
ultrafast,superfast,veryfast,faster,fast,medium,slow,slower,veryslow and placebo
"""
preset_ultrafast = 'ultrafast'
preset_superfast = 'superfast'
preset_veryfast = 'veryfast'
preset_faster = 'faster'
preset_fast = 'fast'
preset_medium = 'medium'
preset_slow = 'slow'
preset_slower = 'slower'
preset_veryslow = 'veryslow'
# preset_placebo = 'placebo'
"""
    crf parameter
    CRF (Constant Rate Factor)
    Range 0-51:
    - 0 is lossless encoding
    - 23 is the default
    - 51 is the worst case
    A reasonably sensible range is 18-28.
    The larger the value, the higher the compression efficiency, but also the more information is lost and the worse the output image quality
    Every +6 in crf roughly halves the bitrate
    Every -6 in crf roughly doubles the bitrate
"""
crf_0 = 0
crf_18 = 18
crf_23 = 23
crf_28 = 28
crf_51 = 51
"""
    Video frame rate
    """
    # for GIFs
    v_frame_5 = 5
    v_frame_8 = 8
    v_frame_15 = 15
    # for videos
v_frame_30 = 30
v_frame_24 = 24
"""
    Video rotation direction parameters
"""
v_left_rotate = 2
v_right_rotate = 1
"""
    Auto-rotate, i.e. follow the rotation direction stored in the video metadata
"""
autorotate = r''
noautorotate = r'-noautorotate'
"""
    Whether to fix the duration of the ts segments
"""
no_fix_ts_time = ''
fix_ts_time = '+split_by_time'
"""
    Audio bitrate
"""
audio_rate_64 = 64
audio_rate_128 = 128
"""
    Audio sample rate
"""
audio_simple_rate_default = r''
audio_simple_rate_44100 = r'-ar 44100'
audio_simple_rate_48000 = r'-ar 48000'
"""
    Number of audio channels
"""
audio_channel_default = r''
audio_channel_1 = r'-ac 1'
audio_channel_2 = r'-ac 2'
"""
    Image watermark templates
    """
    # fixed image watermark position
water_mark_fix = 0
water_mark_move = 1
"""
    External encoding/decoding devices
"""
hwaccel_default = r''
hwaccel_cuda = r'-hwaccel cuda'
hwaccel_cuvid = r'-hwaccel cuvid'
hwaccel_qsv = r'-hwaccel qsv'
"""
    External encoding/decoding device index
"""
hwaccel_device_default = r''
hwaccel_device_gpu0 = r'hwaccel_device 0'
hwaccel_device_gpu1 = r'hwaccel_device 1'
hwaccel_device_gpu2 = r'hwaccel_device 2'
hwaccel_device_gpu3 = r'hwaccel_device 3'
hwaccel_device_gpu4 = r'hwaccel_device 4'
hwaccel_device_gpu5 = r'hwaccel_device 5'
hwaccel_device_gpu6 = r'hwaccel_device 6'
hwaccel_device_gpu7 = r'hwaccel_device 7'
"""
    External decoders
"""
decoder_default = r''
decoder_h264_cuvid = r'-c:v h264_cuvid'
decoder_h264_qsv = r'-c:v h264_qsv'
"""
    QSV-related parameters
"""
qsv_hw_device_none = r''
qsv_hw_device = r'-init_hw_device qsv=qsv:hw -filter_hw_device qsv'
class FfmpegCmdModel:
check_h264 = CMD_CHECK_H264
get_video_probe = CMD_GET_VIDEO_PROBE
ch_video_metadata = CMD_CH_VIDEO_METADATA
scale_video = CMD_SCALE_VIDEO_CODECS
scale_video_qsv = CMD_SCALE_VIDEO_QSV
rotate_video = CMD_ROTATE_VIDEO
hls_video = CMD_HLS_VIDEO
hls_video_other = CMD_HLS_VIDEO_OTHER
hls_video_qsv = CMD_HLS_VIDEO_QSV
snapshot_video = CMD_SNAPSHOT
cut_video = CMD_CUT_VIDEO
cut_video_qsv = CMD_CUT_VIDEO_QSV
concat_video = CMD_CONCAT_VIDEO
concat_video_safe = CMD_CONCAT_VIDEO_SAFE
logo_video_fix = CMD_LOGO_FIX
logo_video_move = CMD_LOGO_MOVE
del_log = CMD_DEL_LOGO
create_gif = CMD_GIF_VIDEO
download_m3u8 = CMD_M3U8_DOWNLOAD
class FfmpegOptsModel:
metadata = OPTS_MATEDATA
pad_left_right = OPTS_PAD_LR
pad_up_down = OPTS_PAD_UD
del_log = OPTS_DEL_LOGO
ass = OPTS_ASS
hls_enc_key_url = OPTS_HLS_ENC_KEY_URL
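# Illustrative sketch (not part of the original module): the classes above are
# plain value holders, so callers typically just pick attributes and feed them
# into the command templates from aioffmpeg._cmd_raw_str. The dict below is an
# assumed example of such a selection, not a real aioffmpeg API call.
if __name__ == '__main__':
    chosen_args = {
        'codec': H264EncoderArgs.codec_v_libx264,
        'profile': H264EncoderArgs.profile_main,
        'level': H264EncoderArgs.level_3_1,
        'preset': H264EncoderArgs.preset_fast,
        'crf': H264EncoderArgs.crf_23,
        'fps': H264EncoderArgs.v_frame_30,
    }
    print(chosen_args)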
| 2.015625 | 2 |
scripts/make_graphical_abstract.py | rohitsupekar/active_matter_spheres | 1 | 12788986 | <gh_stars>1-10
import os
import sys
sys.path.append("../") # go to parent dir
import glob
import time
import logging
import numpy as np
from scipy.sparse import linalg as spla
import matplotlib.pyplot as plt
import logging
from mpl_toolkits import mplot3d
from mayavi import mlab
from scipy.special import sph_harm
#add path to data folder
input_folder = "/Volumes/ExtDrive/data"
output_folder = "plots"
dpi=600
ind = 2100
f00 = "%s/sphere113/output_%i.npz" %(input_folder, ind)
f01 = "%s/sphere114/output_%i.npz" %(input_folder, ind)
f02 = "%s/sphere115/output_%i.npz" %(input_folder, ind)
f10 = "%s/sphere111/output_%i.npz" %(input_folder, ind)
f11 = "%s/sphere109/output_%i.npz" %(input_folder, ind)
f12 = "%s/sphere110/output_%i.npz" %(input_folder, ind)
f20 = "%s/sphere116/output_%i.npz" %(input_folder, ind)
f21 = "%s/sphere117/output_%i.npz" %(input_folder, ind)
f22 = "%s/sphere118/output_%i.npz" %(input_folder, ind)
fs = [f12, f22]
om_list = [None for i in range(2)]
#load data
for i, str in enumerate(fs):
with np.load(str) as file:
print('Loaded %s' %(str))
om_list[i] = file['om']
phi = file['phi']
theta = file['theta']
time = file['t'][0]
print('time = %f' %(time))
#change phi
phi = np.linspace(0, 2*np.pi, len(phi))
theta = np.flip(np.linspace(0, np.pi, len(theta)))
# Create a sphere
r = 0.3
pi = np.pi
cos = np.cos
sin = np.sin
phiphi, thth = np.meshgrid(theta, phi-pi)
x = r * sin(phiphi) * cos(thth)
y = r * sin(phiphi) * sin(thth)
z = r * cos(phiphi)
#s = sph_harm(0, 10, theta, phi).real
mlab.figure(1, bgcolor=(0, 0, 0), fgcolor=(1, 1, 1), size=(720, 600))
mlab.clf()
cmin, cmax = -300, 300
for i, om in enumerate(om_list):
om_max = np.max(om)
scale = 0.5
spacing = 1.0
mlab.mesh(x, y- spacing*i, z, scalars=om, colormap='coolwarm', vmax=scale*om_max, vmin=-scale*om_max)
# Plot the equator and the tropics
for angle in (-np.pi/3, -np.pi/6, 0., np.pi/6, np.pi/3):
x_ = r*np.cos(phi) * np.cos(angle)
y_ = r*np.sin(phi) * np.cos(angle)
z_ = r*np.ones_like(phi) * np.sin(angle)
mlab.plot3d(x_, y_-spacing*i, z_, color=(0, 0, 0),
opacity=1, tube_radius=0.003)
th_ = np.linspace(-np.pi/3, np.pi/3, 100)
for angle in np.linspace(0, 2*np.pi, 16):
x_ = r*np.cos(angle) * np.cos(th_)
y_ = r*np.sin(angle) * np.cos(th_)
z_ = r*np.ones_like(angle) * np.sin(th_)
mlab.plot3d(x_, y_-spacing*i, z_, color=(0, 0, 0),
opacity=1, tube_radius=0.003)
mlab.view(60, 63, distance=2.5)
mlab.savefig("%s/abstract.jpg" %(output_folder), magnification=2)
mlab.show()
| 1.890625 | 2 |
pylgrum/tests/test_hand.py | jrheling/pylgrum | 2 | 12788987 | <gh_stars>1-10
import unittest
from pylgrum.card import Card, Rank, Suit
from pylgrum.hand import Hand
from pylgrum.errors import OverdealtHandError
class TestHand(unittest.TestCase):
def test_too_many_cards(self):
"""Implicitly tests the add() override in Hand, too."""
h = Hand()
self.assertEqual(h.size(), 0)
h.add(Card(rank=Rank.QUEEN, suit=Suit.HEART)) # 0 : QH
h.add(Card(rank=Rank.JACK, suit=Suit.DIAMOND)) # 1 : JD
h.add(Card(rank=Rank.ACE, suit=Suit.CLUB)) # 2 : AC
h.add(Card(rank=Rank.KING, suit=Suit.SPADE)) # 3 : KS
h.add(Card(rank=Rank.TWO, suit=Suit.HEART)) # 4 : 2H
h.add(Card(rank=Rank.THREE, suit=Suit.DIAMOND)) # 5 : 3D
h.add(Card(rank=Rank.FOUR, suit=Suit.CLUB)) # 6 : 4C
h.add(Card(rank=Rank.FIVE, suit=Suit.SPADE)) # 7 : 5S
h.add(Card(rank=Rank.TEN, suit=Suit.HEART)) # 8 : 10H
h.add(Card(rank=Rank.NINE, suit=Suit.DIAMOND)) # 9 : 9D
h.add(Card(rank=Rank.EIGHT, suit=Suit.CLUB)) # 10: 8C
self.assertEqual(h.size(), 11) ## a full hand
with self.assertRaises(OverdealtHandError):
h.add(Card(rank=Rank.SEVEN, suit=Suit.SPADE))
if __name__ == '__main__':
unittest.main()
| 3.359375 | 3 |
config.py | mixtek/Webscraping_Mars | 0 | 12788988 | # Twitter API Keys
consumer_key = "k4dGS4RNgveXn70tuR8fujiAu"
consumer_secret = "<KEY>"
access_token = "<KEY>"
access_token_secret = "<KEY>" | 1.132813 | 1 |
test/mongo_mock_repository.py | SeniorSA/hybrid-rs-trainner | 15 | 12788989 | from mongomock import MongoClient
from repository.repository_factory import RepositoryFactory
class MongoMockRepository(RepositoryFactory):
__data_source = None
def get_data_source(self):
        if MongoMockRepository.__data_source is None:
MongoMockRepository.__data_source = MongoClient()
return MongoMockRepository.__data_source[self.args.mongo_database_name]
def __init__(self, args):
self.args = args
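# Illustrative sketch (not part of the original file): the repository only needs
# an `args` object carrying `mongo_database_name`, e.g. an argparse namespace.
# The database and collection names below are assumptions for the example.
if __name__ == '__main__':
    from argparse import Namespace

    repo = MongoMockRepository(Namespace(mongo_database_name='test_db'))
    db = repo.get_data_source()  # in-memory mongomock database, cached on the class
    db.users.insert_one({'name': 'alice'})
    print(db.users.count_documents({}))  # -> 1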
| 2.3125 | 2 |
skelshop/utils/timer.py | cstenkamp/skelshop | 2 | 12788990 | <filename>skelshop/utils/timer.py
import logging
from time import perf_counter_ns
logger = logging.getLogger(__name__)
class Timer:
def __init__(self, name="task", logger=logger):
self.name = name
self.logger = logger
def __enter__(self):
if self.logger.isEnabledFor(logging.INFO):
self.start = perf_counter_ns()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.logger.isEnabledFor(logging.INFO):
end = perf_counter_ns()
self.logger.info(
"[Time] {} consumes {:.4f} s".format(
self.name, (end - self.start) * (10 ** -9)
)
)
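# Illustrative usage sketch (not part of the original module): the timer only
# emits its message when the logger is enabled for INFO.
if __name__ == "__main__":
    import time

    logging.basicConfig(level=logging.INFO)
    with Timer("sleep demo"):
        time.sleep(0.2)  # logs roughly "[Time] sleep demo consumes 0.2000 s"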
| 2.828125 | 3 |
openks/distributed/quick-start/openKS_distributed/base/RoleMaker.py | HIT-SCIR-xuanxuan/OpenKS | 88 | 12788991 | # Copyright (c) 2020 Room 525 Research Group, Zhejiang University.
# All Rights Reserved.
"""Defination of Role Makers."""
from __future__ import print_function
import paddle.fluid as fluid
import os
import time
import random

import numpy as np
from PIL import Image  # random, np and Image are used by the Open_KS_* dataset helpers below
__all__ = [
'Role', 'RoleMakerBase', 'MPISymetricRoleMaker', 'UserDefinedRoleMaker',
'UserDefinedCollectiveRoleMaker', 'PaddleCloudRoleMaker', 'GeneralRoleMaker', "Open_KS_read", "Open_KS_ImageNet",
"Open_KS_Read_Character", "Open_KS_Character"
]
class Role:
WORKER = 1
SERVER = 2
class Open_KS_ImageNet:
"""
A single image class.
Loading and using the Mini-ImageNet dataset.
To use these APIs, you should prepare a directory that
contains three sub-directories: train, test, and val.
Each of these three directories should contain one
sub-directory per WordNet ID.
"""
def __init__(self, dir_path):
self.dir_path = dir_path
self._cache = {}
def sample(self, num_images):
"""
Sample images (as numpy arrays) from the class.
Returns:
A sequence of 84x84x3 numpy arrays.
Each pixel ranges from 0 to 1.
"""
names = [f for f in os.listdir(self.dir_path) if f.endswith('.jpg')]
random.shuffle(names)
images = []
for name in names[:num_images]:
images.append(self._read_image(name))
return images
def _read_image(self, name):
if name in self._cache:
return self._cache[name].astype('float32') / 0xff
with open(os.path.join(self.dir_path, name), 'rb') as in_file:
img = Image.open(in_file).resize((84, 84)).convert('RGB')
self._cache[name] = np.array(img)
return self._read_image(name)
class Open_KS_read:
def read_dataset(data_dir):
"""
Read the Mini-ImageNet dataset.
Args:
data_dir: directory containing Mini-ImageNet.
Returns:
A tuple (train, val, test) of sequences of
ImageNetClass instances.
"""
return tuple(_read_classes(os.path.join(data_dir, x)) for x in ['train', 'val', 'test'])
def _read_classes(dir_path):
"""
Read the WNID directories in a directory.
"""
return [ImageNetClass(os.path.join(dir_path, f)) for f in os.listdir(dir_path)
if f.startswith('n')]
class Open_KS_Character:
"""
A single character class.
"""
def __init__(self, dir_path, rotation=0):
self.dir_path = dir_path
self.rotation = rotation
self._cache = {}
def sample(self, num_images):
"""
Sample images (as numpy arrays) from the class.
Returns:
A sequence of 28x28 numpy arrays.
Each pixel ranges from 0 to 1.
"""
names = [f for f in os.listdir(self.dir_path) if f.endswith('.png')]
random.shuffle(names)
images = []
for name in names[:num_images]:
images.append(self._read_image(os.path.join(self.dir_path, name)))
return images
def _read_image(self, path):
if path in self._cache:
return self._cache[path]
with open(path, 'rb') as in_file:
img = Image.open(in_file).resize((28, 28)).rotate(self.rotation)
self._cache[path] = np.array(img).astype('float32')
return self._cache[path]
class Open_KS_Read_Character:
def read_dataset(data_dir):
"""
Iterate over the characters in a data directory.
Args:
data_dir: a directory of alphabet directories.
Returns:
An iterable over Characters.
The dataset is unaugmented and not split up into
training and test sets.
"""
for alphabet_name in sorted(os.listdir(data_dir)):
alphabet_dir = os.path.join(data_dir, alphabet_name)
if not os.path.isdir(alphabet_dir):
continue
for char_name in sorted(os.listdir(alphabet_dir)):
if not char_name.startswith('character'):
continue
yield Character(os.path.join(alphabet_dir, char_name), 0)
def split_dataset(dataset, num_train=1200):
"""
Split the dataset into a training and test set.
Args:
dataset: an iterable of Characters.
Returns:
A tuple (train, test) of Character sequences.
"""
all_data = list(dataset)
random.shuffle(all_data)
return all_data[:num_train], all_data[num_train:]
def augment_dataset(dataset):
"""
Augment the dataset by adding 90 degree rotations.
Args:
dataset: an iterable of Characters.
Returns:
An iterable of augmented Characters.
"""
for character in dataset:
for rotation in [0, 90, 180, 270]:
yield Character(character.dir_path, rotation=rotation)
class RoleMakerBase(object):
"""
RoleMakerBase is a base class for assigning a role to current process
in distributed training.
A paddle developer can implement RoleMakerBase to design a role maker
for worker or pserver assignment.
"""
def __init__(self):
self._worker_endpoints = []
self._server_endpoints = []
self._role_is_generated = False
self._role = None
self._current_id = -1
def is_worker(self):
"""
return is_worker() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def is_server(self):
"""
return is_server() of current process
"""
raise NotImplementedError("Please implement this method in child class")
def is_first_worker(self):
"""
Check whether the node is the first instance of worker.
Returns:
bool: True if this is the first node of worker,
False if not.
"""
raise NotImplementedError("Please implement this method in child class")
def worker_num(self):
"""
Get current total worker number.
Returns:
int: worker number
"""
raise NotImplementedError("Please implement this method in child class")
def worker_index(self):
"""
Get current worker id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def server_index(self):
"""
Get current server id.
Returns:
int: node id
"""
raise NotImplementedError("Please implement this method in child class")
def get_trainer_endpoints(self):
"""
return trainer endpoints
"""
return self._worker_endpoints
def get_pserver_endpoints(self):
"""
return pserver endpoints
"""
return self._server_endpoints
def to_string(self):
return "role: {}, current_id: {}, worker_endpoints: {}, server_endpoints: {}".format(
self._role, self._current_id, self._worker_endpoints,
self._server_endpoints)
def all_gather(self, input):
"""
all gather between trainers and pservers
Args:
input(int|float): input value
Returns:
return a list of values
"""
print("warning: RoleMakerBase does not have all gather.")
return None
def all_reduce_worker(self, input, output, mode="sum"):
"""
all reduce between trainers if current role is TRAINER,
only support array of one dim.
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
print("warning: RoleMakerBase does not have all reduce worker.")
def barrier_worker(self):
"""
barrier between trainers if current role is TRAINER
"""
print("warning: RoleMakerBase does not have barrier worker.")
def barrier_all(self):
"""
barrier between trainers if current role is PSERVER
"""
print("warning: RoleMakerBase does not have barrier all.")
class MPIRoleMaker(RoleMakerBase):
"""
    MPIRoleMaker is an MPI-API based role maker which is a counterpart of K8SRoleMaker.
    mpi4py will be used if a developer inherits MPIRoleMaker.
"""
def __init__(self):
"""Init."""
super(MPIRoleMaker, self).__init__()
from mpi4py import MPI
self.MPI = MPI
self._comm = MPI.COMM_WORLD
self._node_type_comm = None
self._ips = None
self._ip = None
def _get_rank(self):
"""Return rank."""
self._rank = self._comm.Get_rank()
return self._rank
def _get_size(self):
"""Return size."""
self._size = self._comm.Get_size()
return self._size
def _all_gather(self, obj):
"""
all_gather(obj) will call MPI's allgather function
"""
self._barrier_all()
return self._comm.allgather(obj)
def _worker_gather(self, obj):
"""
worker_gather(obj) will call MPI's allgather function
"""
if self.is_worker():
self._node_type_comm.barrier()
return self._node_type_comm.allgather(obj)
return None
def _barrier_all(self):
"""
barrier_all() will call MPI's barrier_all function
"""
self._comm.barrier()
def _finalize(self):
"""
finalize the current MPI instance.
"""
self.MPI.Finalize()
def _get_ips(self):
"""
collect current distributed job's ip list
"""
if not self._ips:
self._ips = self._comm.allgather(self.get_local_ip())
return self._ips
def get_local_ip(self):
"""Return get local ip."""
import socket
self._ip = socket.gethostbyname(socket.gethostname())
return self._ip
def generate_role(self):
"""
generate_role() should be called to identify current process's role
"""
raise NotImplementedError("Please implement this method in child class")
class MPISymetricRoleMaker(MPIRoleMaker):
"""
MPISymetricRoleMaker is designed for worker and server assignment
under MPI. Typically, a worker and a server node will be appointed
    on each physical node. This role maker can only be used under MPI.
"""
def __init__(self):
"""Init."""
super(MPISymetricRoleMaker, self).__init__()
self._node_type = None
self._proc_per_node = 2
self._pserver_rand_port = 0
def _check_role_generation(self):
"""Check whether role has been generated."""
if not self._role_is_generated:
raise NameError("generate_role() should be called first")
return True
def all_gather(self, input):
"""
all gather between trainers and pservers
Args:
input(int|float): input value
Returns:
return a list of values
"""
if not self._role_is_generated:
self.generate_role()
return self._all_gather(input)
def all_reduce_worker(self, input, output, mode="sum"):
"""
all reduce between trainers if current role is TRAINER,
only support array of one dim.
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
if not self._role_is_generated:
self.generate_role()
if not self.is_worker():
print("warning: current role is not worker in all_reduce_worker")
return
self._all_reduce(input, output, mode)
def barrier_worker(self):
"""
barrier between trainers if current role is TRAINER
"""
if not self._role_is_generated:
self.generate_role()
if self.is_worker():
self._node_type_comm.barrier()
else:
print("warning: current role is not worker in barrier_worker")
def barrier_all(self):
"""
barrier between trainers if current role is PSERVER
"""
if not self._role_is_generated:
self.generate_role()
self._comm.barrier()
def is_first_worker(self):
"""
return whether current process is the first worker assigned by role maker
"""
if self._check_role_generation():
return self.is_worker() and 0 == self.worker_index()
return False
def get_pserver_endpoints(self):
"""
get pserver endpoints
Returns:
endpoints(list): pserver endpoints
"""
if self._pserver_rand_port <= 0:
import random
random.seed(self._server_num())
# port will be randomly generated from 60001 to 63999
# random seed is server num so that all nodes will get
# the same port
self._pserver_rand_port = random.randint(60001, 64000)
endpoints = [
x + ":" + str(self._pserver_rand_port)
for x in self._server_endpoints
]
return endpoints
def worker_num(self):
return self._worker_num()
def is_worker(self):
"""
return whether current process is worker assigned by role maker
"""
if self._check_role_generation():
return self._node_type == 1
return False
def is_server(self):
"""
return whether current process is server assigned by role maker
"""
if self._check_role_generation():
return self._node_type == 0
return False
def _worker_num(self):
"""
return the current number of worker
"""
if self._check_role_generation():
return self._get_size() / self._proc_per_node
return 0
def _server_num(self):
"""
return the current number of server
"""
if self._check_role_generation():
return self._get_size() / self._proc_per_node
else:
self.generate_role()
return self._get_size() / self._proc_per_node
def worker_index(self):
"""
return the index of worker
"""
if self._check_role_generation():
return self._rank / self._proc_per_node
else:
self.generate_role()
return self._get_size() / 2
def server_index(self):
"""
return the index of server
"""
if self._check_role_generation():
return self._rank / self._proc_per_node
else:
self.generate_role()
return self._get_size() / self._proc_per_node
def _all_reduce(self, input, output, mode="sum"):
"""
all reduce between trainers if current role is TRAINER,
only support array of one dim.
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
if not self._role_is_generated:
self.generate_role()
if mode == "sum":
mode = self.MPI.SUM
elif mode == "max":
mode = self.MPI.MAX
elif mode == "min":
mode = self.MPI.MIN
else:
raise ValueError("unknown mode: %s" % mode)
self._node_type_comm.Allreduce(input, output, op=mode)
def _barrier_worker(self):
"""
barrier all workers in current distributed job
"""
if self._check_role_generation():
if self.is_worker():
self._node_type_comm.barrier()
else:
raise Exception("You should check role generation first")
def _barrier_server(self):
"""
barrier all servers in current distributed job
"""
if self._check_role_generation():
if self.is_server():
self._node_type_comm.barrier()
else:
raise Exception("You should check role generation first")
def generate_role(self):
"""
generate currently process's role
"""
if not self._role_is_generated:
# TODO(guru4elephant): only allow to be called once
self._worker_endpoints = self._get_ips()[1::2]
self._server_endpoints = self._get_ips()[::2]
if 0 == self._get_rank() % self._proc_per_node % 2:
self._node_type = 0
else:
self._node_type = 1
self._node_type_comm = self._comm.Split(self._node_type)
self._role_is_generated = True
else:
raise Exception("You should check role generation first")
class PaddleCloudRoleMaker(RoleMakerBase):
"""
role maker for paddle cloud,
base class is RoleMakerBase
"""
def __init__(self, is_collective=False):
super(PaddleCloudRoleMaker, self).__init__()
self._role_is_generated = False
self._is_collective = is_collective
def generate_role(self):
"""Generate role."""
if not self._role_is_generated:
if not self._is_collective:
try:
# Environment variable PADDLE_PSERVERS_IP_PORT_LIST must be set
# format: string(ip:port), eg. 127.0.0.1:6001
eplist = os.environ["PADDLE_PSERVERS_IP_PORT_LIST"].split(
",")
# note that, we usually assign the same port to different ips
# if we run parameter server training in local mode
# port should be different in environment variables
trainers_num = int(os.environ["PADDLE_TRAINERS_NUM"])
training_role = os.environ["TRAINING_ROLE"]
if training_role not in ["TRAINER", "PSERVER"]:
raise ValueError(
"TRAINING_ROLE must be PSERVER or TRAINER")
if training_role == "TRAINER":
role = Role.WORKER
current_id = int(os.environ["PADDLE_TRAINER_ID"])
elif training_role == "PSERVER":
role = Role.SERVER
cur_ip = os.environ["POD_IP"]
curr_port = os.environ["PADDLE_PORT"]
curr_endpoint = ":".join([cur_ip, curr_port])
current_id = eplist.index(curr_endpoint)
else:
raise ValueError(
"TRAINING_ROLE must be PSERVER or TRAINER")
except ValueError as ve:
raise ValueError(
"something wrong with PaddleCloud, please check environment"
)
self._trainers_num = trainers_num
self._server_endpoints = eplist
self._role = role
self._current_id = current_id
else:
self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
self._training_role = os.getenv("PADDLE_TRAINING_ROLE",
"TRAINER")
assert (self._training_role == "TRAINER")
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
self._current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
assert self._worker_endpoints is not None, "can't find PADDLE_TRAINER_ENDPOINTS"
self._worker_endpoints = self._worker_endpoints.split(",")
self._trainers_num = len(self._worker_endpoints)
self._role_is_generated = True
def get_pserver_endpoints(self):
if not self._role_is_generated:
self.generate_role()
return self._server_endpoints
def is_worker(self):
if not self._role_is_generated:
self.generate_role()
return self._role == Role.WORKER
def is_server(self):
if not self._role_is_generated:
self.generate_role()
return self._role == Role.SERVER
def is_first_worker(self):
if not self._role_is_generated:
self.generate_role()
return self._role == Role.WORKER and self._current_id == 0
def worker_index(self):
if not self._role_is_generated:
self.generate_role()
return self._current_id
def server_index(self):
if not self._role_is_generated:
self.generate_role()
return self._current_id
def worker_num(self):
if not self._role_is_generated:
self.generate_role()
return self._trainers_num
class GeneralRoleMaker(RoleMakerBase):
"""
This role maker is for general use, you can set os.environ to customize:
PADDLE_PSERVERS_IP_PORT_LIST : all pservers' ip:port, separated by ','
PADDLE_TRAINER_ENDPOINTS : all trainers' ip:port, separated by ','
TRAINING_ROLE : TRAINER or PSERVER
PADDLE_TRAINER_ID : current trainer id (only for trainer),
it is index in PADDLE_TRAINER_ENDPOINTS
PADDLE_PSERVER_ID : current pserver id (only for pserver)
it is index in PADDLE_PSERVERS_IP_PORT_LIST
"""
def __init__(self, **kwargs):
super(RoleMakerBase, self).__init__()
self._role_is_generated = False
self._hdfs_name = kwargs.get("hdfs_name", "")
self._hdfs_ugi = kwargs.get("hdfs_ugi", "")
self._hdfs_path = kwargs.get("path", "")
self._iface = self.__get_default_iface()
# this environment variable can be empty
self._prefix = os.getenv("SYS_JOB_ID", "")
def generate_role(self):
"""
generate role for general role maker
"""
if not self._role_is_generated:
eplist = os.environ["PADDLE_PSERVERS_IP_PORT_LIST"].split(",")
training_role = os.environ["TRAINING_ROLE"]
worker_endpoints = os.environ["PADDLE_TRAINER_ENDPOINTS"].split(",")
trainers_num = len(worker_endpoints)
if training_role not in ["TRAINER", "PSERVER"]:
raise ValueError("TRAINING_ROLE must be PSERVER or TRAINER")
if training_role == "TRAINER":
role = Role.WORKER
current_id = int(os.environ["PADDLE_TRAINER_ID"])
self._node_type = 1
self._cur_endpoint = worker_endpoints[current_id]
gloo = fluid.core.Gloo()
gloo.init(current_id,
len(worker_endpoints),
self._hdfs_path.rstrip("/") + "/trainer",
self._hdfs_name, self._hdfs_ugi, self._iface,
self._prefix)
self._node_type_comm = gloo
elif training_role == "PSERVER":
role = Role.SERVER
if os.environ.get("PADDLE_PSERVER_ID") is not None:
current_id = int(os.environ["PADDLE_PSERVER_ID"])
cur_endpoint = eplist[current_id]
else:
# this is for compatible with paddlecloud
cur_ip = os.environ["POD_IP"]
cur_port = os.environ["PADDLE_PORT"]
cur_endpoint = ":".join([cur_ip, cur_port])
current_id = eplist.index(cur_endpoint)
self._node_type = 0
self._cur_endpoint = cur_endpoint
gloo = fluid.core.Gloo()
gloo.init(current_id,
len(eplist),
self._hdfs_path.rstrip("/") + "/pserver",
self._hdfs_name, self._hdfs_ugi, self._iface,
self._prefix)
self._node_type_comm = gloo
gloo = fluid.core.Gloo()
all_list = worker_endpoints + eplist
gloo.init(
all_list.index(self._cur_endpoint),
len(all_list),
self._hdfs_path.rstrip("/") + "/all", self._hdfs_name,
self._hdfs_ugi, self._iface, self._prefix)
self._all_comm = gloo
self._trainers_num = trainers_num
self._server_endpoints = eplist
self._role = role
self._current_id = current_id
self._rank = all_list.index(self._cur_endpoint)
self._size = len(all_list)
self._worker_endpoints = worker_endpoints
self._role_is_generated = True
def all_gather(self, input):
"""
all gather between trainers and pservers
Args:
input(int|float): input value
Returns:
return a list of values
"""
return self._all_gather(input)
def all_reduce_worker(self, input, output, mode="sum"):
"""
all reduce between trainers if current role is TRAINER,
only support array of one dim.
Args:
input(list/numpy.array): array of one dim
output(list/numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
if not self.is_worker():
return
self._all_reduce(input, output, mode)
def barrier_worker(self):
"""
barrier between trainers if current role is TRAINER
"""
self._barrier_worker()
def barrier_all(self):
"""
barrier between trainers if current role is PSERVER
"""
self._barrier_all()
def get_local_endpoint(self):
"""
get local endpoint of current process
"""
if not self._role_is_generated:
self.generate_role()
return self._cur_endpoint
def get_trainer_endpoints(self):
"""
get endpoint of all trainers
"""
if not self._role_is_generated:
self.generate_role()
return self._worker_endpoints
def get_pserver_endpoints(self):
"""
get endpoint of all pservers
"""
if not self._role_is_generated:
self.generate_role()
return self._server_endpoints
def is_worker(self):
"""
whether current process is worker
"""
if not self._role_is_generated:
self.generate_role()
return self._role == Role.WORKER
def is_server(self):
"""
whether current process is server
"""
if not self._role_is_generated:
self.generate_role()
return self._role == Role.SERVER
def is_first_worker(self):
"""
whether current process is worker of rank 0
"""
if not self._role_is_generated:
self.generate_role()
return self._role == Role.WORKER and self._current_id == 0
def worker_index(self):
"""
get index of current worker
"""
if not self._role_is_generated:
self.generate_role()
return self._current_id
def server_index(self):
"""
get index of current server
"""
if not self._role_is_generated:
self.generate_role()
return self._current_id
def worker_num(self):
"""
        return the current number of workers
"""
if not self._role_is_generated:
self.generate_role()
return self._worker_num()
def server_num(self):
"""
return the current number of server
"""
if not self._role_is_generated:
self.generate_role()
return self._server_num()
def _barrier_worker(self):
"""
barrier all workers in current distributed job
"""
if not self._role_is_generated:
self.generate_role()
if self.is_worker():
self._node_type_comm.barrier()
def _barrier_all(self):
"""
barrier all workers and servers in current distributed job
"""
if not self._role_is_generated:
self.generate_role()
self._all_comm.barrier()
def _barrier_server(self):
"""
barrier all servers in current distributed job
"""
if not self._role_is_generated:
self.generate_role()
if self.is_server():
self._node_type_comm.barrier()
def _worker_num(self):
"""
return the current number of worker
"""
if not self._role_is_generated:
self.generate_role()
return self._trainers_num
def _server_num(self):
"""
return the current number of server
"""
if not self._role_is_generated:
self.generate_role()
return len(self._server_endpoints)
def _finalize(self):
"""Default do nothing."""
pass
def _all_reduce(self, input, output, mode="sum"):
"""
all reduce between all workers
Args:
input(list|numpy.array): array of one dim
output(list|numpy.array): array of one dim
mode(str): "sum" or "min" or "max"
"""
if not self._role_is_generated:
self.generate_role()
input_list = [i for i in input]
ans = self._node_type_comm.all_reduce(input_list, mode)
for i in range(len(ans)):
output[i] = ans[i]
def _all_gather(self, obj):
"""
gather between all workers and pservers
"""
if not self._role_is_generated:
self.generate_role()
self._barrier_all()
return self._all_comm.all_gather(obj)
def _worker_gather(self, obj):
"""
gather between all workers
"""
if not self._role_is_generated:
self.generate_role()
if not self.is_worker():
return None
self._barrier_worker()
return self._node_type_comm.all_gather(obj)
def _get_rank(self):
"""
get current rank in all workers and pservers
"""
if not self._role_is_generated:
self.generate_role()
return self._rank
def _get_size(self):
"""
get total num of all workers and pservers
"""
if not self._role_is_generated:
self.generate_role()
return self._size
def __get_default_iface(self):
"""
get default physical interface
"""
default1 = self.__get_default_iface_from_gateway()
default2 = self.__get_default_iface_from_interfaces()
return default2 if default1 == "lo" else default1
def __get_default_iface_from_gateway(self):
"""
get default physical interface
"""
import netifaces
gateways = netifaces.gateways()
if gateways.get(netifaces.AF_INET) != None:
gateway = gateways[netifaces.AF_INET]
if len(gateway) > 0 and len(gateway[0]) > 1:
return gateway[0][1]
return "lo"
def __get_default_iface_from_interfaces(self):
"""
get default physical interface
"""
import netifaces
for intf_name in netifaces.interfaces():
addresses = netifaces.ifaddresses(intf_name)
if netifaces.AF_INET in addresses:
ipv4_addresses = addresses[netifaces.AF_INET]
for ipv4_address in ipv4_addresses:
if 'broadcast' in ipv4_address:
return intf_name
return "lo"
class UserDefinedRoleMaker(RoleMakerBase):
"""
    UserDefinedRoleMaker is designed for manual worker and server assignment.
    Typically, a worker and a server node will be appointed on each physical
    node, and the assignment can be made by the user.
"""
def __init__(self,
current_id=0,
role=Role.WORKER,
worker_num=0,
server_endpoints=None):
super(UserDefinedRoleMaker, self).__init__()
if not isinstance(server_endpoints, list):
raise TypeError("server_endpoints must be as string list")
elif len(server_endpoints) <= 0:
raise ValueError(
"the length of server_endpoints list must be greater than 0")
elif len(server_endpoints) != len(set(server_endpoints)):
raise ValueError("server_endpoints can't have duplicate elements")
else:
for server_endpoint in server_endpoints:
if not isinstance(server_endpoint, str):
raise TypeError(
"every element in server_endpoints list must be as string"
)
self._server_endpoints = server_endpoints
if role != Role.WORKER and role != Role.SERVER:
raise TypeError("role must be as Role")
else:
self._role = role
if not isinstance(current_id, int):
raise TypeError("current_id must be as int")
else:
if current_id < 0:
raise ValueError(
"current_id must be greater than or equal to 0")
elif self._role == Role.SERVER and current_id >= len(
server_endpoints):
raise ValueError(
"if role is Role.SERVER, current_id must be less than or equal to len(server_endpoints) - 1"
)
self._current_id = current_id
if not isinstance(worker_num, int):
raise TypeError("worker_num must be as int")
else:
if worker_num <= 0:
raise ValueError("worker_num must be greater than 0")
self._worker_num = worker_num
def generate_role(self):
self._role_is_generated = True
def is_worker(self):
return self._role == Role.WORKER
def is_server(self):
return self._role == Role.SERVER
def is_first_worker(self):
return self._role == Role.WORKER and self._current_id == 0
def worker_index(self):
return self._current_id
def server_index(self):
return self._current_id
def worker_num(self):
return self._worker_num
class UserDefinedCollectiveRoleMaker(RoleMakerBase):
"""
    UserDefinedCollectiveRoleMaker is designed for manual worker assignment
    in collective mode.
"""
def __init__(self, current_id=0, worker_endpoints=None):
super(UserDefinedCollectiveRoleMaker, self).__init__()
if not isinstance(worker_endpoints, list):
raise TypeError("worker_endpoints must be as string list")
elif len(worker_endpoints) <= 0:
raise ValueError(
"the length of worker_endpoints list must be greater than 0")
elif len(worker_endpoints) != len(set(worker_endpoints)):
raise ValueError("worker_endpoints can't have duplicate elements")
else:
for worker_endpoint in worker_endpoints:
if not isinstance(worker_endpoint, str):
raise TypeError(
"every element in worker_endpoints list must be as string"
)
self._worker_endpoints = worker_endpoints
if not isinstance(current_id, int):
raise TypeError("current_id must be as int")
else:
if current_id < 0:
raise ValueError(
"current_id must be greater than or equal to 0")
elif current_id >= len(worker_endpoints):
raise ValueError(
"current_id must be less than or equal to len(worker_endpoints) - 1"
)
self._current_id = current_id
self._worker_num = len(self._worker_endpoints)
def generate_role(self):
self._role_is_generated = True
def is_worker(self):
return True
def is_first_worker(self):
return self._current_id == 0
def worker_index(self):
return self._current_id
def worker_num(self):
return self._worker_num
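# A minimal usage sketch of the two role makers above; the endpoint addresses
# and worker counts are illustrative assumptions only.
if __name__ == "__main__":
    _endpoints = ["127.0.0.1:6170", "127.0.0.1:6171"]

    # Manually declare this process as worker 0 out of 2 workers.
    _worker = UserDefinedRoleMaker(
        current_id=0,
        role=Role.WORKER,
        worker_num=2,
        server_endpoints=_endpoints)
    _worker.generate_role()
    assert _worker.is_worker() and _worker.is_first_worker()

    # Collective mode: every process is a worker and worker_num is inferred
    # from the endpoint list.
    _collective = UserDefinedCollectiveRoleMaker(
        current_id=1, worker_endpoints=_endpoints)
    _collective.generate_role()
    assert _collective.worker_num() == len(_endpoints)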
| 2.421875 | 2 |
src/testplates/impl/validators/type.py | kprzybyla/testplates | 0 | 12788992 | <gh_stars>0
__all__ = ("TypeValidator",)
import testplates
from typing import (
Any,
)
from resultful import (
success,
failure,
Result,
)
from testplates.impl.exceptions import TestplatesError, InvalidTypeError
from testplates.impl.utils import (
format_like_tuple,
)
class TypeValidator:
__slots__ = ("allowed_types",)
def __init__(
self,
*allowed_types: type,
) -> None:
self.allowed_types = allowed_types
def __repr__(self) -> str:
allowed_types = format_like_tuple(self.allowed_types)
return f"{testplates.__name__}.type_validator({allowed_types})"
def __call__(self, data: Any, /) -> Result[None, TestplatesError]:
allowed_types = self.allowed_types
if not isinstance(data, allowed_types):
return failure(InvalidTypeError(data, allowed_types))
return success(None)
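# A small illustrative sketch exercising the validator above; the sample values
# are arbitrary.
if __name__ == "__main__":
    validate_number = TypeValidator(int, float)
    ok = validate_number(3.14)     # expected: success(None)
    bad = validate_number("3.14")  # expected: failure(InvalidTypeError(...))
    print(repr(validate_number), ok, bad)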
| 2.484375 | 2 |
extension_management/01_ManageExtensions.py | IBM/api-samples | 172 | 12788993 | <reponame>IBM/api-samples<filename>extension_management/01_ManageExtensions.py<gh_stars>100-1000
#!/usr/bin/env python3
# In this sample you will see how to manage extensions using the REST API.
# The sample contains uploading extension, installing extension, checking
# installing task and delete extension.
import json
import os
import sys
import time
import importlib
sys.path.append(os.path.realpath('../modules'))
client_module = importlib.import_module('RestApiClient')
SampleUtilities = importlib.import_module('SampleUtilities')
def upload_extension():
# Create our client
client = client_module.RestApiClient(version='6.0')
# Add Content-Type to request header
request_header = {}
request_header['Content-Type'] = 'application/zip'
# setup file for posting
cwd = os.path.dirname(os.path.realpath(__file__))
app_zip_file_path = os.path.join(cwd, 'ExtensionPackageTest.zip')
app_zip_file = open(app_zip_file_path, 'rb')
data = app_zip_file.read()
response = client.call_api('config/extension_management/extensions',
'POST', headers=request_header, data=data)
# If the response code is 201, that means the extension package has been
# successfully uploaded and the extension id will be returned.
# Otherwise -1 will be returned and the full response body is provided with
# error message inside.
if (response.code != 201):
print('Failed to upload the extension package.')
SampleUtilities.pretty_print_response(response)
return -1
else:
# Extract the extension id from the response body.
response_body = json.loads(response.read().decode('utf-8'))
extension_id = response_body['id']
print('The extension has been uploaded with id = ' +
str(extension_id))
return extension_id
def install_extension(extension_id):
# Create our client
client = client_module.RestApiClient(version='6.0')
# query parameters
# action_type: The desired action to take on
# the Extension (INSTALL or PREVIEW)
# overwrite: If true, any existing items on the importing system will be
# overwritten if the extension contains the same items.
# If false, existing items will be preserved,
# and the corresponding items in the extension will be skipped.
params = {'action_type': 'INSTALL',
'overwrite': 'true'}
# construct api url with path parameter.
url = 'config/extension_management/extensions/' + str(extension_id)
response = client.call_api(url, 'POST', params=params)
# Installing extension process is asynchronous. If 202 is returned,
# that means the installing task is started and the returned status id
# is used for tracking the asynchronous task status.
if (response.code != 202):
print("Failed to start installing task.")
SampleUtilities.pretty_print_response(response)
return -1
else:
response_body = json.loads(response.read().decode('utf-8'))
status_id = response_body['status_id']
print('The extension installing task has been started.')
return status_id
def check_install_status(status_id):
# Create our client
client = client_module.RestApiClient(version='6.0')
# construct api url with path parameter.
url = 'config/extension_management/extensions_task_status/'+str(status_id)
response = client.call_api(url, 'GET')
# if there is no error, the status of installing task will be returned.
if (response.code != 200):
print("Failed to check installing task status.")
SampleUtilities.pretty_print_response(response)
status = 'FAILED'
else:
response_body = json.loads(response.read().decode('utf-8'))
status = response_body['status']
return status
def delete_installed_extension(extension_id):
# Create our client
client = client_module.RestApiClient(version='6.0')
# construct api url with path parameter.
url = 'config/extension_management/extensions/' + str(extension_id)
response = client.call_api(url, 'DELETE')
if (response.code == 202):
print('The extension has been deleted.')
else:
print('Failed to delete the extension.')
def main():
# upload the extension package
extension_id = upload_extension()
if (extension_id != -1):
# if extension package uploaded successfully, start installing
# extension task
status_id = install_extension(extension_id)
if (status_id != -1):
            # if the extension installing task starts with no error, keep checking
            # the status every 5s until it is completed, has errors, or
            # times out.
status = 'PROCESSING'
count = 60
while ((status == 'PROCESSING' or status == "QUEUED") and
count > 0):
status = check_install_status(status_id)
print('Installing status: ' + status)
count = count - 1
if ((status == 'PROCESSING' or status == "QUEUED") and
count == 0):
print('Installing process timed out.')
sys.exit(1)
time.sleep(5)
if (status == 'COMPLETED'):
# delete the extension once it complete installed. If you want
# to keep the extension, please comment out the line below.
delete_installed_extension(extension_id)
else:
sys.exit(1)
else:
sys.exit(1)
if __name__ == "__main__":
main()
| 2.484375 | 2 |
torch_optimizer_study.py | xzgz/vehicle-reid | 3 | 12788994 | <gh_stars>1-10
from __future__ import print_function
from __future__ import division
import os
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import lr_scheduler
from model import init_model
from utils.torchtools import count_num_param
from optimizers import init_optim
gpu_devices = '0'
use_cpu = False
arch = 'resnet50'
loss_type = 'xent'
lr = 2e-2
gamma = 0.1
weight_decay = 2e-4
start_epoch = 0
max_epoch = 12
stepsize = [8, 10]
optim = 'sgd'
def main():
torch.manual_seed(1)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_devices
use_gpu = torch.cuda.is_available()
if use_cpu: use_gpu = False
if use_gpu:
print("Currently using GPU {}".format(gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(1)
else:
print("Currently using CPU (GPU is highly recommended)")
print("Initializing model: {}".format(arch))
model = init_model(name=arch, num_classes=576, loss_type=loss_type)
print("Model size: {:.3f} M".format(count_num_param(model)))
if use_gpu:
model = nn.DataParallel(model).cuda()
optimizer = init_optim(optim, model.parameters(), lr, weight_decay)
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=stepsize, gamma=gamma)
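    # Note: MultiStepLR multiplies the learning rate by gamma=0.1 each time its
    # internal counter passes a milestone (8, then 10); the counter advances on
    # every scheduler.step() call below.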
model.train()
cnt = 0
for epoch in range(start_epoch, max_epoch, 2):
for step in range(2):
x = torch.randn(1, 3, 200, 200)
y = torch.randint(low=0, high=576, size=(1,), dtype=torch.int64)
if use_gpu:
x = x.cuda()
y = y.cuda()
scheduler.step()
cnt += 1
print(cnt, scheduler.get_lr())
output = model(x)
# loss = nn.CrossEntropyLoss()(output[0], y)
loss = torch.tensor(0.0, dtype=torch.float32).cuda()
# loss = torch.tensor(0.0, dtype=torch.float32)
loss.requires_grad = True
optimizer.zero_grad()
loss.backward()
print(loss)
print(loss._grad)
optimizer.step()
print('Done.')
main()
| 2.515625 | 3 |
template/osi/__base__.py | clayne/syringe-1 | 25 | 12788995 | <filename>template/osi/__base__.py
from ptypes import ptype
class stackable:
def nextlayer(self):
'''returns a tuple of (type,remaining)'''
raise NotImplementedError
class terminal(stackable):
def nextlayer(self):
return None, None
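# Illustrative note: a concrete protocol layer would subclass `stackable` and
# report the ptype of the next layer plus the number of unconsumed bytes; a
# hypothetical header with an opaque payload could `return ptype.block, remaining`.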
| 2.265625 | 2 |
cash.py | PiggehJB/Cash_PY_Week6_CS50x | 0 | 12788996 | import colorama
from colorama import Fore
while True:
try:
cash = float(input("Enter your cash: "))
while True:
if cash > 0:
print(f"Alright! Your balance is: {cash}")
break
else:
cash = float(input(f"{Fore.RED}Uh oh! Make sure your number is above 0: {Fore.CYAN}"))
break
except ValueError:
print("Uh oh! That wasn't a valid number, please try again.")
amount_of_coins = 0
while True:
if cash == 0:
print(f"{Fore.GREEN}\n\nDone calculating change...\nAmount of Coins: {amount_of_coins}\nRemaining change: ${cash}")
break
if cash >= 0.25:
cash -=0.25
cash = round(cash, 2)
amount_of_coins +=1
print(f"{Fore.CYAN}Diving by quarters\nAmtOfCoins: {amount_of_coins}\nRemaining change {cash}")
elif cash >= 0.10 and cash < 0.25:
cash -=0.1
"""
TLDR; computers represent floating point numbers as binary, and it turns out that storing
a precise decimal fraction as binary is not possible
"""
cash = round(cash, 2)
amount_of_coins +=1
print(f"{Fore.BLUE}\nDiving by dimes\nAmtOfCoins: {amount_of_coins}\nRemaing change: {cash}")
elif cash >=0.05 and cash < 0.10:
cash -=0.05
cash = round(cash, 2)
amount_of_coins +=1
print(f"{Fore.YELLOW}\nDiving by nickels\nAmtOfCoins: {amount_of_coins}\nRemaing change: {cash}")
elif cash >=0.01 and cash < 0.05:
cash -=0.01
cash = round(cash, 2)
print(f"{Fore.YELLOW}\nDiving by pennies\nAmtOfCoins: {amount_of_coins}\nRemaing change: {cash}")
amount_of_coins +=1
print(f"{Fore.RED}\n\nRounded the numbers to 2 decimal places\n") | 3.9375 | 4 |
srfnef/corrections/__init__.py | twj2417/srf | 0 | 12788997 | # encoding: utf-8
'''
@author: <NAME>
@contact: <EMAIL>
@software: nef
@file: __init__.py
@date: 5/8/2019
@desc:
'''
# from . import psf
__all__ = (
'AttenuationCorrect', 'PsfFit', 'PsfCorrect', 'FittedY', 'FittedZ', 'FittedX', 'PointSource')
from .attenuation.attenuation_correct import AttenuationCorrect
from .psf import PsfFit, PsfCorrect, FittedY, FittedZ, FittedX, PointSource
# from .normalization import NormalizationCorrect, PsfToNormalizationX, PsfToNormalizationZ
from .scattering import ScatterCorrect
#from .new_scatter.scatter import ScatterCorrect
| 1 | 1 |
bdsim/bdedit/interface_graphics_view.py | petercorke/bdsim | 64 | 12788998 | <filename>bdsim/bdedit/interface_graphics_view.py<gh_stars>10-100
# PyQt5 imports
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QGraphicsView
# BdEdit imports
from bdsim.bdedit.block import Block
from bdsim.bdedit.block_graphics_wire import GraphicsWire
from bdsim.bdedit.block_graphics_socket import GraphicsSocket
from bdsim.bdedit.floating_label_graphics import GraphicsLabel
from bdsim.bdedit.block_graphics_block import GraphicsBlock, GraphicsConnectorBlock
from bdsim.bdedit.block_wire import Wire, WIRE_TYPE_STEP, WIRE_TYPE_DIRECT, WIRE_TYPE_BEZIER
# =============================================================================
#
# Defining and setting global variables
#
# =============================================================================
# Wire mode variables - used for determining what type of wire to draw
MODE_NONE = 1
MODE_WIRE_DRAG = 2
EDGE_DRAG_LIM = 10
# Variable for enabling/disabling debug comments
DEBUG = False
# =============================================================================
#
# Defining the GraphicsView Class, which handles most auto detected
# press/scroll/click/scroll/key events, and assigns logic to those actions as
# needed.
#
# =============================================================================
class GraphicsView(QGraphicsView):
"""
The ``GraphicsView`` Class extends the ``QGraphicsView`` Class from PyQt5,
    and handles most of the user interactions with the ``Interface`` through
    press/click/scroll/key events. It also contains the logic for what Wire
    should be drawn and what Sockets it connects to. Here, mouse click events
    are used to drag wires from a start socket to an end socket: when a click is
    dragged from a socket, the mode is set to MODE_WIRE_DRAG and a Wire will
    follow the mouse until an end socket is set; otherwise the mode leaves
    MODE_WIRE_DRAG and the Wire is deleted.
"""
# # Todo add doc for this, signal for monitoring updates to interface. UNNEEDED CODE?
# scenePosChanged = pyqtSignal(int, int)
# -----------------------------------------------------------------------------
def __init__(self, grScene, parent=None):
"""
This method creates an ``QGraphicsView`` instance and associates it to this
``GraphicsView`` instance.
:param grScene: the ``GraphicsScene`` to which this ``GraphicsView`` belongs to
:type grScene: GraphicsScene, required
:param parent: the parent widget this GraphicsView belongs to (should be None)
:type parent: None, optional
"""
super().__init__(parent)
# The GraphicsScene this GraphicsView belongs to, is assigned to an internal variable
self.grScene = grScene
        # The GraphicsView is initialized with some settings to make things draw more smoothly
self.initUI()
# The GraphicsScene this GraphicsView belongs to is connected
self.setScene(self.grScene)
# The drawing mode of the wire is initially set to MODE_NONE
self.mode = MODE_NONE
# Definitions of zoom related variables
# The there are 10 zoom levels, with level 7 being the default level
# Levels 8-10 zoom in, while levels 0-6 zoom out
self._default_zoom_level = 7
self.zoom = self._default_zoom_level
self.zoomStep = 1
self.zoomRange = [0, 10]
# -----------------------------------------------------------------------------
def initUI(self):
"""
        This method initializes the GraphicsView with additional settings
        to make things draw more smoothly
"""
self.setRenderHints(QPainter.Antialiasing | QPainter.HighQualityAntialiasing | QPainter.TextAntialiasing | QPainter.SmoothPixmapTransform)
self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
self.setDragMode(QGraphicsView.RubberBandDrag)
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
# -----------------------------------------------------------------------------
def closeParamWindows(self):
"""
This method will close the parameter window used for changing the
user-editable block variables.
"""
# If there are Blocks within the Scene
if len(self.grScene.scene.blocks) != 0:
# Iterate through all the Blocks
for block in self.grScene.scene.blocks:
# And if that Block has a ParamWindow, close it
if block.parameterWindow is not None:
block.parameterWindow.setVisible(False)
block._param_visible = False
# -----------------------------------------------------------------------------
def deleteSelected(self):
"""
This method removes the selected Block or Wire from the scene.
"""
# For each selected item within the GraphicsScene
for item in self.grScene.selectedItems():
# If the item is a Wire, remove it
if isinstance(item, GraphicsWire):
item.wire.remove()
# Or if the item is a Block or Connector Block, remove it
elif isinstance(item, GraphicsBlock) or isinstance(item, GraphicsConnectorBlock):
item.block.remove()
# Or if the item is a Floating_Label, remove it
elif isinstance(item, GraphicsLabel):
item.floating_label.remove()
self.grScene.scene.has_been_modified = True
# -----------------------------------------------------------------------------
def flipBlockSockets(self):
"""
This method flips the selected Block so that the input and output
Sockets change sides.
"""
# For each selected item within the GraphicsScene
for item in self.grScene.selectedItems():
# If the item is a Block or Connector Block, flip its sockets
if isinstance(item, GraphicsBlock) or isinstance(item, GraphicsConnectorBlock):
item.block.updateSocketPositions()
item.block.flipped = not (item.block.flipped)
self.grScene.scene.has_been_modified = True
# -----------------------------------------------------------------------------
def dist_click_release(self, event):
"""
        This method checks how far the cursor has moved. This is used when the
        Wire is dragged, to check that the wire has been dragged away from the start
        socket, so that when it is released on a socket we know it's not the
        start socket.
:param event: a mouse release event that has occurred with this GraphicsView
:type event: QMouseEvent, automatically recognized by the inbuilt function
        :return: - True (if the mouse has been released more than a defined distance from
                   the start_socket)
                 - False (if the mouse has been released too close to the start_socket)
:rtype: bool
"""
# Measures that the cursor has moved a reasonable distance
click_release_poss = self.mapToScene(event.pos())
mouseMoved = click_release_poss - self.last_click_poss
edgeThreshSqr = EDGE_DRAG_LIM * EDGE_DRAG_LIM
return (mouseMoved.x() * mouseMoved.x() + mouseMoved.y() * mouseMoved.y()) > edgeThreshSqr
# -----------------------------------------------------------------------------
def getItemAtClick(self, event):
"""
This method returns the object at the click location. It is used when
checking what item within the GraphicsView has been clicked when starting
to drag a wire.
:param event: a mouse click event that has occurred with this GraphicsView
:type event: QMouseEvent, automatically recognized by the inbuilt function
:return: the item that has been clicked on (can be ``GraphicsBlock``,
``GraphicsSocket``, ``GraphicsWireStep``, ``NoneType``), required
:rtype: GraphicsBlock, GraphicsSocket, GraphicsWireStep or NoneType
"""
pos = event.pos()
obj = self.itemAt(pos)
return obj
# -----------------------------------------------------------------------------
def intersectionTest(self):
"""
This method initiates the checking of all Wires within the Scene for
intersection points where they overlap.
"""
# If there are wires within the Scene
if self.grScene.scene.wires:
# Call the first wire in the Scene to check the intersections
# Calling the first wire will still check intersection points
# of all wires, however since that code is located within the
# Wire class, this is how it's accessed.
self.grScene.scene.wires[0].checkIntersections()
# -----------------------------------------------------------------------------
def edgeDragStart(self, item):
"""
This method starts drawing a Wire between two Blocks. It will
construct a new ``Wire`` and set the start socket to the socket that
has been clicked on, and the end socket to None. The end socket will
be set when either another socket is clicked, or the mouse button is
released over another socket. If neither happen, the wire will be deleted.
:param item: the socket that has been clicked on
:type item: GraphicsSocket, required
"""
        # If in DEBUG mode, the following code will print the start and end
# sockets that have been recognized, as being relevant to this wire.
if DEBUG: print("socket is input socket:", item.socket.isInputSocket())
if DEBUG: print("socket is output socket:", item.socket.isOutputSocket())
# The start socket is extracted from the provided item
self.drag_start_socket = item.socket
# A step wire is made from the start socket, to nothing
self.drag_wire = Wire(self.grScene.scene, item.socket, None, WIRE_TYPE_STEP)
# If in DEBUG mode, the following code will print the wire that has
# just been created
if DEBUG: print('View::wireDragStart ~ dragwire:', self.drag_wire)
# -----------------------------------------------------------------------------
def edgeDragEnd(self, item):
"""
This method is used for setting the end socket of the Wire. The place
where the wire has been released will be checked, and if it is a
        ``GraphicsSocket`` and is not the start socket, then a Wire is completed.
        Next, checks will be made to ensure that inputs are not connected to inputs
and outputs are not connected to outputs. Additionally, Block Sockets will
be checked to prevent multiple Wires from connecting to a single input socket.
No such restriction is placed on the output sockets. This same logic is
applied to Connector Blocks.
If these conditions are met, the wire that was dragged will be deleted, and
a new Wire will be created with the start socket from the block the wire
drag started at, and the end socket being from the socket of the block the
Wire was dragged to.
        If the above-mentioned conditions are not met, the wire is simply removed.
:param item: should be the socket that has been clicked on (however could
be one of the following: ``GraphicsBlock``, ``GraphicsSocket``,
``GraphicsWireStep`` or ``NoneType``)
:type item: GraphicsSocket, required
        :return: True if the Wire has been successfully drawn between Blocks, otherwise False
:rtype: bool
"""
# The dragging mode of the wire is initially set to being None
self.mode = MODE_NONE
if DEBUG: print('View::edgeDragEnd ~ End dragging edge')
# The previous wire (drag_wire) is removed
self.drag_wire.remove()
self.drag_wire = None
# If the clicked item is a GraphicsSocket
if type(item) is GraphicsSocket:
# And the clicked socket is not the same socket the original wire started from
if item.socket != self.drag_start_socket:
# If we released dragging on a socket (other then the beginning socket)
# We want to keep all the wires coming from target socket
if not item.socket.is_multi_wire:
item.socket.removeAllEdges()
# We want to keep all the wires coming from start socket
if not self.drag_start_socket.is_multi_wire:
self.drag_start_socket.removeAllWires()
# If the block is a socket block check the start socket of the
# wire that ends in the socket block before connecting an end block,
# so that 2 outputs or 2 inputs are not connected through the
# Socket block
if self.drag_start_socket.socket_type == 3:
if len(self.drag_start_socket.wires) > 0:
test = self.drag_start_socket.wires[0].start_socket.socket_type
if self.drag_start_socket.wires[0].start_socket.socket_type != item.socket.socket_type:
if item.socket.socket_type == 1:
if len(item.socket.wires) == 0:
new_wire = Wire(self.grScene.scene, self.drag_start_socket, item.socket, WIRE_TYPE_STEP)
else:
new_wire = Wire(self.grScene.scene, self.drag_start_socket, item.socket, WIRE_TYPE_STEP)
# Socket block can have multi outputs only and not multi inputs
elif item.socket.socket_type == 3:
if len(item.socket.wires) > 0:
i = len(self.drag_start_socket.wires)
if i >= 1:
self.drag_start_socket.wires[i-1].remove()
else:
if item.socket.socket_type == 1:
if len(item.socket.wires) == 0:
new_wire = Wire(self.grScene.scene, self.drag_start_socket, item.socket, WIRE_TYPE_STEP)
else:
new_wire = Wire(self.grScene.scene, self.drag_start_socket, item.socket, WIRE_TYPE_STEP)
# Cannot connect a input to a input or a output to a output
# Input sockets can not have multiple wires
# Wire can only be drawn if start and end sockets are different (input to output, or output to input)
elif self.drag_start_socket.socket_type != item.socket.socket_type:
# Additional logic to ensure the input socket (either at start or end of the wire) only has a single
# wire coming into it
if item.socket.socket_type == 1:
if len(item.socket.wires) == 0:
new_wire = Wire(self.grScene.scene, self.drag_start_socket, item.socket, WIRE_TYPE_STEP)
elif self.drag_start_socket.socket_type == 1:
if len(self.drag_start_socket.wires) == 0:
new_wire = Wire(self.grScene.scene, self.drag_start_socket, item.socket, WIRE_TYPE_STEP)
# Otherwise draw a wire between the two sockets
else:
new_wire = Wire(self.grScene.scene, self.drag_start_socket, item.socket, WIRE_TYPE_STEP)
self.grScene.scene.has_been_modified = True
if DEBUG: print("created wire")
if DEBUG: print('View::edgeDragEnd ~ everything done.')
return True
return False
# -----------------------------------------------------------------------------
def keyPressEvent(self, event):
"""
This is an inbuilt method of QGraphicsView, that is overwritten by
``GraphicsView`` to detect, and assign actions to the following key presses.
- DEL or BACKSPACE: removes selected item from the Scene
- F: flips the sockets on a Block or Connector Block
- I: toggles intersection detection amongst wires (Off by default)
- CTRL + S: previously connected to saving the Scene
- CTRL + L: previously connected to loading a Scene file
The saving and loading of a file using keys has since been disabled, as
it used an old method for saving/loading JSON files which has since been
overwritten in the Interface Class. However these key checks are still
connected if future development should take place.
:param event: key(s) press(es) that have been detected
:type event: QKeyPressEvent, automatically recognized by the inbuilt function
"""
# if event.key() == Qt.Key_Delete or event.key() == Qt.Key_Backspace:
# pass
# # self.deleteSelected()
# # self.intersectionTest()
# elif event.key() == Qt.Key_F:
# self.intersectionTest()
# self.flipBlockSockets()
# elif event.key() == Qt.Key_I:
# self.grScene.enable_intersections = not self.grScene.enable_intersections
# elif event.key() == Qt.Key_M:
# [print(label.content.text_edit.toHtml()) for label in self.grScene.scene.floating_labels]
# elif event.key() == Qt.Key_S and event.modifiers() & Qt.ControlModifier:
# pass
# # self.grScene.scene.saveToFile("graph_testing.json.txt")
# elif event.key() == Qt.Key_L and event.modifiers() & Qt.ControlModifier:
# pass
# # self.grScene.scene.loadFromFile("graph_testing.json.txt")
# else:
super().keyPressEvent(event)
# -----------------------------------------------------------------------------
def mousePressEvent(self, event):
"""
This is an inbuilt method of QGraphicsView, that is overwritten by
``GraphicsView`` to detect, and direct the Left, Middle and Right mouse
button presses to methods that handle their associated logic.
Additionally, when the Left mouse button is pressed anywhere in the
        ``GraphicsView``, any currently open ``ParamWindow`` that relates to an active
``Block`` within the ``Scene`` will be closed.
:param event: a mouse press event (Left, Middle or Right)
:type event: QMousePressEvent, automatically recognized by the inbuilt function
"""
self.intersectionTest()
if event.button() == Qt.MiddleButton:
self.middleMouseButtonPress(event)
elif event.button() == Qt.LeftButton:
self.leftMouseButtonPress(event)
self.closeParamWindows()
elif event.button() == Qt.RightButton:
self.closeParamWindows()
self.rightMouseButtonPress(event)
else:
super().mousePressEvent(event)
# -----------------------------------------------------------------------------
def mouseReleaseEvent(self, event):
"""
This is an inbuilt method of QGraphicsView, that is overwritten by
``GraphicsView`` to detect, and direct the Left, Middle and Right mouse
button releases to methods that handle their associated logic.
:param event: a mouse release event (Left, Middle or Right)
:type event: QMouseReleaseEvent, required
"""
if event.button() == Qt.MiddleButton:
self.middleMouseButtonRelease(event)
elif event.button() == Qt.LeftButton:
self.leftMouseButtonRelease(event)
elif event.button() == Qt.RightButton:
self.rightMouseButtonRelease(event)
else:
super().mouseReleaseEvent(event)
# -----------------------------------------------------------------------------
def leftMouseButtonPress(self, event):
"""
        This method handles the logic associated with the Left mouse button press.
It will always run the getItemAtClick method to return the item that
has been clicked on.
- If a GraphicsSocket is pressed on, then a draggable Wire will be started.
        - If a GraphicsWire is pressed, then the active draggable Wire will be ended
          (when the wire is draggable, clicking off at a Socket will register the
          clicked item as a GraphicsWire).
Alternatively, the following logic is applied for selecting items.
- If an empty space within the GraphicsView is pressed, a draggable net
will appear, within which all items will be selected.
- If left clicking while holding the SHIFT or CTRL key, this will incrementally
select an item from within the GraphicsView. The items that are selectable
are ``GraphicsBlock``, ``GraphicsWire`` or ``GraphicsSocketBlock`` (which is
the Connector Block).
Otherwise nothing is done with the left mouse press.
:param event: a Left mouse button press
:type event: QMousePressEvent, required
:return: None to exit the method
:rtype: NoneType
"""
# Item that is clicked on is grabbed
item = self.getItemAtClick(event)
self.last_click_poss = self.mapToScene(event.pos())
if isinstance(item, GraphicsBlock) or isinstance(item, GraphicsWire) or isinstance(item, GraphicsConnectorBlock) or item is None:
if self.grScene.scene.floating_labels:
for label in self.grScene.scene.floating_labels:
cursor = label.content.text_edit.textCursor()
cursor.clearSelection()
label.content.text_edit.setTextCursor(cursor)
label.grContent.setLabelUnfocus()
if event.modifiers() & Qt.ShiftModifier:
event.ignore()
fakeEvent = QMouseEvent(QEvent.MouseButtonPress, event.localPos(), event.screenPos(),
Qt.LeftButton, event.buttons() | Qt.LeftButton,
event.modifiers() | Qt.ControlModifier)
super().mousePressEvent(fakeEvent)
return
if type(item) is GraphicsSocket:
if self.mode == MODE_NONE:
self.mode = MODE_WIRE_DRAG
self.edgeDragStart(item)
return
if self.mode == MODE_WIRE_DRAG:
res = self.edgeDragEnd(item)
if res:
return
if issubclass(item.__class__, GraphicsWire):
if self.mode == MODE_WIRE_DRAG:
res = self.edgeDragEnd(item)
if res:
return
super().mousePressEvent(event)
# -----------------------------------------------------------------------------
def leftMouseButtonRelease(self, event):
"""
        This method handles the logic associated with the Left mouse button release.
It will always run the getItemAtClick method to return the item that
the mouse has been released from.
- If a Wire was the item being dragged, it will check how far the Wire has
moved, then an attempt to complete the Wire onto a Socket will be made.
If no Socket is found, the Wire will be ended.
Alternatively, the following logic is applied for selecting items.
- If an empty space within the GraphicsView is released, if a draggable net
was active, all items within that net will be selected.
- If left clicking while holding the SHIFT or CTRL key, this will incrementally
select an item from within the GraphicsView. The items that are selectable
are ``GraphicsBlock``, ``GraphicsWire`` or ``GraphicsSocketBlock`` (which is
the Connector Block).
:param event: a Left mouse button release
:type event: QMouseReleaseEvent, required
:return: None to exit the method
:rtype: NoneType
"""
# Get item which we clicked
item = self.getItemAtClick(event)
if isinstance(item, GraphicsBlock) or isinstance(item, GraphicsWire) or isinstance(item, GraphicsConnectorBlock) or item is None:
if event.modifiers() & Qt.ShiftModifier:
event.ignore()
fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
Qt.LeftButton, Qt.NoButton,
event.modifiers() | Qt.ControlModifier)
super().mouseReleaseEvent(fakeEvent)
return
if self.mode == MODE_WIRE_DRAG:
if self.dist_click_release(event):
res = self.edgeDragEnd(item)
if res:
return
super().mouseReleaseEvent(event)
# -----------------------------------------------------------------------------
def rightMouseButtonPress(self, event):
"""
        This method handles the logic associated with the Right mouse button press.
Currently no logic is linked to a right mouse press.
:param event: the detected right mouse press event
:type event: QMousePressEvent, required
:return: the mouse press event is returned
:rtype: QMousePressEvent
"""
return super().mousePressEvent(event)
# -----------------------------------------------------------------------------
def rightMouseButtonRelease(self, event):
"""
        This method handles the logic associated with the Right mouse button release.
Currently no logic is linked to a right mouse release.
:param event: the detected right mouse release event
:type event: QMousePressEvent, required
:return: the mouse release event is returned
:rtype: QMouseReleaseEvent
"""
return super().mouseReleaseEvent(event)
# -----------------------------------------------------------------------------
def middleMouseButtonPress(self, event):
"""
        This method handles the logic associated with the Middle mouse button press
(perhaps more intuitively understood as pressing the scroll wheel).
When the scroll wheel is pressed, the mouse cursor will appear as a hand
that pinches the GraphicsView, allowing the canvas to be dragged around.
:param event: the detected middle mouse press event
:type event: QMousePressEvent, required
"""
releaseEvent = QMouseEvent(QEvent.MouseButtonRelease, event.localPos(), event.screenPos(),
Qt.LeftButton, Qt.NoButton, event.modifiers())
super().mouseReleaseEvent(releaseEvent)
self.setDragMode(QGraphicsView.ScrollHandDrag)
fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
Qt.LeftButton, event.buttons() | Qt.LeftButton, event.modifiers())
super().mousePressEvent(fakeEvent)
# -----------------------------------------------------------------------------
def middleMouseButtonRelease(self, event):
"""
This method handles the logic associate with the Middle mouse button release
(perhaps more intuitively understood as releasing the scroll wheel).
When the scroll wheel is releasing, the mouse cursor will change back from
appearing as a hand to the default mouse cursor (pointer arrow on Windows).
:param event: the detected middle mouse release event
:type event: QMouseReleaseEvent, required
"""
fakeEvent = QMouseEvent(event.type(), event.localPos(), event.screenPos(),
Qt.LeftButton, event.buttons() & ~Qt.LeftButton, event.modifiers())
super().mouseReleaseEvent(fakeEvent)
self.setDragMode(QGraphicsView.NoDrag)
# -----------------------------------------------------------------------------
def wheelEvent(self, event):
"""
This is an inbuilt method of QGraphicsView, that is overwritten by
``GraphicsView`` to assign logic to detected scroll wheel movement.
        - As the scroll wheel is moved up, the view zooms in on the work
        area of the ``GraphicsScene``.
        - As the scroll wheel is moved down, the view zooms out of the work
        area of the ``GraphicsScene``.
:param event: the detected scroll wheel movement
:type event: QWheelEvent, automatically recognized by the inbuilt function
"""
# If scroll wheel vertical motion is detected to being upward
if event.angleDelta().y() > 0:
# Set the zoom factor to 1.25, and incrementally increase the zoom step
zoomFactor = 1.25
self.zoom += self.zoomStep
# Else the scroll wheel is moved downwards
else:
# Set the zoom factor to 1/1.25, and incrementally decrease the zoom step
zoomFactor = 0.8
self.zoom -= self.zoomStep
# If the current zoom is within the allowable zoom levels (0 to 10)
# Scale the Scene (in the x and y) by the above-set zoomFactor
if self.zoomRange[0]-1 <= self.zoom <= self.zoomRange[1]:
self.scale(zoomFactor, zoomFactor)
# Otherwise if the current zoom is below the lowest allowable zoom level (0)
# Force the zoom level to the lowest allowable level
elif self.zoom < self.zoomRange[0]-1:
self.zoom = self.zoomRange[0]-1
# Otherwise if the current zoom is above the highest allowable zoom level (10)
# Force the zoom level to the highest allowable level
elif self.zoom > self.zoomRange[1]:
self.zoom = self.zoomRange[1]
# -----------------------------------------------------------------------------
def mouseMoveEvent(self, event):
"""
This is an inbuilt method of QGraphicsView, that is overwritten by
``GraphicsView`` to assign logic to detected mouse movement.
- If the wire is in dragging mode, the position the wire is drawn to
will be updated to the mouse cursor as it is moved around.
- Additionally, the code to check for intersection amongst wires will
be run, and subsequently, if any are found, they will be automatically
marked within the ``GraphicsScene`` Class.
:param event: the detected mouse movement event
:type event: QMouseMoveEvent, automatically recognized by the inbuilt function
"""
super().mouseMoveEvent(event)
try:
# If the wire is in dragging mode
if self.mode == MODE_WIRE_DRAG:
# Grab the on-screen position of the mouse cursor
pos = self.mapToScene(event.pos())
# Set the point that the wire draws to, as the current x,y position of the mouse cursor
self.drag_wire.grWire.setDestination(pos.x(), pos.y())
# Call for the wire to be redrawn/updated accordingly
self.drag_wire.grWire.update()
except AttributeError:
self.mode = MODE_NONE
# Call for the intersection code to be run
self.intersectionTest()
| 1.726563 | 2 |
cursoemvideo/desafios/Desafio007.py | adinsankofa/python | 0 | 12788999 | '''
Develop a program that reads a student's two grades, then calculates and displays their average
'''
nota1 = float(input('Digite a 1ª nota: '))
nota2 = float(input('Digite a 2ª nota: '))
media = (nota1 + nota2)/2
print("Média: {:0.2f}".format(media))
| 3.828125 | 4 |
wally/suits/io/fio_job.py | Mirantis/rally-results-processor | 41 | 12789000 | <reponame>Mirantis/rally-results-processor<filename>wally/suits/io/fio_job.py
import copy
from collections import OrderedDict
from typing import Optional, Iterator, Union, Dict, Tuple, Any, cast
from cephlib.units import ssize2b, b2ssize
from ..job import JobConfig, JobParams, Var
def is_fio_opt_true(vl: Union[str, int]) -> bool:
return str(vl).lower() in ['1', 'true', 't', 'yes', 'y']
class FioJobParams(JobParams):
"""Class contains all parameters, which significantly affects fio results.
oper - operation type - read/write/randread/...
sync_mode - direct/sync/async/direct+sync
bsize - block size in KiB
qd - IO queue depth,
thcount - thread count,
write_perc - write perc for mixed(read+write) loads
Like block size or operation type, but not file name or file size.
Can be used as key in dictionary.
"""
sync2long = {'x': "sync direct",
's': "sync",
'd': "direct",
'a': "buffered"}
@property
def sync_mode_long(self) -> str:
return self.sync2long[self['sync_mode']]
@property
def summary(self) -> str:
"""Test short summary, used mostly for file names and short image description"""
res = f"{self['oper_short']}{self['sync_mode']}{self['bsize']}"
if self['qd'] is not None:
res += "_qd" + str(self['qd'])
if self['thcount'] not in (1, None):
res += "th" + str(self['thcount'])
if self['write_perc'] is not None:
res += "wr" + str(self['write_perc'])
return res
@property
def long_summary(self) -> str:
"""Readable long summary for management and deployment engineers"""
res = f"{self['oper']}, {self.sync_mode_long}, block size {b2ssize(self['bsize'] * 1024)}B"
if self['qd'] is not None:
res += ", QD = " + str(self['qd'])
if self['thcount'] not in (1, None):
res += f", threads={self['thcount']}"
if self['write_perc'] is not None:
res += f", fwrite_perc={self['write_perc']}%"
return res
def copy(self, **kwargs: Dict[str, Any]) -> 'FioJobParams':
np = self.params.copy()
np.update(kwargs)
return self.__class__(**np)
@property
def char_tpl(self) -> Tuple[Union[str, int], ...]:
mint = lambda x: -10000000000 if x is None else int(x)
return self['oper'], mint(self['bsize']), self['sync_mode'], \
mint(self['thcount']), mint(self['qd']), mint(self['write_perc'])
class FioJobConfig(JobConfig):
"""Fio job configuration"""
ds2mode = {(True, True): 'x',
(True, False): 's',
(False, True): 'd',
(False, False): 'a'}
op_type2short = {"randread": "rr",
"randwrite": "rw",
"read": "sr",
"write": "sw",
"randrw": "rx"}
def __init__(self, name: str, idx: int) -> None:
JobConfig.__init__(self, idx)
self.name = name
self._sync_mode: Optional[str] = None
self._params: Optional[Dict[str, Any]] = None
# ------------- BASIC PROPERTIES -----------------------------------------------------------------------------------
@property
def write_perc(self) -> Optional[int]:
try:
return int(self.vals["rwmixwrite"]) # type: ignore
except (KeyError, TypeError):
try:
return 100 - int(self.vals["rwmixread"]) # type: ignore
except (KeyError, TypeError):
return None
@property
def qd(self) -> int:
return int(self.vals.get('iodepth', '1')) # type: ignore
@property
def bsize(self) -> int:
bsize = ssize2b(self.vals['blocksize'])
assert bsize % 1024 == 0
return bsize // 1024
@property
def oper(self) -> str:
vl = self.vals['rw']
return vl if ':' not in vl else vl.split(":")[0] # type: ignore
@property
def op_type_short(self) -> str:
return self.op_type2short[self.oper]
@property
def thcount(self) -> int:
return int(self.vals.get('numjobs', 1)) # type: ignore
@property
def sync_mode(self) -> str:
if self._sync_mode is None:
direct = is_fio_opt_true(self.vals.get('direct', '0')) # type: ignore
direct = direct or not is_fio_opt_true(self.vals.get('buffered', '0')) # type: ignore
sync = is_fio_opt_true(self.vals.get('sync', '0')) # type: ignore
self._sync_mode = self.ds2mode[(sync, direct)]
return cast(str, self._sync_mode)
# ----------- COMPLEX PROPERTIES -----------------------------------------------------------------------------------
@property
def params(self) -> JobParams:
if self._params is None:
self._params = dict(oper=self.oper,
oper_short=self.op_type_short,
sync_mode=self.sync_mode,
bsize=self.bsize,
qd=self.qd,
thcount=self.thcount,
write_perc=self.write_perc)
return cast(JobParams, FioJobParams(**cast(Dict[str, Any], self._params)))
# ------------------------------------------------------------------------------------------------------------------
def __eq__(self, o: object) -> bool:
if not isinstance(o, FioJobConfig):
return False
return dict(self.vals) == dict(cast(FioJobConfig, o).vals)
def copy(self) -> 'FioJobConfig':
return copy.deepcopy(self)
def required_vars(self) -> Iterator[Tuple[str, Var]]:
for name, val in self.vals.items():
if isinstance(val, Var):
yield name, val
def is_free(self) -> bool:
return len(list(self.required_vars())) == 0
def __str__(self) -> str:
res = "[{0}]\n".format(self.summary)
for name, val in self.vals.items():
if name.startswith('_') or name == name.upper():
continue
if isinstance(val, Var):
res += "{0}={{{1}}}\n".format(name, val.name)
else:
res += "{0}={1}\n".format(name, val)
return res
def __repr__(self) -> str:
return str(self)
def raw(self) -> Dict[str, Any]:
res = super().raw()
res['vals'] = list(map(list, self.vals.items()))
return res
@classmethod
def fromraw(cls, data: Dict[str, Any]) -> 'FioJobConfig':
data['vals'] = OrderedDict(data['vals'])
data['_sync_mode'] = None
data['_params'] = None
return cast(FioJobConfig, super().fromraw(data))
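# A small usage sketch; it assumes the JobParams base class accepts these fields
# as keyword arguments (as FioJobParams.copy() implies), and the values are made up.
if __name__ == "__main__":
    params = FioJobParams(oper="randread", oper_short="rr", sync_mode="d",
                          bsize=4, qd=32, thcount=1, write_perc=None)
    print(params.summary)       # expected to look like "rrd4_qd32"
    print(params.long_summary)  # e.g. "randread, direct, block size 4...B, QD = 32"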
| 2.171875 | 2 |
utils/SRCNN/SR_CNN.py | roman-vygon/pyFAST | 0 | 12789001 | # Generated with SMOP 0.41
from libsmop import *
# SR_CNN.m
@function
def SR_CNN(iml=None,ratio=None,model=None,*args,**kwargs):
varargin = SR_CNN.varargin
nargin = SR_CNN.nargin
if 2 == ratio:
sr_model=model.x2_model
# SR_CNN.m:5
else:
if 3 == ratio:
sr_model=model.x3_model
# SR_CNN.m:7
else:
if 4 == ratio:
sr_model=model.x4_model
# SR_CNN.m:9
else:
error('Unsupported upsampling ratio: %f',ratio)
iml=single(iml) / 255
# SR_CNN.m:14
imh_bicubic=imresize(iml,ratio,'bicubic')
# SR_CNN.m:15
imh_sr=SRCNN(sr_model,imh_bicubic)
# SR_CNN.m:16
border=copy(ratio)
# SR_CNN.m:19
imh=copy(imh_bicubic)
# SR_CNN.m:20
imh[arange((1 + border),(end() - border)),arange((1 + border),(end() - border))]=imh_sr(arange((1 + border),(end() - border)),arange((1 + border),(end() - border)))
# SR_CNN.m:21
imh=uint8(dot(255,imh))
# SR_CNN.m:24
return imh
if __name__ == '__main__':
pass
| 1.867188 | 2 |
barrage/services.py | briannemsick/barrage | 16 | 12789002 | <reponame>briannemsick/barrage
import os
from typing import List
from tensorflow.python.keras import callbacks
from barrage import logger
DATASET = "dataset"
BEST_CHECKPOINT = "best_checkpoint"
BEST_MODEL = "model_best.ckpt"
RESUME_CHECKPOINTS = "resume_checkpoints"
RESUME_MODEL = "model_epoch_{epoch:04d}.ckpt"
TENSORBOARD = "TensorBoard"
CSV_LOGGER_FILENAME = "training_report.csv"
REQUIRED_SUBDIRS = [DATASET, BEST_CHECKPOINT, RESUME_CHECKPOINTS, TENSORBOARD]
def make_artifact_dir(artifact_dir: str):
"""Make the artifact directory and all required subdirectories.
Args:
artifact_dir: str, path to artifact directory.
Raises:
OSError, artifact_dir already exists.
"""
if os.path.isdir(artifact_dir):
raise OSError(f"artifact_dir: {artifact_dir} already exists")
os.mkdir(artifact_dir)
for subdir in REQUIRED_SUBDIRS:
os.mkdir(os.path.join(artifact_dir, subdir))
def create_all_services(
artifact_dir: str, cfg_services: dict
) -> List[callbacks.Callback]:
"""Create all services (callbacks).
Args:
artifact_dir: str, path to artifact directory.
cfg_services: dict, services subsection of config.
Returns:
list[Callback], all services.
"""
return [
_create_best_checkpoint(artifact_dir, cfg_services),
_create_resume_checkpoint(artifact_dir),
_create_tensorboard(artifact_dir, cfg_services),
_create_csv_logger(artifact_dir),
_create_train_early_stopping(cfg_services),
_create_validation_early_stopping(cfg_services),
callbacks.TerminateOnNaN(),
]
def _create_best_checkpoint(
artifact_dir: str, cfg_services: dict
) -> callbacks.ModelCheckpoint:
"""Create a callback that saves the best model.
Args:
artifact_dir: str, path to artifact directory.
cfg_services: dict, services subsection of config.
Returns:
ModelCheckpoint, callback that saves the best model.
"""
checkpoint_params = cfg_services["best_checkpoint"]
checkpoint_params["monitor"] = _force_monitor_to_mode(
checkpoint_params["monitor"], True, "best_checkpoint"
)
filepath = get_best_checkpoint_filepath(artifact_dir)
return callbacks.ModelCheckpoint(
filepath=filepath,
monitor=checkpoint_params["monitor"],
mode=checkpoint_params["mode"],
verbose=1,
save_best_only=True,
save_weights_only=True,
)
def _create_resume_checkpoint(artifact_dir: str) -> callbacks.ModelCheckpoint:
"""Create a callback that saves the model every epoch.
Args:
artifact_dir: str, path to artifact directory.
Returns:
ModelCheckpoint, callback that saves the model every epoch.
"""
filepath = get_resume_checkpoints_filepath(artifact_dir)
return callbacks.ModelCheckpoint(
filepath=filepath,
monitor="val_loss",
mode="min",
verbose=0,
save_best_only=False,
save_weights_only=True,
)
def _create_tensorboard(artifact_dir: str, cfg_services: dict) -> callbacks.TensorBoard:
"""Create a TensorBoard callback.
Args:
artifact_dir: str, path to artifact directory.
cfg_services: dict, services subsection of config.
Returns:
TensorBoard, Tensorboard callback.
"""
tensorboard_params = cfg_services["tensorboard"]
if "log_dir" in tensorboard_params:
logger.warning("'log_dir' automatically handled for 'tensorboard' service")
tensorboard_params["log_dir"] = os.path.join(artifact_dir, TENSORBOARD)
return callbacks.TensorBoard(**tensorboard_params)
def _create_csv_logger(artifact_dir: str) -> callbacks.CSVLogger:
"""Create a CSVLogger callback.
Args:
artifact_dir: str, path to artifact directory.
Returns:
        CSVLogger, CSVLogger callback.
"""
filename = os.path.join(artifact_dir, CSV_LOGGER_FILENAME)
return callbacks.CSVLogger(filename=filename, separator=",", append=True)
def _create_train_early_stopping(cfg_services: dict) -> callbacks.EarlyStopping:
"""Create an early stopping callback that monitors a training 'metric'.
Args:
cfg_services: dict, services subsection of config.
Returns:
EarlyStopping, EarlyStopping callback that monitors a training 'metric'.
"""
early_stopping_params = cfg_services["train_early_stopping"]
early_stopping_params["monitor"] = _force_monitor_to_mode(
early_stopping_params["monitor"], False, "train_early_stopping"
)
return callbacks.EarlyStopping(**early_stopping_params)
def _create_validation_early_stopping(cfg_services: dict) -> callbacks.EarlyStopping:
"""Create an early stopping callback that monitors a validation 'metric'.
Args:
cfg_services: dict, services subsection of config.
Returns:
EarlyStopping, EarlyStopping callback that monitors a validation 'metric'.
"""
early_stopping_params = cfg_services["validation_early_stopping"]
early_stopping_params["monitor"] = _force_monitor_to_mode(
early_stopping_params["monitor"], True, "validation_early_stopping"
)
return callbacks.EarlyStopping(**early_stopping_params)
def get_best_checkpoint_filepath(artifact_dir: str) -> str:
"""Get the filepath for the best checkpoint.
Args:
artifact_dir: str, path to artifact directory.
Returns:
str, filepath for best checkpoint directory.
"""
return os.path.join(artifact_dir, BEST_CHECKPOINT, BEST_MODEL)
def get_resume_checkpoints_filepath(artifact_dir: str) -> str:
"""Get the filepath for the resume checkpoints.
Args:
artifact_dir: str, path to artifact directory.
Returns:
str, filepath for resume checkpoints.
"""
return os.path.join(artifact_dir, RESUME_CHECKPOINTS, RESUME_MODEL)
def _force_monitor_to_mode(monitor: str, to_val: bool, service_name: str) -> str:
"""Force a monitor quantity to either train or validation mode. For
example 'loss' - train, 'val_loss' - validation.
Args:
monitor: str, metric to monitor.
to_val: bool, validation if true, else false.
service_name: str, corresponding service (for warning purposes).
Returns:
str, monitor maybe forced.
"""
if to_val and not monitor.startswith("val_"):
monitor = f"val_{monitor}"
logger.warning(
f"corrected 'monitor' to validation verison: {monitor} "
f"for service: {service_name}"
)
elif not to_val and monitor.startswith("val_"):
monitor = monitor[4:]
logger.warning(
f"corrected 'monitor' to train verison: {monitor} "
f"for service: {service_name}"
)
return monitor
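# An illustrative sketch of wiring the services together; the config keys mirror
# the ones read above, while the monitor/patience values and the artifact path
# are assumptions.
if __name__ == "__main__":
    example_cfg_services = {
        "best_checkpoint": {"monitor": "loss", "mode": "min"},
        "tensorboard": {},
        "train_early_stopping": {"monitor": "loss", "mode": "min", "patience": 5},
        "validation_early_stopping": {"monitor": "loss", "mode": "min", "patience": 10},
    }
    make_artifact_dir("artifacts/run_0001")
    services_list = create_all_services("artifacts/run_0001", example_cfg_services)
    print([type(cb).__name__ for cb in services_list])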
| 2.3125 | 2 |
codewars/6kyu/One Line Task Element-wise Maximum/main_test.py | ictcubeMENA/Training_one | 0 | 12789003 | import main
import unittest
class OnelineTest(unittest.TestCase):
    def test_example(self):  # unittest only runs methods whose names start with 'test'
a = [1, 2, 3, 4, 5]
b = [10, 0, 10, 0, 10]
main.fmax(a,b)
self.assertEqual(a,[10, 2, 10, 4, 10])
if __name__ == '__main__':
unittest.main() | 2.8125 | 3 |
mylibrary/controllers/backup.py | keygenqt/MyLibrary-server | 0 | 12789004 | <reponame>keygenqt/MyLibrary-server<gh_stars>0
"""
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import subprocess
import os
import uuid
from pathlib import Path
from cement import Controller, ex
from mylibrary.ext.base.dump import save_dump, save_ftp
class Backup(Controller):
class Meta:
label = 'backup'
description = 'MyLibrary backup'
@ex(
help='server data backup',
arguments=[
(['-t', '--type'],
dict(
dest='type',
action='store',
default='db',
choices=['db', 'tar'])),
],
)
def backup(self):
if self.app.pargs.type is not None:
if self.app.pargs.type == 'db':
self._db()
if self.app.pargs.type == 'tar':
self._tar()
# noinspection PyBroadException
@ex(hide=True)
def _db(self):
db_user = self.app.config.get('db_conf', 'user')
db_pass = self.app.config.get('db_conf', '<PASSWORD>')
db_name = self.app.config.get('db_conf', 'name')
home = self.app.config.get('mylibrary', 'home')
tmp = '{}/{}.sql'.format(home, uuid.uuid4())
# subprocess for suppress output warning
subprocess.getoutput('mysqldump -u {} -p{} {} > {}'.format(db_user, db_pass, db_name, tmp))
try:
save_dump(self.app, tmp)
save_ftp(self.app, tmp)
self.app.log.info('save db dump done')
except:
self.app.log.error('An error occurred while saving, check config file.')
os.remove(tmp)
# noinspection PyBroadException
@ex(hide=True)
def _tar(self):
files = self.app.config.get('dump_tar', 'files')
dirs = self.app.config.get('dump_tar', 'dirs')
exclude = self.app.config.get('dump_tar', 'exclude')
processes = self.app.config.get('dump_tar', 'processes')
result = {}
for item in files:
if os.path.isfile(item):
self.app.log.info('Start compress file: {}'.format(item))
home = self.app.config.get('mylibrary', 'home')
tmp = '{}/{}.tar.gz'.format(home, uuid.uuid4())
subprocess.getoutput(
'tar --absolute-names --use-compress-program="pigz --best --recursive -p {}" -cf {} {}'.format(processes,
tmp,
item))
result[item] = tmp
else:
                self.app.log.error('File does not exist: {}'.format(item))
for item in dirs:
if os.path.isdir(item):
self.app.log.info('Start compress dir: {}'.format(item))
home = self.app.config.get('mylibrary', 'home')
tmp = '{}/{}.tar.gz'.format(home, uuid.uuid4())
if not exclude:
subprocess.getoutput(
'tar --absolute-names --use-compress-program="pigz --best --recursive -p {}" -cf {} {}'.format(processes,
tmp,
item))
else:
_exclude = '--exclude={}'.format(' --exclude='.join(exclude))
subprocess.getoutput(
'tar --absolute-names --use-compress-program="pigz --best --recursive -p {}" {} -cf {} {}'.format(processes,
_exclude,
tmp,
item))
result[item] = tmp
else:
                self.app.log.error('Dir does not exist: {}'.format(item))
for item in result:
try:
save_dump(self.app, result[item], item)
save_ftp(self.app, result[item], item)
except:
self.app.log.error('An error occurred while saving, check config file.')
os.remove(result[item])
self.app.log.info('save tars dump done')
| 1.867188 | 2 |
CentroidsGeneration/distribution.py | dudenzz/word_embedding | 0 | 12789005 | from utilities import load_stf
import numpy as np
from scipy.spatial.distance import cosine
import time
#vsm = load_stf('glove.840B.300d.sample.txt',300)
#csm = np.load('centroids').item()
#distrib = np.zeros((100000,10))
#oFile = open('f_distrib','w+')
def dot_product(v1,v2):
total = 0
if len(v1) != len(v2):
		raise ValueError("vectors must have the same length")  # 'throw' is not valid Python
for i in range(len(v1)):
total += float(v1[i])*float(v2[i])
return total
def centroid(vsm,w,k):
total = np.zeros(len(vsm.word_vectors[vsm.dictionary[w]]))
for v in vsm.most_similar(w,k+1):
total += vsm.word_vectors[vsm.dictionary[v[0]]]
total /= k
return total
def lcent_similarity(w1,w2,vsm,gamma,k,c):
v1 = vsm.word_vectors[vsm.dictionary[w1]]
v2 = vsm.word_vectors[vsm.dictionary[w2]]
v1v2 = dot_product(v1,v2)
v1c = dot_product(v1,c)
v1cg = np.power(v1c,gamma)
return v1v2 - v1cg
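# In symbols, the score returned above is
#     lcent(w1, w2) = <v1, v2> - <v1, c> ** gamma
# where c = centroid(vsm, w1, k) averages w1's k nearest neighbours, so words
# that merely sit close to w1's neighbourhood centre are intended to be penalised.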
def insert(v,sims,vec,val):
nv = np.zeros(len(v))
nsims = np.zeros((len(sims),300))
swap = 0
for i in range(len(v)):
if v[i]<val:
swap = 1
break
if swap == 0:
return (v,sims)
nv[:i] = v[:i]
nsims[:i] = sims[:i]
nv[i] = val
nsims[i] = vec
nv[i+1:] = v[i:len(v)-1]
nsims[i+1:] = sims[i:len(sims)-1]
return (nv,nsims)
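# `insert` is intended to keep `v` sorted in descending order, shifting later
# entries (and the matching rows of `sims`) down by one and dropping the last;
# e.g. v=[0.9, 0.5, 0.1] with val=0.7 becomes [0.9, 0.7, 0.5], with `vec`
# stored in slot 1.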
def most_similar_lcent(vsm,csm,word,k,gamma):
sims = np.zeros(10)
vecs = np.zeros(10)
c = csm[word]
for i,d_word in enumerate(vsm.dictionary):
sim = lcent_similarity(word,d_word,vsm,gamma,k,c)
(sims,vecs) = insert(vecs,sims,vsm.dictionary[d_word],sim)
ret = []
for i in range(10):
ret.append((sims[i],vecs[i]))
return ret
'''
centroids = {}
for i,j in enumerate(vsm.dictionary):
if i%100 == 0:
print i
centroids[j] = centroid(vsm,j,11)
'''
#c = time.time()
#for j,w in enumerate(vsm.dictionary):
# print j
# print time.time() - c
# c = time.time()
# ms = most_similar_lcent(vsm,csm,w,11,2)
# for k,s in enumerate(ms):
# print s
# i = vsm.dictionary[s]
# distrib[i,k] += 1
#for c in centroids:
# oFile.write(str(c) + u' ')
# for i in centroids[c]:
# oFile.write(str(i) + u' ')
# oFile.write(u'\n')
#np.save(oFile,distrib)
#oFile.close()
| 2.1875 | 2 |
app/main/views.py | dynamodenis/pitch-master | 0 | 12789006 | <filename>app/main/views.py
import functools
import os
import secrets
from PIL import Image
from flask import render_template,redirect,url_for,abort,flash,request
from . import main
from flask_login import login_required,current_user
from ..models import User,Comment,Pitch
from .forms import UploadPitch,CommentsForm,UpdateBio
from .. import db
from flask import current_app
@main.route('/')
def index():
page=request.args.get('page',1,type=int)
all_pitch=Pitch.query.order_by(Pitch.posted.desc()).paginate(page=page,per_page=10)
return render_template('index.html',pitches=all_pitch,title='Pitch | Master')
def save_picture(form_data):
random_url=secrets.token_hex(6)
    _,file_extension=os.path.splitext(form_data.filename)
    file_url=random_url+file_extension
# picture_loc=os.path.join("/home/dynamo/Desktop/projects/PYTHON PROJECTS/Pitch-Master/app/static/profile/"+file_url)
picture_loc=os.path.join( current_app.root_path+"/static/profile/"+file_url)
print(picture_loc)
sized_image=(400,600)
cut=Image.open(form_data)
cut.thumbnail(sized_image)
cut.save(picture_loc)
return file_url
@main.route('/user/<string:uname>',methods=['GET','POST'])
@login_required
def profile(uname):
image=url_for('static',filename='profile/'+ current_user.profile_pic_path)
    user=User.query.filter_by(username=uname).first()
    if user is None:
        abort(404)
    pitch=Pitch.query.filter_by(user_id=current_user.id).all()
    # pitch=Pitch.query.get(user_id=user.id).all()
bio=UpdateBio()
if bio.validate_on_submit():
if bio.picture.data:
pic_file=save_picture(bio.picture.data)
user.profile_pic_path=pic_file
db.session.commit()
user.bio=bio.bio.data
db.session.add(user)
db.session.commit()
return render_template('profile/profile.html',user=user,image=image,bio=bio,pitches=pitch,title=current_user.username)
@main.route('/upload/pitch',methods=['GET','POST'])
@login_required
def upload_pitch():
pitch=UploadPitch()
if current_user is None:
abort(404)
if pitch.validate_on_submit():
pitch=Pitch(pitch_category=pitch.category.data,pitch=pitch.pitch.data,user=current_user)
db.session.add(pitch)
db.session.commit()
flash('Pitch Uploaded')
return redirect(url_for('main.index'))
return render_template('profile/update_pitch.html',pitch=pitch,title='Create Pitch',legend='Create Pitch')
@main.route('/<int:pname>/comment',methods=['GET','POST'])
@login_required
def comment(pname):
comments=CommentsForm()
image=url_for('static',filename='profile/'+ current_user.profile_pic_path)
pitch=Pitch.query.filter_by(id=pname).first()
comment_query=Comment.query.filter_by(pitch_id=pitch.id).all()
if request.args.get('likes'):
        pitch.upvotes += 1
db.session.add(pitch)
db.session.commit()
return redirect(url_for('main.comment',pname=pname))
elif request.args.get('dislike'):
        pitch.downvotes += 1
db.session.add(pitch)
db.session.commit()
return redirect(url_for('main.comment',pname=pname))
if comments.validate_on_submit():
comment=Comment(comment=comments.comment.data,pitch_id=pitch.id,user_id=current_user.id)
db.session.add(comment)
db.session.commit()
return redirect(url_for('main.comment',pname=pname))
return render_template('pitch.html' ,comment=comments,pitch=pitch,comments=comment_query,title='Pitch Comment',image=image)
@main.route('/<int:pname>/update',methods=['GET','POST'])
@login_required
def update(pname):
pitches=UploadPitch()
pitch=Pitch.query.get(pname)
if pitch.user != current_user:
abort(403)
if pitches.validate_on_submit():
pitch.pitch_category=pitches.category.data
pitch.pitch=pitches.pitch.data
db.session.commit()
flash('Successfully Updated!')
return redirect(url_for('main.profile',uname=pitch.user.username))
elif request.method=='GET':
pitches.category.data=pitch.pitch_category
pitches.pitch.data=pitch.pitch
return render_template('profile/update_pitch.html',pitch=pitches,legend="Update Pitch")
@main.route('/<int:pitch_id>/delete',methods=['POST'])
@login_required
def delete_pitch(pitch_id):
pitch=Pitch.query.get(pitch_id)
if pitch.user != current_user:
abort(403)
db.session.delete(pitch)
db.session.commit()
return redirect(url_for('main.profile',uname=pitch.user.username))
@main.route('/profile/user/<string:username>')
def posted(username):
user=User.query.filter_by(username=username).first_or_404()
image=url_for('static',filename='profile/'+ user.profile_pic_path)
page=request.args.get('page',1,type=int)
all_pitch=Pitch.query.filter_by(user=user)\
.order_by(Pitch.posted.desc())\
.paginate(page=page,per_page=10)
return render_template('posted_by.html',pitches=all_pitch,title=user.username,user=user,image=image) | 2.28125 | 2 |
example/tasks.py | khorolets/kuyruk | 0 | 12789007 | <filename>example/tasks.py
from kuyruk import Kuyruk
kuyruk = Kuyruk()
@kuyruk.task()
def echo(message):
print message
| 1.640625 | 2 |
TerraPi/dashboard.py | sghctoma/terrapi | 2 | 12789008 | #!/usr/bin/env python3
import json
import logging
import pkg_resources
import pytz
import sys
import tzlocal
import yaml
from datetime import datetime, timedelta
from os.path import expanduser, isfile
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
from .db import create_sessionmaker, Measurement, Sensor, SensorType
colors = [
"#1f77b4",
"#7f7f7f",
"#17becf",
"#ff7f0e",
"#2ca02c",
"#d62728",
"#9467bd",
"#8c564b",
"#e377c2",
"#bcbd22"
]
app = dash.Dash(__name__)
app.layout = html.Div([
html.H1('TerraPi dashboard'),
dcc.Interval(
id = 'interval-component',
interval = 5 * 60 * 1000,
n_intervals = 0
),
html.Div(id='intermediate-value', style={'display': 'none'})
])
def generate_update_func(sensor_type):
def update_graph_live(measurements_json, relayout_data):
global sensors
m = json.loads(measurements_json)
sensor_ids = [s.id for s in sensors if s.type==sensor_type]
data = []
i = 0
for sensor_id in sensor_ids:
data.append(go.Scatter(
x = m[str(sensor_id)]['timestamp'],
y = m[str(sensor_id)]['value'],
name = [s.name for s in sensors if s.id==sensor_id][0],
mode = 'lines',
line = dict(color=colors[i%len(colors)])
))
i = i + 1
layout = go.Layout(
title = sensor_type.name.capitalize(),
margin = dict(l=60, r=60, b=30, t=30),
legend = dict(x=0, y=1, xanchor='left'),
xaxis = dict(
type = 'date',
range = [
relayout_data['xaxis.range[0]'],
relayout_data['xaxis.range[1]']
] if 'xaxis.range[0]' in relayout_data else None,
rangeselector = dict(
buttons = list([
dict(count=1, label='1 day', step='day', stepmode='backward'),
dict(count=7, label='1 week', step='day', stepmode='backward'),
dict(count=1, label='1 month', step='month', stepmode='backward'),
dict(step='all')
])
),
),
yaxis = dict(fixedrange = True)
)
return go.Figure(layout=layout, data=data)
return update_graph_live
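# Example of how one generated callback is wired up (this mirrors the loop in
# main() below; 'temperature' is an assumed SensorType name, used only to make
# the closure pattern explicit):
#
#   app.callback(Output('temperature', 'figure'),
#                [Input('intermediate-value', 'children')],
#                [State('temperature', 'relayoutData')]
#                )(generate_update_func(SensorType.temperature))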
@app.callback(
Output('intermediate-value', 'children'),
[Input('interval-component', 'n_intervals')])
def update_measurements(n):
global sensors
global sessionmaker
measurements = dict()
session = sessionmaker()
    lookback = timedelta(days=30)  # window wide enough for the 1-month range selector
local_tz = tzlocal.get_localzone()
for sensor in sensors:
measurements[sensor.id] = dict()
_data = session.query(Measurement).filter(
Measurement.sensor==sensor).filter(
                Measurement.timestamp>datetime.now()-lookback).order_by(
Measurement.timestamp).all()
measurements[sensor.id]['timestamp'] = [
m.timestamp.replace(tzinfo=pytz.utc).astimezone(local_tz) for m in _data]
measurements[sensor.id]['value'] = [m.value for m in _data]
session.close()
return json.dumps(measurements, default=str)
def get_connection_string():
config_paths = []
if len(sys.argv) > 1:
config_paths.append(sys.argv[1])
config_paths.append(expanduser('~') + '/.terrapi.yaml')
config_paths.append(expanduser('~') + '/.config/terrapi/config.yaml')
config_paths.append(pkg_resources.resource_filename('TerraPi',
'conf/config-sample.yaml'))
    configfile = None
    for path in config_paths:
if isfile(path):
configfile = path
break
if not configfile:
logging.error("No config file found! Exiting..")
sys.exit(1)
with open(configfile, 'r') as stream:
        config = yaml.safe_load(stream)
if not config:
logging.error("Empty configuration! Exiting...")
sys.exit(1)
connection_string = config.get('connection_string')
if not connection_string:
logging.info("Database configuration not found, using SQLite.")
database = pkg_resources.resource_filename('TerraPi','data/terrapi.db')
connection_string = 'sqlite:///{}'.format(database)
return connection_string
def main():
global sensors
global sessionmaker
connection_string = get_connection_string()
sessionmaker = create_sessionmaker(connection_string)
session = sessionmaker()
sensors = session.query(Sensor).all()
for s in sensors:
app.layout.children.append(
html.Div(
children = dcc.Graph(id = s.type.name),
style = dict(
marginBottom = 80,
marginTop = 80)
))
session.close()
for st in set([s.type for s in sensors]):
app.callback(
Output(st.name, 'figure'),
[Input('intermediate-value', 'children')],
[State(st.name, 'relayoutData')]
)(generate_update_func(st))
app.run_server(debug=True)
if __name__ == '__main__':
main()
| 2.078125 | 2 |
app/views.py | albertodepaola/catalog-project | 0 | 12789009 | <gh_stars>0
from flask import render_template, session, make_response
from app import appbuilder, db
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder import ModelView, expose
from app.models import Category, Item
from flask_appbuilder.fieldwidgets import BS3TextAreaFieldWidget, TextField
from app.widgets import BS3TextFieldROWidget, BS3TextAreaFieldROWidget
import json
# Method used to check if the logged in user is the author of the record.
# If it isn't, it raises an error.
def check_logged_user(item):
user_id = session['user_id']
if item.user_id != int(user_id):
raise ValueError("Cannot modify this record, created by another user")
class ItemModelView(ModelView):
datamodel = SQLAInterface(Item)
label_columns = {'category': 'Category'}
list_columns = ['title', 'description', 'category']
visible_columns = ['title', 'description', 'category']
add_columns = visible_columns
edit_columns = visible_columns
show_columns = visible_columns
show_fieldsets = [
(
'Summary',
{'fields': ['title', 'description', 'category']}
),
]
edit_form_extra_fields = {'description':
TextField('Provide a description',
description='Item description',
widget=BS3TextAreaFieldWidget())}
add_form_extra_fields = {'description':
TextField('Provide a description',
description='Item description',
widget=BS3TextAreaFieldWidget())}
# adds custom endpoint to query items by name
@expose('/<name>')
def detail(self, name):
item = self.appbuilder\
.get_session.query(Item)\
.filter(Item.title == name)\
.one_or_none()
return render_template('item.html',
appbuilder=self.appbuilder,
item=item)
@expose('/<filter_id>')
def as_json(self, filter_id):
item = self.appbuilder\
.get_session.query(Item)\
.filter(Item.id == filter_id) \
.one_or_none()
if item is None:
item_json = json.dumps({})
else:
item_json = json.dumps(item.to_json())
response = make_response(item_json, 200)
response.headers['Content-Type'] = 'application/json'
return response
def pre_add(self, item):
user_id = session['user_id']
item.user_id = user_id
def pre_update(self, item):
check_logged_user(item)
def prefill_form(self, form, pk):
# checks if the logged in user is the author,
# if it's not, shows data readonly
category = self.datamodel.get(pk)
user_id = session['user_id']
if category.user_id != int(user_id):
form.title.widget = BS3TextFieldROWidget()
form.description.widget = BS3TextAreaFieldROWidget()
# TODO create a readonly widget that works correctly
# form.category.widget = Select2ROWWidget()
def pre_delete(self, item):
check_logged_user(item)
class CategoryModelView(ModelView):
datamodel = SQLAInterface(Category)
related_views = [ItemModelView]
visible_columns = ['name']
add_columns = visible_columns
edit_columns = visible_columns
# adds custom endpoint to query categories by name
@expose('/<name>')
def detail(self, name):
category = self.appbuilder\
.get_session.query(Category)\
.filter(Category.name == name) \
.one_or_none()
return render_template('category.html',
appbuilder=self.appbuilder,
category=category)
@expose('/<filter_id>')
def as_json(self, filter_id):
category = self.appbuilder\
.get_session.query(Category)\
.filter(Category.id == filter_id) \
.one_or_none()
if category is None:
category_json = json.dumps({})
else:
category_json = json.dumps(category.to_json())
response = make_response(category_json, 200)
response.headers['Content-Type'] = 'application/json'
return response
def pre_add(self, item):
user_id = session['user_id']
item.user_id = user_id
def pre_update(self, item):
check_logged_user(item)
def prefill_form(self, form, pk):
# checks if the logged in user is the author,
# if it's not, shows data readonly
category = self.datamodel.get(pk)
user_id = session['user_id']
if category.user_id != int(user_id):
form.name.widget = BS3TextFieldROWidget()
def pre_delete(self, item):
check_logged_user(item)
"""
Application wide 404 error handler
"""
@appbuilder.app.errorhandler(404)
def page_not_found(e):
return render_template('404.html',
base_template=appbuilder.base_template,
appbuilder=appbuilder), \
404
db.create_all()
appbuilder.add_view(CategoryModelView,
"List Categories",
icon="fa-folder-open-o",
category="Catalog",
category_icon="fa-envelope")
appbuilder.add_view(ItemModelView,
"List Items",
icon="fa-envelope",
category="Catalog")
| 2.515625 | 3 |
apps/jetbrains/jetbrains_actions.py | RichardHladik/knausj_talon | 1 | 12789010 | <filename>apps/jetbrains/jetbrains_actions.py
from talon import Context, actions
ctx = Context()
ctx.matches = r"""
# Requires https://plugins.jetbrains.com/plugin/10504-voice-code-idea
app: jetbrains
"""
@ctx.action_class('user')
class UserActions:
#talon app actions (+custom tab actions)
def tab_final(): actions.user.idea('action GoToLastTab')
# splits.py support begin
def split_clear_all(): actions.user.idea('action UnsplitAll')
def split_clear(): actions.user.idea('action Unsplit')
def split_flip(): actions.user.idea('action ChangeSplitOrientation')
def split_last(): actions.user.idea('action LastSplitter')
def split_next(): actions.user.idea('action NextSplitter')
def split_window_down(): actions.user.idea('action MoveTabDown')
def split_window_horizontally(): actions.user.idea('action SplitHorizontally')
#action(user.split_window_left): user.idea("action MoveTabLeft")
def split_window_right(): actions.user.idea('action MoveTabRight')
#action(user.split_window_up): user.idea("action MoveTabUp")
def split_window_vertically(): actions.user.idea('action SplitVertically')
def split_window(): actions.user.idea('action EditSourceInNewWindow')
# splits.py support end
# multiple_cursors.py support begin
def multi_cursor_add_above(): actions.user.idea('action EditorCloneCaretAbove')
def multi_cursor_add_below(): actions.user.idea('action EditorCloneCaretBelow')
def multi_cursor_disable(): actions.key('escape')
def multi_cursor_enable(): actions.key('shift-alt-insert')
def multi_cursor_select_all_occurrences(): actions.user.idea('action SelectAllOccurrences')
def multi_cursor_select_fewer_occurrences(): actions.user.idea('action UnselectPreviousOccurrence')
def multi_cursor_select_more_occurrences(): actions.user.idea('action SelectNextOccurrence')
@ctx.action_class('app')
class AppActions:
def tab_next(): actions.user.idea('action NextTab')
def tab_previous(): actions.user.idea('action PreviousTab')
def tab_close(): actions.user.idea('action CloseContent')
def tab_reopen(): actions.user.idea('action ReopenClosedTab')
@ctx.action_class('code')
class CodeActions:
#talon code actions
def toggle_comment(): actions.user.idea('action CommentByLineComment')
@ctx.action_class('edit')
class EditActions:
#talon edit actions
def copy(): actions.user.idea('action EditorCopy')
def cut(): actions.user.idea('action EditorCut')
def delete(): actions.user.idea('action EditorBackSpace')
def paste(): actions.user.idea('action EditorPaste')
def find_next(): actions.user.idea('action FindNext')
def find_previous(): actions.user.idea('action FindPrevious')
def find(text: str=None): actions.user.idea('action Find')
def line_clone(): actions.user.idea('action EditorDuplicate')
def line_swap_down(): actions.user.idea('action MoveLineDown')
def line_swap_up(): actions.user.idea('action MoveLineUp')
def indent_more(): actions.user.idea('action EditorIndentLineOrSelection')
def indent_less(): actions.user.idea('action EditorUnindentSelection')
def select_line(n: int=None): actions.user.idea('action EditorSelectLine')
def select_word(): actions.user.idea('action EditorSelectWord')
def select_all(): actions.user.idea('action $SelectAll')
def file_start(): actions.user.idea('action EditorTextStart')
def file_end(): actions.user.idea('action EditorTextEnd')
def extend_file_start(): actions.user.idea('action EditorTextStartWithSelection')
def extend_file_end(): actions.user.idea('action EditorTextEndWithSelection')
| 2.359375 | 2 |
pyETM/parameters/flexibility_order.py | robcalon/PyETM | 0 | 12789011 | <reponame>robcalon/PyETM
import json
import numpy
class FlexibilityOrder:
@property
def flexibility_order(self):
# get flexibility order
if self._flexibility_order is None:
self.get_flexibility_order()
return self._flexibility_order
@flexibility_order.setter
def flexibility_order(self, order):
self.change_flexibility_order(order)
def get_flexibility_order(self, **kwargs):
"""get the flexibility order"""
# raise without scenario id
self._raise_scenario_id()
# prepare post
headers = {'Connection':'close'}
post = f'/scenarios/{self.scenario_id}/flexibility_order'
# request response and convert to dict
resp = self.get(post, headers=headers, **kwargs)
order = json.loads(resp)
# drop nestedness of order
order = order['order']
# store updated order
self._flexibility_order = order
return order
def change_flexibility_order(self, order, **kwargs):
"""change flexibility order
parameters
----------
order : list
            desired flexibility order"""
# raise without scenario id
self._raise_scenario_id()
# check flexbility order
order = self._check_flexibility_order(order)
# map order to correct scenario parameter
data = {'flexibility_order': {'order': order}}
# prepare post
headers = {'Connection':'close'}
post = f'/scenarios/{self.scenario_id}/flexibility_order'
# evaluate post
self.put(post, json=data, headers=headers, **kwargs)
# reinitialize scenario
self._reset_session()
def _check_flexibility_order(self, order):
"""check if items in flexbility order are in ETM."""
# convert np,array to list
if isinstance(order, numpy.ndarray):
order = order.tolist()
# access dict for order
if isinstance(order, dict):
order = order['order']
# check items in order
for item in order:
if item not in self.flexibility_order:
raise ValueError(f'"{item}" is not permitted as ' +
'flexibility order item in ETM')
return order
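# A minimal usage sketch (hedged: FlexibilityOrder is a mixin, so the host
# client class, its get/put helpers and the scenario id below are assumptions).
# With a scenario attached, the property wraps the calls above:
#
#   current = client.flexibility_order        # GET .../flexibility_order
#   client.flexibility_order = current[::-1]  # PUT the reversed order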
| 2.53125 | 3 |
app.py | crumpstrr33/imgur_album_downloader | 0 | 12789012 | from ast import literal_eval
from flask import Flask, jsonify, render_template, request, Response
from scripts.downloader import download_hashes
from scripts.checks import check_info
app = Flask(__name__)
@app.route("/check/")
def check():
"""
Does the following checks:
- The album hash (album_hash) is less than 8 characters long
- The album exists (a get request gives a 200)
- The album exists and has images (some have 0 images)
- The .ini file exists (relative to cwd aka directory this file is in)
- If the new directory option was checked:
- Will attempt to make the new directory
- If it isn't checked:
- The chosen directory exists
- If the empty directory option was checked:
- The chosen directory is also empty
If it passes these checks, the list of the image URLs are passed back.
Otherwise, a response is passed back that triggers an alert and doesn't
follow through with the download.
"""
new_dir = literal_eval(request.args.get("new_dir"))
empty_dir = literal_eval(request.args.get("empty_dir"))
img_dir = request.args.get("img_dir")
album_hash = request.args.get("album_hash")
response, img_list = check_info(new_dir, empty_dir, img_dir, album_hash)
return jsonify(response=response, img_list=img_list)
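# A quick way to exercise this endpoint locally (all parameter values below
# are placeholders, not real albums or paths):
#
#   GET /check/?new_dir=False&empty_dir=True&img_dir=/tmp/imgs&album_hash=abc12
#   -> {"response": ..., "img_list": [...]}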
@app.route("/download_album/<hash_id>")
def download_album(hash_id):
"""
Downloads the album and returns info to the front such as current pic
number downloaded, total to be downloaded and so on.
"""
album_hash = request.args.get("album_hash")
img_dir = request.args.get("img_dir")
new_dir = literal_eval(request.args.get("new_dir"))
empty_dir = literal_eval(request.args.get("empty_dir"))
img_list = literal_eval(request.args.get("img_list"))
return Response(
download_hashes(album_hash, img_dir, hash_id, img_list),
mimetype="text/event-stream",
)
@app.route("/")
def index():
return render_template("main.html")
if __name__ == "__main__":
app.debug = True
app.run(threaded=True)
| 3.015625 | 3 |
API.py | erose1337/export_opera_passwords | 0 | 12789013 | <reponame>erose1337/export_opera_passwords
VERSION = "1.0.1"
LANGUAGE = "python"
PROJECT = "export_opera_passwords"
API = {"export_opera_passwords.main.main" : {"command line arguments" : ("output filename",
"database filename"),
"side effects" : ("creates output file", )}
}
| 1.46875 | 1 |
examples/10-kdh.py | hebrewsnabla/dh | 1 | 12789014 | #!/usr/bin/env python
'''
KDH at an individual k-point
'''
from functools import reduce
import numpy
from pyscf.pbc import gto
from pyscf import pbcdh, lib
#lib.num_threads(28)
cell = gto.Cell()
cell.atom='''
C 0.000000000000 0.000000000000 0.000000000000
C 1.685068664391 1.685068664391 1.685068664391
'''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.verbose = 5
cell.build()
#
# DF-KDH with 2x2x2 k-points
#
kpts = cell.make_kpts([2,2,2])
#kpts = cell.make_kpts([4,4,4])
#kmf = scf.KRHF(cell)#.rs_density_fit()
#kmf.kpts = kpts
#ehf = kmf.kernel()
mypt = pbcdh.KDH(cell, xc="XYG3", kpts=kpts)
mypt.max_memory = 10000
mypt.kernel()
print("PBC-XYG3 energy (per unit cell) =", mypt.e_tot)
| 2.078125 | 2 |
lists_dictionary/Double Char.py | vasetousa/Python-fundamentals | 0 | 12789015 |
word = input()
for i in range(len(word)):
print(word[i]*2, end="") # printing every letter from the string "word" x 2
| 4.125 | 4 |
dork_compose/plugins/cleanup.py | iamdork/dork-compose | 2 | 12789016 | <reponame>iamdork/dork-compose
import dork_compose.plugin
from docker.api.client import APIClient
class Plugin(dork_compose.plugin.Plugin):
def cleanup(self):
client = APIClient()
# Remove unused volumes.
volumes = client.volumes({'dangling': True})
if volumes and volumes['Volumes']:
for volume in volumes['Volumes']:
try:
client.remove_volume(volume['Name'])
except Exception:
pass
# Remove unused images.
images = client.images(filters={'dangling': True})
if images:
for image in images:
try:
client.remove_image(image['Id'], force=True)
except Exception:
pass
| 2.0625 | 2 |
src/api/dataflow/tests/test_modeling/test_model/test_model_task.py | Chromico/bk-base | 84 | 12789017 | <reponame>Chromico/bk-base<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import mock
from rest_framework.test import APITestCase
from dataflow.batch.models.bkdata_flow import ProcessingBatchInfo
from dataflow.modeling.models import MLSqlExecuteLog
from dataflow.modeling.tasks import ReleaseModelTask
from dataflow.stream.api.api_helper import MetaApiHelper
class TestReleaseModelTask(APITestCase):
databases = "__all__"
def setUp(self):
MLSqlExecuteLog.objects.create(id=1, context="", created_by="admin")
def test_set_processing_id_mlsql_link(self):
ProcessingBatchInfo.objects.create(
processing_id="591_rfc_result_release_1",
batch_id="591_rfc_result_release_1",
processor_type="sql",
processor_logic='{"sql": "create table 591_rfc_result_release_1 as '
"run 591_rfc_model_release_1(features_col=<double_1,double_2>) as category1"
",double_1,double_2 "
'from 591_string_indexer_result_release_1;"}',
component_type="spark_mllib",
count_freq=0,
delay=0,
)
ProcessingBatchInfo.objects.create(
processing_id="591_string_indexer_result_release_1",
batch_id="591_string_indexer_result_release_1",
processor_type="sql",
processor_logic='{"sql": "create table 591_string_indexer_result_release_1 as '
"run 591_string_indexer_release_1(input_col=char_1,handle_invalid='keep') as indexed"
',double_1,double_2 from 591_source_data;"}',
component_type="spark_mllib",
count_freq=0,
delay=0,
)
data_processing_values = {
"591_rfc_result_release_1": {"inputs": [{"data_set_id": "591_string_indexer_result_release_1"}]},
"591_string_indexer_result_release_1": {"inputs": [{"data_set_id": "591_source_data"}]},
"591_source_data": {"inputs": [{"data_set_id": "591_clean_data"}]},
}
MetaApiHelper.get_data_processing = mock.Mock(side_effect=lambda p_id: data_processing_values[p_id])
task = ReleaseModelTask(1)
actual_mlsql_link = []
actual_processing_ids = []
task._ReleaseModelTask__set_processing_id_mlsql_link(
"591_rfc_result_release_1",
[
"591_rfc_result_release_1",
"591_rfc_model_release_1",
"591_string_indexer_result_release_1",
"591_string_indexer_release_1",
],
actual_mlsql_link,
actual_processing_ids,
)
expected_mlsql_link = [
"create table 591_string_indexer_result_release_1 as "
"run 591_string_indexer_release_1(input_col=char_1,handle_invalid='keep') as indexed"
",double_1,double_2 from 591_source_data;",
"create table 591_rfc_result_release_1 as "
"run 591_rfc_model_release_1(features_col=<double_1,double_2>) as category1"
",double_1,double_2 from 591_string_indexer_result_release_1;",
]
expected_processing_ids = [
"591_string_indexer_result_release_1",
"591_rfc_result_release_1",
]
assert len(actual_mlsql_link) == len(expected_mlsql_link)
assert all([a == b for a, b in zip(actual_mlsql_link, expected_mlsql_link)])
assert len(expected_processing_ids) == len(actual_processing_ids)
assert all([a == b for a, b in zip(actual_processing_ids, expected_processing_ids)])
| 1.351563 | 1 |
main_pipeline/data_processing/compute_raster_features.py | andher1802/geomodelling_challenge | 0 | 12789018 | from data_collection.read_sentinel import pair_imagenames
from utils.set_user_input import set_arguments_pipeline
from utils.raster_helper import read_url_image, read_input_geometry, array2raster
import numpy as np
import rasterio
def compute_ndvi(band_inf, bands=["red", "nir"]):
"""
    This function computes the NDVI (normalized difference vegetation index),
    (nir - red) / (nir + red), from the red and near-infrared bands returned
    by the data catalog search.
"""
input_geometry = read_input_geometry(set_arguments_pipeline()["input_geometry"])
post_fix = "_band_info"
red_band = band_inf[bands[0] + post_fix]
nir_band = band_inf[bands[1] + post_fix]
imagepairs_url_list = pair_imagenames(red_band, nir_band)
ndvi_results = {}
progress_counter = 0
for image_pair in imagepairs_url_list:
band_red_url = [
red_url for red_url in imagepairs_url_list[image_pair] if "B04" in red_url
][0]
band_nir_url = [
nir_url for nir_url in imagepairs_url_list[image_pair] if "B08" in nir_url
][0]
band_red_image = read_url_image(band_red_url, input_geometry).astype(float)
band_nir_image = read_url_image(band_nir_url, input_geometry).astype(float)
ndvi_result = np.empty(band_red_image.shape, dtype=rasterio.float32)
check = np.logical_or(band_red_image > 0, band_nir_image > 0)
ndvi_result = np.where(
check,
(band_nir_image - band_red_image) / (band_nir_image + band_red_image),
-999,
)
array2raster(ndvi_result, input_geometry, band_red_url)
ndvi_results[image_pair] = [ndvi_result]
progress_counter += 1
print(
"{0} of {1} images processed".format(
progress_counter, len(imagepairs_url_list)
)
)
return ndvi_results
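# A minimal usage sketch (the band_inf layout is inferred from the
# "<band>_band_info" keys used above, and the URLs are placeholders):
#
#   band_inf = {
#       "red_band_info": ["https://example.com/scene_1/B04.tif"],
#       "nir_band_info": ["https://example.com/scene_1/B08.tif"],
#   }
#   ndvi_by_scene = compute_ndvi(band_inf)   # {scene_id: [ndvi_array]}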
| 2.828125 | 3 |
tests/integration/test_network_plugin.py | vmware/tosca-vcloud-plugin | 14 | 12789019 | <reponame>vmware/tosca-vcloud-plugin
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from cloudify.mocks import MockCloudifyContext
from network_plugin import (floatingip, network, security_group, public_nat,
keypair, port)
from server_plugin.server import VCLOUD_VAPP_NAME
from network_plugin.network import VCLOUD_NETWORK_NAME
from network_plugin import CheckAssignedExternalIp
from cloudify import exceptions as cfy_exc
from tests.integration import TestCase
# for skipping test add this before test function:
# @unittest.skip("skip test")
class ValidationOperationsTestCase(TestCase):
def setUp(self):
name = "testnode"
self.ctx = MockCloudifyContext(
node_id=name,
node_name=name,
properties={'vcloud_config': self.vcloud_config})
ctx_patch = mock.patch('vcloud_plugin_common.ctx', self.ctx)
ctx_patch.start()
self.addCleanup(ctx_patch.stop)
super(self.__class__, self).setUp()
def test_validation(self):
self.ctx.node.properties.update(
{'floatingip': self.test_config['floatingip']})
with mock.patch('network_plugin.floatingip.ctx', self.ctx):
floatingip.creation_validation()
self.ctx.node.properties.update(
{'floatingip': self.test_config['floatingip_auto']})
with mock.patch('network_plugin.floatingip.ctx', self.ctx):
floatingip.creation_validation()
self.ctx.node.properties.update(
{'private_key_path': os.path.realpath(__file__)})
with mock.patch('network_plugin.keypair.ctx', self.ctx):
keypair.creation_validation()
self.ctx.node.properties.update(
{"resource_id": self.test_config['network']['name'],
"network": self.test_config['network'],
"use_external_resource": False})
with mock.patch('network_plugin.network.ctx', self.ctx):
network.creation_validation()
self.ctx.node.properties.update(
{'port': {
'network': self.test_config['management_network'],
'ip_allocation_mode': 'dhcp',
'primary_interface': True}})
with mock.patch('network_plugin.port.ctx', self.ctx):
port.creation_validation()
self.ctx.node.properties.update(
{"nat": self.test_config['public_nat']['nat'],
"rules": self.test_config['public_nat']['rules_net']})
with mock.patch('network_plugin.public_nat.ctx', self.ctx):
public_nat.creation_validation()
self.ctx.node.properties.update(self.test_config['security_group'])
with mock.patch('network_plugin.security_group.ctx', self.ctx):
security_group.creation_validation()
class FloatingIPOperationsTestCase(TestCase):
def setUp(self):
name = "testnode"
self.properties = {
'vcloud_config': self.vcloud_config,
'floatingip': self.test_config['floatingip']
}
self.ctx = MockCloudifyContext(
node_id=name,
node_name=name,
properties={},
target=MockCloudifyContext(node_id="target",
properties=self.properties),
source=MockCloudifyContext(
node_id="source",
properties={'vcloud_config': self.vcloud_config},
runtime_properties={
VCLOUD_VAPP_NAME: self.test_config['test_vm']}
)
)
ctx_patch1 = mock.patch('network_plugin.floatingip.ctx', self.ctx)
ctx_patch2 = mock.patch('vcloud_plugin_common.ctx', self.ctx)
ctx_patch1.start()
ctx_patch2.start()
self.addCleanup(ctx_patch1.stop)
self.addCleanup(ctx_patch2.stop)
super(self.__class__, self).setUp()
def tearDown(self):
super(self.__class__, self).tearDown()
def test_floating_ip_create_delete_with_explicit_ip(self):
self.ctx.target.node.properties['floatingip'].update(
self.test_config['floatingip'])
public_ip = self.ctx.target.node.properties['floatingip']['public_ip']
CheckAssignedExternalIp(public_ip, self._get_gateway())
floatingip.connect_floatingip()
floatingip.disconnect_floatingip()
CheckAssignedExternalIp(public_ip, self._get_gateway())
def test_floating_ip_create_delete_with_autoget_ip(self):
self.ctx.target.node.properties['floatingip'].update(
self.test_config['floatingip'])
del self.ctx.target.node.properties['floatingip']['public_ip']
floatingip.connect_floatingip()
public_ip = self.ctx.target.instance.runtime_properties['public_ip']
self.assertRaises(cfy_exc.NonRecoverableError,
CheckAssignedExternalIp,
public_ip,
self._get_gateway())
self.assertTrue(public_ip)
floatingip.disconnect_floatingip()
CheckAssignedExternalIp(public_ip, self._get_gateway())
def _get_gateway(self):
return self.vca_client.get_gateway(
self.vcloud_config["org"],
self.ctx.target.node.properties['floatingip']['edge_gateway'])
class OrgNetworkOperationsTestCase(TestCase):
def setUp(self):
self.net_name = self.test_config['network']['name']
self.existing_net_name = self.test_config['test_network_name']
self.ctx = MockCloudifyContext(
node_id=self.net_name,
node_name=self.net_name,
properties={"resource_id": self.existing_net_name,
"network": self.test_config['network'],
"vcloud_config": self.vcloud_config,
"use_external_resource": False})
self.org_name = self.vcloud_config["org"]
ctx_patch1 = mock.patch('network_plugin.network.ctx', self.ctx)
ctx_patch2 = mock.patch('vcloud_plugin_common.ctx', self.ctx)
ctx_patch1.start()
ctx_patch2.start()
self.addCleanup(ctx_patch1.stop)
self.addCleanup(ctx_patch2.stop)
super(self.__class__, self).setUp()
def get_pools(self):
gateway = self.vca_client.get_gateways(self.org_name)[0]
if not gateway:
raise cfy_exc.NonRecoverableError("Gateway not found")
gatewayConfiguration = gateway.me.get_Configuration()
edgeGatewayServiceConfiguration = \
gatewayConfiguration.get_EdgeGatewayServiceConfiguration()
dhcpService = filter(
lambda service: (service.__class__.__name__
== "GatewayDhcpServiceType"),
edgeGatewayServiceConfiguration.get_NetworkService())[0]
return dhcpService.get_Pool()
def tearDown(self):
super(self.__class__, self).tearDown()
def test_orgnetwork_create_delete(self):
self.assertNotIn(self.net_name,
network._get_network_list(self.vca_client,
self.org_name))
start_pools = len(self.get_pools())
network.create()
self.assertIn(self.net_name,
network._get_network_list(self.vca_client,
self.org_name))
self.assertEqual(start_pools + 1, len(self.get_pools()))
network.delete()
self.assertNotIn(self.net_name,
network._get_network_list(self.vca_client,
self.org_name))
self.assertEqual(start_pools, len(self.get_pools()))
class SecurityGroupOperationsTestCase(TestCase):
def setUp(self):
name = "testnode"
self.ctx = MockCloudifyContext(
node_id=name,
node_name=name,
properties={},
target=MockCloudifyContext(
node_id="target",
properties=self.test_config['security_group']),
source=MockCloudifyContext(
node_id="source",
properties={'vcloud_config': self.vcloud_config},
runtime_properties={
VCLOUD_VAPP_NAME: self.test_config['test_vm']}
)
)
ctx_patch1 = mock.patch('network_plugin.security_group.ctx', self.ctx)
ctx_patch2 = mock.patch('vcloud_plugin_common.ctx', self.ctx)
ctx_patch1.start()
ctx_patch2.start()
self.addCleanup(ctx_patch1.stop)
self.addCleanup(ctx_patch2.stop)
self.org_name = self.vcloud_config["org"]
super(self.__class__, self).setUp()
def tearDown(self):
super(self.__class__, self).tearDown()
def test_firewall_rules_create_delete(self):
rules = len(self.get_rules())
security_group.create()
self.assertEqual(rules + 2, len(self.get_rules()))
security_group.delete()
self.assertEqual(rules, len(self.get_rules()))
def get_rules(self):
gateway = self.vca_client.get_gateways(self.org_name)[0]
if not gateway:
raise cfy_exc.NonRecoverableError("Gateway not found")
gatewayConfiguration = gateway.me.get_Configuration()
edgeGatewayServiceConfiguration = \
gatewayConfiguration.get_EdgeGatewayServiceConfiguration()
firewallService = filter(
lambda service: (service.__class__.__name__
== "FirewallServiceType"),
edgeGatewayServiceConfiguration.get_NetworkService())[0]
return firewallService.get_FirewallRule()
class PublicNatOperationsTestCase(TestCase):
def setUp(self):
name = "testnode"
self.ctx = MockCloudifyContext(
node_id=name,
node_name=name,
properties={},
target=MockCloudifyContext(
node_id="target",
properties={
"nat": self.test_config['public_nat']['nat'],
'use_external_resource': False,
"rules": {}}),
source=MockCloudifyContext(
node_id="source",
properties={"vcloud_config": self.vcloud_config},
runtime_properties={
VCLOUD_VAPP_NAME:
self.test_config['public_nat']['test_vm'],
VCLOUD_NETWORK_NAME:
self.test_config['public_nat']['network_name']}
)
)
ctx_patch1 = mock.patch('network_plugin.public_nat.ctx', self.ctx)
ctx_patch2 = mock.patch('vcloud_plugin_common.ctx', self.ctx)
ctx_patch1.start()
ctx_patch2.start()
self.addCleanup(ctx_patch1.stop)
self.addCleanup(ctx_patch2.stop)
super(self.__class__, self).setUp()
def tearDown(self):
super(self.__class__, self).tearDown()
def test_public_network_connected_to_nat(self):
self.ctx.target.node.properties['rules'] = \
self.test_config['public_nat']['rules_net']
self.ctx.source.node.properties['resource_id'] = \
self.test_config['public_nat']['network_name']
rules_count = self.get_rules_count()
public_nat.net_connect_to_nat()
self.assertEqual(rules_count + 1, self.get_rules_count())
public_nat.net_disconnect_from_nat()
self.assertEqual(rules_count, self.get_rules_count())
def test_public_server_connected_to_nat(self):
self.ctx.target.node.properties['rules'] = \
self.test_config['public_nat']['rules_port']
rules_count = self.get_rules_count()
public_nat.server_connect_to_nat()
self.assertEqual(rules_count + 3, self.get_rules_count())
public_nat.server_disconnect_from_nat()
self.assertEqual(rules_count, self.get_rules_count())
def get_rules_count(self):
return len(self._get_gateway().get_nat_rules())
def _get_gateway(self):
return self.vca_client.get_gateway(
self.vcloud_config["org"],
self.ctx.target.node.properties['nat']['edge_gateway'])
| 2.015625 | 2 |
Level.py | blcmill/ILS-Z399-Fall-Down | 0 | 12789020 | <gh_stars>0
import pygame
from Color import Color
from itertools import repeat
class Level:
def __init__(self,filename):
self.block_size = (self.w,self.h) = (80,80)
self.level = []
self.screen_player_offset = (100,300)
self.player_position = (0,0)
self.enemies = []
self.floor = []
self.screen_shake = False
self.default_y = 0
f = open(filename,'r')
for l in f:
self.level.append(l)
if len(self.level):
self.screen = pygame.Surface((len(self.level[0])*self.w,len(self.level)*self.h))
self.screen.fill(Color.white)
j = 0
for r in self.level:
i = 0
for c in r:
pos = (i*self.w,j*self.h)
if c == 'P':
self.player_position = pos
if c == 'E':
self.enemies.append(pos)
if c == '1':
self.floor.append(pos)
i += 1
j += 1
self.rect = self.screen.get_rect()
self.master = pygame.Surface((self.rect.width,self.rect.height))
self.master.blit(self.screen,(0,0),self.rect)
def get_full_screen(self):
self.screen.blit(self.master,(0,0),self.rect)
return self.screen
def get_player_starting_position(self):
return self.player_position
def get_enemies(self):
return self.enemies
def get_floor(self):
return self.floor
def get_screen(self):
return self.screen
def get_rect(self,dim,player):
'''
Return the portion of the level where the player is currently visible
'''
(dx,dy) = dim
rx = 0
'''
if rx < 0:
rx = 0
if rx + dx > self.rect.width:
rx = self.rect.width - dx
'''
'''
if ry < 0:
ry = 0
if ry + dy > self.rect.height:
ry = self.rect.height - dy
'''
rx = 0
self.default_y += 1
self.default_y *= 1.0005
ry = self.default_y
rect = pygame.Rect(rx,ry,dx,dy)
return rect
def shake(self):
s = -1
for _ in range(0, 3):
for x in range(0, 30, 10):
yield (x*s, 0)
            for x in range(30, 0, -10):
yield (x*s, 0)
s *= -1
while True:
yield (0, 0)
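    # Typical use of the shake() generator above (assumed render loop, not
    # part of this class): add the yielded offset to the blit position each
    # frame while screen_shake is active.
    #
    #   offsets = level.shake()
    #   dx, dy = next(offsets)
    #   window.blit(level.get_screen(), (dx, dy))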
def set_default_y(self):
self.default_y = 0
rect = pygame.Rect(0, 0, 800, 800)
return rect
class Floor(pygame.sprite.Sprite):
def __init__(self,gravity,position,size):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface(size)
self.image.fill(Color.gray_9)
self.rect = self.image.get_rect()
(self.rect.x,self.rect.y) = position
self.gravity = gravity
def get_position(self):
return (self.rect.x,self.rect.y)
def update(self):
'''
update behavior
'''
| 3.046875 | 3 |
Customer Lifetime Value/pre_processing.py | uicloudanalytics/Data-Apps | 0 | 12789021 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
import numpy as np
import plotly.express as px
# pd.options.display.float_format = '${:,.2f}'.format
# Load the data
data = pd.read_excel("./data/Online_Retail.xlsx")
# remove duplicate rows
filtered_data = data.drop_duplicates()
filtered_data.columns
# Plot the bar chart of countries
filtered_data.Country.value_counts()[:10].plot(kind='bar')
# Filter all quantities that are greater than zero
filtered_data = filtered_data[(filtered_data['Quantity']>0)]
# list(filtered_data.Country.unique())
filtered_data = filtered_data [['CustomerID','Description','InvoiceDate','InvoiceNo','Quantity','UnitPrice', 'Country']]
# Calculate total purchase
filtered_data['TotalPurchase'] = filtered_data['Quantity'] * filtered_data['UnitPrice']
filtered_data_group = filtered_data.groupby(['CustomerID','Country']).agg({'InvoiceDate': lambda date: (date.max() - date.min()).days,
'InvoiceNo': lambda num: len(num),
'Quantity': lambda quant: quant.sum(),
'TotalPurchase': lambda price: price.sum()})
# Change the name of columns
filtered_data_group.columns=['num_days','num_transactions','num_units','spent_money']
# Average Order Value
filtered_data_group['avg_order_value'] = filtered_data_group['spent_money']/filtered_data_group['num_transactions']
# Calculate purchase frequency
purchase_frequency = sum(filtered_data_group['num_transactions'])/filtered_data_group.shape[0]
# Repeat rate
repeat_rate = round(filtered_data_group[filtered_data_group.num_transactions > 1].shape[0]/filtered_data_group.shape[0],2)
# Churn Percentage
churn_rate = round(1-repeat_rate,2)
filtered_data_group.reset_index()
filtered_data_group['profit_margin'] = filtered_data_group['spent_money']*0.05
# Customer Value
filtered_data_group['CLV'] = (filtered_data_group['avg_order_value']*purchase_frequency)/churn_rate
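# Worked example (illustrative numbers only): with avg_order_value = 50,
# purchase_frequency = 4 and churn_rate = 0.25, CLV = (50 * 4) / 0.25 = 800.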
# Resetting the index
filtered_data_group.reset_index(inplace = True)
# Formatting the currency fields
# filtered_data_group['spent_money', 'avg_order_value','profit_margin'] = filtered_data_group.spent_money.apply(lambda x : "{:,}".format(x))
df_plot = filtered_data.groupby(['Country','Description','UnitPrice','Quantity']).agg({'TotalPurchase': 'sum'}).reset_index()
# df2 = df1.loc[df1['Country'] == 'USA']
# px.scatter(df_plot[:25000], x="UnitPrice", y="TotalPurchase", color = 'Quantity', size='Quantity', title="Product Sales", size_max=20, log_y= True, log_x= True)
fig_UnitPriceVsQuantity = px.scatter(df_plot[:25000], x="UnitPrice", y="Quantity", color = 'Country',
size='TotalPurchase', size_max=20, log_y= True, log_x= True, title= "PURCHASE TREND ACROSS COUNTRIES")
# formating the float fields
var_float_filtered_group = [i for i in filtered_data_group.columns if filtered_data_group.dtypes[i]=='float64']
for i in var_float_filtered_group:
filtered_data_group[i] = filtered_data_group[i].round(2)
filtered_data_group[i].apply(lambda x : "{:,}".format(x))
var_float_filtered = [i for i in filtered_data.columns if filtered_data.dtypes[i]=='float64']
for i in var_float_filtered:
filtered_data[i] = filtered_data[i].round(2)
filtered_data[i].apply(lambda x : "{:,}".format(x))
| 3 | 3 |
main.py | aysunakarsu/google-shopping-performance-api | 1 | 12789022 | <filename>main.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START gae_python37_app]
from __future__ import absolute_import
from flask import Flask
import os
from google.cloud import storage
from google.cloud import bigquery
import sys
from googleads import adwords
from google.cloud.exceptions import NotFound
import glob
import gcsfs
import pandas as pd
import pandas_gbq as pd_gbq
from pyarrow import csv
google_application_credentials = 'your_file.json'
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = google_application_credentials
app = Flask(__name__)
destination_file_name_01 = 'your_destination_file_name_01.csv'
destination_file_name_02 = 'your_destination_file_name_02.csv'
gcloud_bucket_name = 'your_gcloud_bucket_name'
dataset_id = 'your_dataset_id'
project_id = 'your_project_id'
gcs = storage.Client()
bucket = gcs.get_bucket(gcloud_bucket_name)
client_bigquery = bigquery.Client()
dataset_ref = client_bigquery.dataset(dataset_id)
# Location of big query tables
table_id_01 = 'google_shopping_performance_table_01'
table_id_02 = 'google_shopping_performance_table_02'
table_ref_01 = dataset_ref.table(table_id_01)
table_ref_02 = dataset_ref.table(table_id_02)
# Uri of csv file in cloudstorage
uri_01 = "gs://"+gcloud_bucket_name+"/"+destination_file_name_01
uri_02 = "gs://"+gcloud_bucket_name+"/"+destination_file_name_02
def table_exists(client_bigquery,table_ref):
try:
table = client_bigquery.get_table(table_ref)
if table:
return True
except NotFound as error:
return False
def remove_bigquery_tables(table):
if table_exists(client_bigquery,table):
client_bigquery.delete_table(table)
def load_to_bq(fs,destination_file_name,dataset_id,table,chunk_row_size=2000000):
with fs.open(gcloud_bucket_name+'/'+destination_file_name, "rb") as tmp_file:
        arrow_table = csv.read_csv(tmp_file)
        df = arrow_table.to_pandas()
df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '').str.replace('.','')
pd_gbq.to_gbq(df,destination_table=dataset_id+'.'+table,project_id=project_id,chunksize=chunk_row_size,if_exists='append')
@app.route('/')
def load_to_bigquery_google_shopping_performance_report():
fs = gcsfs.GCSFileSystem(project=project_id,token=google_application_credentials)
# if you don't want to remove existing biqquery tables then comment out 2 below lines
remove_bigquery_tables(table_ref_01)
remove_bigquery_tables(table_ref_02)
client = adwords.AdWordsClient.LoadFromStorage('./googleads.yaml')
# Initialize appropriate service.
report_downloader = client.GetReportDownloader(version='v201809')
# Create report1 query
report_query_01 = (
'SELECT CampaignName, '
'OfferId,Impressions,Clicks,AverageCpc,Cost,Ctr,Conversions,ConversionValue, '
'ProductTypeL1,ProductTypeL2,ProductTypeL3,ProductTypeL4,ProductTypeL5, '
'Date '
'FROM SHOPPING_PERFORMANCE_REPORT '
'WHERE CampaignStatus = "ENABLED" AND AdGroupStatus = "ENABLED" '
'DURING LAST_7_DAYS '
)
# Download report1 to temporary directory
with fs.open(gcloud_bucket_name+'/'+destination_file_name_01, "wb") as file1:
report_downloader.DownloadReportWithAwql(
report_query_01, 'CSV',file1, skip_report_header=True,
skip_column_header=False, skip_report_summary=True,
include_zero_impressions=False)
# Create report2 query
report_query_02 = (
'SELECT OfferId,ProductTitle,ConversionTypeName,ConversionCategoryName, '
'ExternalConversionSource '
'FROM SHOPPING_PERFORMANCE_REPORT '
'WHERE CampaignStatus = "ENABLED" AND AdGroupStatus = "ENABLED" '
'DURING LAST_7_DAYS '
)
# Download report2 to temporary directory
with fs.open(gcloud_bucket_name+'/'+destination_file_name_02, "wb") as file2:
report_downloader.DownloadReportWithAwql(
report_query_02, 'CSV', file2, skip_report_header=True,
skip_column_header=False, skip_report_summary=True,
include_zero_impressions=False)
# Load reports to bigquery from cloud storage
load_to_bq(fs,destination_file_name_01,dataset_id,table_id_01)
load_to_bq(fs,destination_file_name_02,dataset_id,table_id_02)
destination_table_01 = client_bigquery.get_table(dataset_ref.table(table_id_01))
destination_table_02 = client_bigquery.get_table(dataset_ref.table(table_id_02))
return ("Loaded for table 01 {} rows.\nLoaded for table 02 {} rows".format(destination_table_01.num_rows,destination_table_02.num_rows))
@app.route('/_ah/warmup')
def warmup():
# Handle your warmup logic here, e.g. set up a database connection pool
return '', 200, {}
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
# [END gae_python37_app]
| 2.1875 | 2 |
tests/agent_tests.py | tomakehurst/saboteur | 258 | 12789023 | from saboteur.agent import SaboteurWebApp
import json
import unittest
from test_utils import MockShell
from saboteur.apicommands import FAULT_TYPES, alphabetical_keys
def post_request(params):
return request('POST', params)
def delete_request():
return {'path': '/',
'method': 'DELETE'}
def request(method, params):
return {'path': '/',
'method': method,
'body': json.dumps(params)}
def http_request(method, params_json):
return {'path': '/',
'method': method,
'body': params_json}
class TestAgent(unittest.TestCase):
def setUp(self):
self.shell = MockShell()
self.app = SaboteurWebApp(self.shell)
def test_successful_iptables_based_fault_returns_200_and_executes_correct_command(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'NETWORK_FAILURE',
'direction': 'IN',
'to_port': 80,
'protocol': 'TCP'
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(response['status'], 200)
self.assertEqual(self.shell.last_command, 'sudo /sbin/iptables -A INPUT -p TCP -j DROP --dport 80')
def test_invalid_json_returns_400(self):
params = '{ "name": }'
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps('Not valid JSON'), response['body'])
def test_invalid_fault_type(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'WORMS'
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps({
"errors": {
"type": "must be present and one of " + str(alphabetical_keys(FAULT_TYPES))
}
}),
response['body'])
def test_fault_with_single_invalid_field_returns_400(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'NETWORK_FAILURE',
'to_port': 7871
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps({
"errors": {
"direction": "required key not provided"
}
}),
response['body'])
def test_fault_with_multiple_invalid_fields_returns_400(self):
params = json.dumps({
'name': 'isolate-web-server',
'type': 'DELAY',
'direction': 'IN',
'to_port': 7871,
'delay': 'bad',
'probability': 'worse'
})
response = self.app.handle(http_request('POST', params))
self.assertEqual(400, response['status'])
self.assertEqual(json.dumps({
"errors": {
"delay": "expected int",
"probability": "expected float"
}
}),
response['body'])
def test_reset(self):
self.shell.next_result = 'eth1'
response = self.app.handle(delete_request())
self.assertEqual(response['status'], 200)
self.assertEqual(self.shell.commands, [
'sudo /sbin/iptables -F',
"netstat -i | tail -n+3 | cut -f1 -d ' '",
'sudo /sbin/tc qdisc del dev eth1 root'])
def test_returns_500_when_shell_command_exits_with_non_zero(self):
params = json.dumps({
'name': 'whatever',
'type': 'NETWORK_FAILURE',
'direction': 'IN',
'to_port': 80,
'protocol': 'TCP'
})
self.shell.next_exit_code = 1
response = self.app.handle(http_request('POST', params))
self.assertEqual(500, response['status'])
if __name__ == '__main__':
unittest.main()
| 2.421875 | 2 |
scrapi/harvesters/cuny.py | wearpants/scrapi | 34 | 12789024 | """
A harvester for City University of New York for the SHARE project
An example API call: http://academicworks.cuny.edu/do/oai/request?
verb=ListRecords&metadataPrefix=oai_dc
"""
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class CUNY_Harvester(OAIHarvester):
short_name = 'cuny'
long_name = 'City University of New York'
url = 'http://academicworks.cuny.edu'
base_url = 'http://academicworks.cuny.edu/do/oai/'
property_list = ['publisher', 'language', 'format', 'source', 'date', 'identifier', 'type']
approved_Sets = [
u'gc_etds', u'gc_etds_legacy', u'bx_conf_bet14', u'ho_conf_bet15',
u'ols_proceedings_lac', u'gj_etds', u'gc_cs_tr', u'ufs_conf', u'ols_proceedings',
u'gc_etds_all', u'gc_econ_wp', u'cl_pubs', u'gc_pubs', u'ufs_conf_sp15',
u'gc_studentpubs', u'cc_conf_hic', u'lacuny_conf_2015_gallery', u'lacuny_conf_2015',
u'cc_etds_theses', u'lg_oers', u'bc_oers', u'cc_oers', u'oers', u'ny_oers',
u'qc_oers', u'gc_oers', u'hc_oers', u'si_oers', u'qb_oers', u'nc_oers', u'bx_oers',
u'ny_pubs', u'jj_pubs', u'yc_pubs', u'sph_pubs', u'pubs', u'bc_pubs', u'bx_pubs',
u'nc_pubs', u'bb_pubs', u'qc_pubs', u'oaa_pubs', u'gj_pubs', u'kb_pubs', u'lg_pubs',
u'si_pubs', u'cc_pubs', u'ho_pubs', u'qb_pubs', u'le_pubs', u'bm_pubs', u'dsi_pubs',
u'me_pubs', u'hc_pubs', u'hc_sas_etds', u'bb_etds', u'etds', u'ufs'
]
| 1.992188 | 2 |
run.py | RavicharanN/Messenger-scheduler | 1 | 12789025 | import os
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Change the info here
messenger_link = "https://www.messenger.com/t/abc"
email = ""
password = ""
message = ""
os.environ["SELENIUM_SERVER_JAR"] = "selenium-server-standalone-2.41.0.jar"
browser = webdriver.Chrome("/Users/nvravicharan/Desktop/chromedriver")
# makes the browser wait if it can't find an element
browser.implicitly_wait(10)
browser.get(messenger_link)
time.sleep(2)
e_input = browser.find_element_by_xpath("//input[@id='email']")
p_input = browser.find_element_by_xpath("//input[@id='pass']")
e_input.send_keys(email)
p_input.send_keys(password)
submit = browser.find_element_by_xpath("//button[@id='loginbutton']")
submit.submit()
text_field = browser.find_element_by_xpath("//div[@aria-label='Type a message...']")
text_field.send_keys(message)
browser.find_element_by_xpath("//a[@aria-label='Send']").click()
time.sleep(5)
browser.quit() | 2.859375 | 3 |
catnip/__init__.py | ramadan8/Catnip | 1 | 12789026 | from .camera import Camera, Frame
from .event import Event
from .manager import Manager
| 1.085938 | 1 |
numba/specialize/exttypes.py | liuzhenhai/numba | 1 | 12789027 | import ast
import numba
from numba import *
from numba import error
from numba import typesystem
from numba import visitors
from numba import nodes
from numba import function_util
from numba.exttypes import virtual
from numba.traits import traits, Delegate
class ExtensionTypeLowerer(visitors.NumbaTransformer):
"""
Lower extension type attribute accesses and method calls.
"""
def get_handler(self, ext_type):
if ext_type.is_extension and not ext_type.is_autojit_exttype:
return StaticExtensionHandler()
else:
assert ext_type.is_autojit_exttype, ext_type
return DynamicExtensionHandler()
# ______________________________________________________________________
# Attributes
def visit_ExtTypeAttribute(self, node):
"""
Resolve an extension attribute.
"""
handler = self.get_handler(node.ext_type)
self.visitchildren(node)
return handler.handle_attribute_lookup(self.env, node)
# ______________________________________________________________________
# Methods
def visit_NativeFunctionCallNode(self, node):
if node.signature.is_bound_method:
assert isinstance(node.function, nodes.ExtensionMethod)
self.visitlist(node.args)
node = self.visit_ExtensionMethod(node.function, node)
else:
self.visitchildren(node)
return node
def visit_ExtensionMethod(self, node, call_node=None):
"""
Resolve an extension method. We currently only support immediate
calls of extension methods.
"""
if call_node is None:
raise error.NumbaError(node, "Referenced extension method '%s' "
"must be called" % node.attr)
handler = self.get_handler(node.ext_type)
return handler.handle_method_call(self.env, node, call_node)
#------------------------------------------------------------------------
# Handle Static VTable Attributes and Methods
#------------------------------------------------------------------------
class StaticExtensionHandler(object):
"""
Handle attribute lookup and method calls for static extensions
with C++/Cython-like virtual method tables and static object layouts.
"""
def handle_attribute_lookup(self, env, node):
"""
Resolve an extension attribute for a static object layout.
((attributes_struct *)
(((char *) obj) + attributes_offset))->attribute
:node: ExtTypeAttribute AST node
"""
ext_type = node.value.type
offset = ext_type.attr_offset
type = ext_type.attribute_table.to_struct()
if isinstance(node.ctx, ast.Load):
value_type = type.ref() # Load result
else:
value_type = type.pointer() # Use pointer for storage
struct_pointer = nodes.value_at_offset(node.value, offset,
value_type)
result = nodes.StructAttribute(struct_pointer, node.attr,
node.ctx, type.ref())
return result
def handle_method_call(self, env, node, call_node):
"""
Resolve an extension method of a static (C++/Cython-like) vtable:
typedef {
double (*method1)(double);
...
} vtab_struct;
vtab_struct *vtab = *(vtab_struct **) (((char *) obj) + vtab_offset)
void *method = vtab[index]
"""
# Make the object we call the method on clone-able
node.value = nodes.CloneableNode(node.value)
ext_type = node.value.type
offset = ext_type.vtab_offset
vtable_struct = ext_type.vtab_type.to_struct()
vtable_struct_type = vtable_struct.ref()
vtab_struct_pointer_pointer = nodes.value_at_offset(
node.value, offset,vtable_struct_type.pointer())
vtab_struct_pointer = nodes.DereferenceNode(vtab_struct_pointer_pointer)
vmethod = nodes.StructAttribute(vtab_struct_pointer, node.attr,
ast.Load(), vtable_struct_type)
# Insert first argument 'self' in args list
args = call_node.args
args.insert(0, nodes.CloneNode(node.value))
result = nodes.NativeFunctionCallNode(node.type, vmethod, args)
return result
#------------------------------------------------------------------------
# Handle Dynamic VTable Attributes and Methods
#------------------------------------------------------------------------
@traits
class DynamicExtensionHandler(object):
"""
Handle attribute lookup and method calls for autojit extensions
with dynamic perfect-hash-based virtual method tables and dynamic
object layouts.
"""
static_handler = StaticExtensionHandler()
# TODO: Implement hash-based attribute lookup
handle_attribute_lookup = Delegate('static_handler')
def handle_method_call(self, env, node, call_node):
"""
Resolve an extension method of a dynamic hash-based vtable:
PyCustomSlots_Table ***vtab_slot = (((char *) obj) + vtab_offset)
lookup_virtual_method(*vtab_slot)
We may cache (*vtab_slot), but we may not cache (**vtab_slot), since
compilations may regenerate the table.
However, we could *preload* (**vtab_slot), where function calls
invalidate the preload, if we were so inclined.
"""
# Make the object we call the method on clone-able
node.value = nodes.CloneableNode(node.value)
ext_type = node.ext_type
func_signature = node.type #typesystem.extmethod_to_function(node.type)
offset = ext_type.vtab_offset
# __________________________________________________________________
# Retrieve vtab
vtab_ppp = nodes.value_at_offset(node.value, offset,
void.pointer().pointer())
vtab_struct_pp = nodes.DereferenceNode(vtab_ppp)
# __________________________________________________________________
# Calculate pre-hash
prehash = virtual.hash_signature(func_signature, func_signature.name)
prehash_node = nodes.ConstNode(prehash, uint64)
# __________________________________________________________________
# Retrieve method pointer
# A method is always present when it was given a static signature,
# e.g. @double(double)
always_present = node.attr in ext_type.vtab_type.methodnames
args = [vtab_struct_pp, prehash_node]
# lookup_impl = NumbaVirtualLookup()
lookup_impl = DebugVirtualLookup()
ptr = lookup_impl.lookup(env, always_present, node, args)
vmethod = ptr.coerce(func_signature.pointer())
vmethod = vmethod.cloneable
# __________________________________________________________________
# Call method pointer
# Insert first argument 'self' in args list
args = call_node.args
args.insert(0, nodes.CloneNode(node.value))
method_call = nodes.NativeFunctionCallNode(func_signature, vmethod, args)
# __________________________________________________________________
# Generate fallback
# TODO: Subclassing!
# if not always_present:
# # TODO: Enable this path and generate a phi for the result
# # Generate object call
# obj_args = [nodes.CoercionNode(arg, object_) for arg in args]
# obj_args.append(nodes.NULL)
# object_call = function_util.external_call(
# env.context, env.crnt.llvm_module,
# 'PyObject_CallMethodObjArgs', obj_args)
#
# # if vmethod != NULL: vmethod(obj, ...)
# # else: obj.method(...)
# method_call = nodes.if_else(
# ast.NotEq(),
# vmethod.clone, nodes.NULL,
# lhs=method_call, rhs=object_call)
return method_call
#------------------------------------------------------------------------
# Method lookup
#------------------------------------------------------------------------
def call_jit(jit_func, args):
return nodes.NativeCallNode(jit_func.signature, args, jit_func.lfunc)
class NumbaVirtualLookup(object):
"""
Use a numba function from numba.utility.virtuallookup to look up virtual
methods in a hash table.
"""
def lookup(self, env, always_present, node, args):
"""
:param node: ExtensionMethodNode
:param args: [vtable_node, prehash_node]
:return: The virtual method as a Node
"""
from numba.utility import virtuallookup
if always_present and False:
lookup = virtuallookup.lookup_method
else:
lookup = virtuallookup.lookup_and_assert_method
args.append(nodes.const(node.attr, c_string_type))
vmethod = call_jit(lookup, args)
return vmethod
class DebugVirtualLookup(object):
"""
Use a C utility function from numba/utility/utilities/virtuallookup.c
to look up virtual methods in a hash table.
Use for debugging.
"""
def lookup(self, env, always_present, node, args):
args.append(nodes.const(node.attr, c_string_type))
vmethod = function_util.utility_call(
env.context, env.crnt.llvm_module,
"lookup_method", args)
return vmethod
| 2.09375 | 2 |
create_event.py | XtremeNolaner/MySchedule-to-Calendar | 0 | 12789028 | from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
import pickle
import datetime
# What the program can access within Calendar
# See more at https://developers.google.com/calendar/auth
scopes = ["https://www.googleapis.com/auth/calendar"]
flow = InstalledAppFlow.from_client_secrets_file("client_secret.json", scopes=scopes)
# Use this to pull the users credentials into a pickle file
#credentials = flow.run_console()
#pickle.dump(credentials, open("token.pkl", "wb"))
# Read the credentials from a saved pickle file
credentials = pickle.load(open("token.pkl", "rb"))
# Build the calendar resource
service = build("calendar", "v3", credentials=credentials)
# Store a list of Calendars on the account
result = service.calendarList().list().execute()
calendar_id = result["items"][1]["id"]
result = service.events().list(calendarId=calendar_id).execute()
def create_event(shift_information):
"""
Create a Google Calendar Event
Args:
        shift_information: sequence of (start time "HH:MM", end time "HH:MM", shift date)
"""
print("Created Event for " + str(shift_information[2]))
year = shift_information[2].year
month = shift_information[2].month
day = shift_information[2].day
start_hour = int(shift_information[0][0:2])
start_min = int(shift_information[0][-2:])
end_hour = int(shift_information[1][0:2])
end_min = int(shift_information[1][-2:])
start_date_time = datetime.datetime(year, month, day, start_hour, start_min)
end_date_time = datetime.datetime(year, month, day, end_hour, end_min)
# If the shift carries over into another day
if shift_information[1][0] == '0':
end_date_time += datetime.timedelta(days=1)
event = {
"summary": 'Work',
"location": 'Carlow D/T MSA',
"description": 'Shift',
"start": {
"dateTime": start_date_time.strftime('%Y-%m-%dT%H:%M:%S'),
"timeZone": "Europe/London",
},
"end": {
"dateTime": end_date_time.strftime('%Y-%m-%dT%H:%M:%S'),
"timeZone": "Europe/London",
},
"reminders": {
"useDefault": False,
},
}
return service.events().insert(calendarId=calendar_id, body=event, sendNotifications=True).execute()
def is_events_this_week(start_date):
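    """Return True if the calendar has at least one event in the 7 days starting at start_date."""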
end_date = start_date + datetime.timedelta(days=7)
start_date = start_date.isoformat() + 'Z'
end_date = end_date.isoformat() + 'Z'
tests = service.events().list(calendarId=calendar_id, timeMin=start_date, timeMax=end_date).execute()
if not tests['items']:
return False
else:
return True
#check_events() | 3.265625 | 3 |
Assignments/Assignment 2/DS_Assignment2_201911189/A5-4.py | h0han/SE274_2020_spring | 0 | 12789029 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#A5-4 (3 points)
from linked_binary_tree import LinkedBinaryTree
class personal_LBP(LinkedBinaryTree):
def _delete_subtree(self, p):
        # delete each child's subtree first (so p becomes a leaf), then remove p itself
        if self.num_children(p) != 0:
            for child in list(self.children(p)):
                self._delete_subtree(child)
        return self._delete(p)
# The running time of this operation is O(n).
| 3.109375 | 3 |
utils.py | chu-data-lab/zeroer | 13 | 12789030 | <reponame>chu-data-lab/zeroer
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score
from model import get_y_init_given_threshold,ZeroerModel
DEL = 1e-300
def get_results(true_labels, predicted_labels):
p = precision_score(true_labels, predicted_labels)
r = recall_score(true_labels, predicted_labels)
f1 = f1_score(true_labels, predicted_labels)
return p, r, f1
def run_zeroer(similarity_features_df, similarity_features_lr,id_dfs,true_labels,LR_dup_free,LR_identical,run_trans):
similarity_matrix = similarity_features_df.values
y_init = get_y_init_given_threshold(similarity_features_df)
similarity_matrixs = [similarity_matrix,None,None]
y_inits = [y_init,None,None]
if similarity_features_lr[0] is not None:
similarity_matrixs[1] = similarity_features_lr[0].values
similarity_matrixs[2] = similarity_features_lr[1].values
y_inits[1] = get_y_init_given_threshold(similarity_features_lr[0])
y_inits[2] = get_y_init_given_threshold(similarity_features_lr[1])
feature_names = similarity_features_df.columns
c_bay = 0.1
model, y_pred = ZeroerModel.run_em(similarity_matrixs, feature_names, y_inits,id_dfs,LR_dup_free,LR_identical, run_trans, y_true=true_labels,
hard=False, c_bay=c_bay)
if true_labels is not None:
p, r, f1 = get_results(true_labels, np.round(np.clip(y_pred + DEL, 0., 1.)).astype(int))
print("Results after EM:")
print("F1: {:0.2f}, Precision: {:0.2f}, Recall: {:0.2f}".format(f1, p, r))
return y_pred
| 2.5 | 2 |
talk-dpg-20220316/russia-treemap.py | ohnemax/atomwaffen-in-russland | 9 | 12789031 | <filename>talk-dpg-20220316/russia-treemap.py
###############################################################################
# Some standard modules
import os
import sys
import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import squarify
###############################################################################
whtext = " Kernwaffen\n"
gbforces = "für boden-gestützte\nRaketensysteme"
russiadf = pd.read_excel("russia-notebook.xlsx") # Data from Kristensen/Korda 2022, Bulletin of the Atomic Scientists
russiadf['strNumber'] = russiadf['Number'].astype(str)
russiadf['exLabel'] = russiadf['strNumber'] + whtext + russiadf['Type']
russiadf.loc[russiadf['Number'] == 90, 'exLabel'] = 90
for i in range(12):
plt.figure(figsize=(8, 4))
squarify.plot(sizes=russiadf['Number'],
# label=russiadf['Type'],
label=russiadf['exLabel'],
color=russiadf['Color-{:02d}'.format(i)],
pad = True,
text_kwargs={'fontname': 'Fira Sans Condensed', 'fontsize': 12, 'color': 'white'})
plt.text(70, 100, gbforces,
fontname='Fira Sans Condensed', fontsize = 12,
ha = 'center', va = 'baseline')
plt.axis("off")
plt.tight_layout()
plt.savefig("{:02d}.svg".format(i), transparent=True)
plt.savefig("{:02d}.pdf".format(i), transparent=True)
#plt.show()
plt.clf()
| 2.578125 | 3 |
software/urls.py | RevolutionTech/revolutiontech.ca | 0 | 12789032 | """
:Created: 26 July 2015
:Author: <NAME>
"""
from django.conf.urls import url
from basecategory.views import ItemPageView
from software.models import Software
from software.views import SoftwareListView
app_name = "software"
urlpatterns = [
url(
r"^(?P<slug>[\w_-]+)/?$",
ItemPageView.as_view(),
{"items": Software},
name="item_details",
),
url(r"^$", SoftwareListView.as_view(), {"items": Software}, name="software_list"),
]
| 1.898438 | 2 |
examples/kubeflow/tfjob/tf_job_s3_gateway_minio.py | storey247/pachyderm | 0 | 12789033 | <filename>examples/kubeflow/tfjob/tf_job_s3_gateway_minio.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import MinIO library.
from minio import Minio
from minio.error import (ResponseError, BucketAlreadyOwnedByYou,
BucketAlreadyExists)
import os
import argparse
import sys
import tensorflow as tf
unspecified_value = '';
s3_access_key = os.getenv('S3_ACCESS_KEY', unspecified_value)
s3_secret_key = os.getenv('S3_SECRET_KEY', unspecified_value)
s3_secure = int(os.getenv('S3_SECURE', 0))
s3_bucket = os.getenv('S3_BUCKET', 'master.testrepo')
s3_endpoint = os.getenv('S3_ENDPOINT', 'localhost:30060')
# this is a directory with four files in it,
# mf, sis, boom, and bah,
# that's built into the image
input_path = os.getenv('INPUT_PATH', "/testdata")
minio_secure = False
if (s3_secure > 0):
minio_secure = True
def main(_):
# Initialize minioClient with an endpoint and access/secret keys.
print("opening {}".format(args.endpoint))
try:
minioClient = Minio(args.endpoint,
access_key=args.accesskey,
secret_key=args.secretkey,
secure=args.secure)
    except Exception as err:
print (err)
print("opened {}".format(minioClient))
print("walking {}".format(args.inputpath))
for dirpath, dirs, files in os.walk(args.inputpath):
for file in files:
try:
print("copying {} to {} as {}".format(dirpath + "/" + file, args.bucket, file))
minioClient.fput_object(args.bucket, file, dirpath + "/" + file)
except ResponseError as err:
print(err)
print("copied {} to {} as {}".format(dirpath + "/" + file, args.bucket, file))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='put some randomly generated files into an s3 bucket.')
parser.add_argument('-b', '--bucket', required=False,
help="""The bucket where files will be put. This overrides the default and the environment variable S3_BUCKET.""",
default=s3_bucket)
parser.add_argument('-e', '--endpoint', required=False,
help="""S3 endpoint, hostname:port. This overrides the default and the environment variable S3_ENDPOINT.""",
default=s3_endpoint)
parser.add_argument('-s', '--secure', required=False,
help="""Whether the S3 endpoint is using https. This overrides the default and the environment variable S3_SECURE.""",
default=minio_secure)
parser.add_argument('-a', '--accesskey', required=False,
help="""Access key for the bucket. This overrides the default and the environment variable S3_SECURE.""",
default=s3_access_key)
parser.add_argument('-k', '--secretkey', required=False,
help="""Secret key for the bucket. This overrides the default and the environment variable S3_SECRET_KEY.""",
default=s3_secret_key)
parser.add_argument('-i', '--inputpath', required=False,
help="""The directories to walk for files to put in the bucket""",
default=input_path)
args, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
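    # Illustrative invocation (values mirror this script's defaults; adjust for your cluster):
    #   python tf_job_s3_gateway_minio.py -e localhost:30060 -b master.testrepo -i /testdata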
| 2.140625 | 2 |
crawler.py | EnzoPB/WankilFinderCrawler | 1 | 12789034 | from youtube_transcript_api import YouTubeTranscriptApi, _errors as YouTubeTranscriptApiErrors
from datetime import datetime
from database import con, cur # import the database connection and cursor
from youtubeAPI import youtubeAPI # import the youtubeAPI helper, which simply formats requests to the YouTube API
from config import config
nextPageToken = '' # token returned by the API, used to access the next page of the playlist
run = True
if __name__ == '__main__':
while run:
        # fetch all of the channel's videos from the API (via the 'uploads' playlist)
playlist = youtubeAPI('playlistItems', {
'part': 'snippet',
'pageToken': nextPageToken,
            'maxResults': 50, # we want 50 videos (the maximum per request)
            'playlistId': config['playlistId'] # ID of the "uploads" playlist
})
if 'nextPageToken' in playlist:
nextPageToken = playlist['nextPageToken']
else:
run = False
        videos = playlist['items'] # 'items' is the list of videos
for video in videos:
video = video['snippet']
print(f'video #{video["position"]}')
videoId = video['resourceId']['videoId']
            timestamp = datetime.strptime(video['publishedAt'], '%Y-%m-%dT%H:%M:%SZ').timestamp() # convert the ISO8601 date to a UNIX timestamp
            # fetch the top 5 comments
            topComments = [] # list holding these comments
comments = youtubeAPI('commentThreads', {
'part': 'snippet',
                'textFormat': 'plainText', # we want the comments as plain text
                'maxResults': 5, # we want 5 comments
                'order': 'relevance', # sort by 'relevance' (the default order)
                'videoId': videoId # ID of the video
            })['items'] # 'items' is the list of comments
for comment in comments:
                topComments.append(comment['snippet']['topLevelComment']['snippet']['textDisplay']) # append the comment (text only) to the list
try:
                # fetch the video's French subtitles (automatic and manual), see https://pypi.org/project/youtube-transcript-api
                transcripts = YouTubeTranscriptApi.list_transcripts(videoId) # fetch the list of transcripts available for the video
            except YouTubeTranscriptApiErrors.TranscriptsDisabled: # subtitles are disabled for this video
                pass # ignore the error, an empty string "" will be stored in the database
            autoCaptionsList = [] # list holding the automatic subtitles
try:
                autoCaptions = transcripts.find_generated_transcript(['fr']).fetch() # fetch the automatic French subtitles
for caption in autoCaptions:
                    autoCaptionsList.append(caption['text']) # append the caption text to the list
            except YouTubeTranscriptApiErrors.NoTranscriptFound: # subtitles are disabled / unavailable
                pass # ignore the error, an empty string "" will be stored in the database
            manualCaptionsList = [] # list holding the manual subtitles
try:
                manualCaptions = transcripts.find_manually_created_transcript(['fr']).fetch() # fetch the manual French subtitles
for caption in manualCaptions:
                    manualCaptionsList.append(caption['text']) # append the caption text to the list
            except YouTubeTranscriptApiErrors.NoTranscriptFound: # subtitles are unavailable
                pass # ignore the error, an empty string "" will be stored in the database
            # insert a new row into the database
cur.execute('INSERT INTO videos(id, title, description, timestamp, topComments, autoCaptions, manualCaptions) VALUES(?, ?, ?, ?, ?, ?, ?)',
(videoId, video['title'], video['description'], timestamp, '\n'.join(topComments), '\n'.join(autoCaptionsList), '\n'.join(manualCaptionsList)))
con.commit()
con.close() | 2.59375 | 3 |
tests/web_platform/CSS2/linebox/test_vertical_align_sub.py | fletchgraham/colosseum | 0 | 12789035 | from tests.utils import W3CTestCase
class TestVerticalAlignSub(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'vertical-align-sub-'))
| 1.75 | 2 |
multilayerGM/__init__.py | MultilayerBenchmark/MultilayerBenchmark-py | 10 | 12789036 | <filename>multilayerGM/__init__.py
from . import dependency_tensors
from . import export
from . import comparisons
from .networks import multilayer_DCSBM_network
from .partitions import sample_partition, DirichletNull, dirichlet_null
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
| 1.1875 | 1 |
batch_requests/authentication.py | GoLorry/django-batch-requests | 0 | 12789037 | <gh_stars>0
from django.contrib.auth import authenticate
from rest_framework.authentication import BaseAuthentication
class BatchAuthentication(BaseAuthentication):
'''
Similar to session authentication. Authenticates users already authenticated by
batch_requests.
'''
def authenticate(self, request):
'''
Returns a `User` if the batch request set a logged in user.
Otherwise returns `None`.
'''
# Get the session-based user from the underlying HttpRequest object
user = getattr(request._request, 'batch_user', None)
if not user or not user.is_active:
return None
return (user, None) | 2.640625 | 3 |
OpenMatch/modules/encoders/positional_encoder.py | vishalbelsare/OpenMatch | 403 | 12789038 | import torch
import torch.nn as nn
class PositionalEncoder(nn.Module):
def __init__(
self,
embed_dim: int,
max_len: int = 512
) -> None:
super(PositionalEncoder, self).__init__()
self._embed_dim = embed_dim
self._max_len = max_len
self._embed_matrix = torch.tensor(
[[pos / pow(1.0e4, 2.0 * (i // 2) / self._embed_dim) for i in range(self._embed_dim)] for pos in range(self._max_len)]
)
self._embed_matrix[:, 0::2] = torch.sin(self._embed_matrix[:, 0::2])
self._embed_matrix[:, 1::2] = torch.cos(self._embed_matrix[:, 1::2])
self._embedder = nn.Embedding(self._max_len, self._embed_dim)
self._embedder.weight = nn.Parameter(self._embed_matrix, requires_grad=False)
def forward(self, embed: torch.Tensor) -> torch.Tensor:
token_len = embed.size()[1]
if embed.is_cuda:
ids = torch.cuda.LongTensor([l for l in range(token_len)])
else:
ids = torch.LongTensor([l for l in range(token_len)])
embed += self._embedder(ids)
return embed
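# A minimal usage sketch (not part of the original module); the shapes below are
# illustrative assumptions.
if __name__ == "__main__":
    encoder = PositionalEncoder(embed_dim=16, max_len=64)
    dummy = torch.zeros(2, 10, 16)  # (batch, seq_len, embed_dim)
    out = encoder(dummy)            # adds the fixed sinusoidal positions and returns the tensor
    print(out.shape)                # torch.Size([2, 10, 16])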
| 2.375 | 2 |
src/generate_features.py | isotopi/Android_Permission | 0 | 12789039 | import pandas as pd
from sklearn.preprocessing import LabelEncoder
# read dataset file
df = pd.read_csv("../data/Android_Permission.csv", header=0, delimiter=',')
# Drop the columns which have a remarkable number of identical values.
dropper = []
for col in df.columns[10:]:
if (df[col].value_counts()[0] > 28999 or df[col].value_counts()[0] < 1000):
dropper.append(col)
df = df.drop(df[dropper], axis = 1)
# Drop Related apps column
df = df.drop(['Related apps'], axis = 1)
# Drop rows with missing values
df = df.dropna()
# encoding Category column
le = LabelEncoder()
df['Category'] = le.fit_transform(df['Category'])
# encode the remaining text columns using patterns common in malicious apps
## number of uppercase letters in each of the three text columns
df['App_Upper'] = df['App'].apply(lambda message: sum(1 for c in str(message) if c.isupper()))
df['Pack_Upper'] = df['Package'].apply(lambda message: sum(1 for c in str(message) if c.isupper()))
df['Description_Upper'] = df['Description'].apply(lambda message: sum(1 for c in str(message) if c.isupper()))
## number of periods in 'Package'
df['Pack_Periods'] = df['Package'].apply(lambda message: sum(1 for c in str(message) if '.' in c))
## words like "free" or "better" in the app name
df['App_Free_Better'] = df['App'].str.contains('free|better').astype(int)
df = df.drop(['App'], axis = 1)
df = df.drop(['Package'], axis = 1)
df = df.drop(['Description'], axis = 1)
| 3.21875 | 3 |
plugins/modules/unpackage.py | bferguso/nr-ansible | 0 | 12789040 | <gh_stars>0
#!/usr/bin/env python
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: unpackage.py
short_description: A Custom Version of unarchive
description:
- Untars/Unzips if no valid checksum file is found
    - After successful unpack creates a checksum file
version_added: "2.3"
author: "<NAME>"
Status : preview
options:
src:
description:
- The location of the file to attempt to unpack.
required: true
dest:
description:
- The location to unpack to
required: true
safe_mode:
description:
Used to prevent deletion
required: False
default: False
checksum:
        description: the checksum to match against
required: False
exclude:
Description: files to not be included in the unzip
required: False
type: 'list'
directory_permissions:
        description: The permissions of the dest directory if it does not exist
required:False
default=0775
extra_opts:
description: In order to allow for more options in unzip/tar these will be included in the command that gets run
required: False
top_dir:
description:
if using unzip the name of the top_directory that would be created by unzipping the archive
required: for .zip if you want to make the dest the top folder
archive_name:
description:
            The name of the archive being unpacked; used for naming the checksum file and the temporary directory for zip archives
required=True
This module is designed to provide similar functionality to the native unarchive module, with some key differences
1) The dest specified will be created if it does not exist
2) the dest will be the location all files are placed in
3) the top_dir of the archive will be cut, i.e. apache-tomcat2.6.8 will not exist and all files will be placed in dest
4) if a .checksum_archive_name file is present and contains the current checksum of the archive to unpack, no action will happen and the module returns unchanged
Expected behavior
This is the main decision tree for the program
(Start) -> [Requirements exist]
(no) -> (fail)
(yes) -> [src archive exists?]
(no) -> (fail)
(yes) -> [dest exists?]
(no) -> (make_dir) -> [success?]
(yes) -> (find_handler) -> [success?]
(yes) -> (update .checksum) -> (end_with_change)
(no) -> (fail)
(no) -> (fail)
(yes) -> [.checksum file exists?]
                    (yes) -> [.checksum matches checksum]
(yes) -> (end_no_change)
(no) -> [safe_mode?]
(no) -> (find_handler)
(yes) -> (update .checksum) -> (end_with_change)
(no) -> (fail)
(yes) -> (end_no_change)
(no) -> [safe_mode?]
(no) -> (find_handler) -> [success?]
(yes) -> (end_with_change)
(no) -> (fail)
(yes) -> (end_no_change)
'''
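# Illustrative examples only; the paths, archive name and checksum variable below are
# placeholders, not values taken from the original module.
EXAMPLES = '''
- name: Unpack an archive into /apps_ux/tomcat, skipping work when the checksum already matches
  unpackage:
    src: /tmp/apache-tomcat-8.5.32.tar.gz
    dest: /apps_ux/tomcat
    archive_name: tomcat
    checksum: "{{ tomcat_checksum }}"
    safe_mode: False
'''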
import re
import os
import stat
import pwd
import grp
import datetime
import time
import binascii
import codecs
import os.path
from distutils.dir_util import copy_tree
from zipfile import ZipFile, BadZipfile
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
class unpackage(object):
def __init__(self, module):
self.module = module
self.changed = False
self.state = ''
self.tgz_zip_flag =''
self.file_args = self.module.load_file_common_arguments(self.module.params)
self.extra_opts = module.params['extra_opts']
self.dest = module.params['dest']
self.src = module.params['src']
self.safe_mode = module.params['safe_mode']
self.exclude = module.params['exclude']
self.checksum = module.params['checksum']
self.directory_permissions= module.params['directory_permissions']
self.top_dir = module.params['top_dir']
self.name = ''
def strip_archive_name(self):
self.name = self.src.rsplit('/',1)[1]
self.name = self.name.split('.', 1)[0]
def make_dir(self):
try:
os.makedirs(self.dest)
except:
self.module.fail_json(msg="failed to unpack make dir")
def ZipArchive(self):
if self.top_dir:
path = self.dest + "/tmp_" + self.name
print path
if not os.path.isdir(path):
try:
os.makedirs(path)
except:
self.module.fail_json(msg='failed to make temp dir for unzip')
else :
try:
self.module.run_command("rm -rf " + path)
print ("rm -rf" + path)
os.makedirs(path ,0755)
except:
self.module.fail_json(msg='failed to make temp dir for unzip, dest/temp_archive_name already exists')
directory_structure= os.listdir(path)
command= 'unzip ' + '-o ' + self.src
if self.extra_opts:
command+= self.extra_opts
if self.exclude:
for x in self.exclude:
command += ' -x ' + x
command += ' -d ' + self.dest + "/tmp_" + self.name
print command
rc, out, err = self.module.run_command(command, cwd=self.dest)
if rc != 0:
self.module.run_command("rm -rf " + self.dest + "/tmp_" + self.name)
return False
new_dir = os.listdir(path)
top_dir = [y for y in new_dir if y not in directory_structure]
fullpath = path + '/' + ''.join(top_dir)
copy_tree(fullpath , self.dest)
self.module.run_command("rm -rf " + self.dest + "/tmp_" + self.name)
return True
else:
command= 'unzip ' + '-o ' + self.src
if self.extra_opts:
command+= self.extra_opts
if self.exclude:
for x in self.exclude:
command += ' -x ' + x
command += ' -d ' + self.dest
print command
rc, out, err = self.module.run_command(command, cwd=self.dest)
if rc != 0 :
return False
return True
def TgzArchive(self):
command = 'tar ' + '--extract ' + '-C ' + self.dest + " --strip-components=1 "
if self.extra_opts:
command+= self.extra_opts
if self.file_args['owner']:
command+= ' --owner=' + self.file_args['owner']
if self.file_args['group']:
command +=' --group=' + self.file_args['group']
if self.exclude:
for f in self.exclude:
command+= ' --exclude=' + f
if self.tgz_zip_flag:
command+= self.tgz_zip_flag
command += ' -f ' + self.src
print command
rc, out, err = self.module.run_command(command, cwd=self.dest)
if rc != 0 :
return False
return True
def TarArchive(self):
self.tgz_zip_flag = ''
error = self.TgzArchive()
return error
def TarBzipArchive(self):
print ("Trying TarBZip ")
self.tgz_zip_flag = '-j'
error = self.TgzArchive()
self.tgz_zip_flag = ''
return error
def TarXzArchive(self):
print ("Trying TarXz ")
self.tgz_zip_flag = '-J'
print ("Trying tarxzArchive")
error = self.TgzArchive()
self.tgz_zip_flag = ''
return error
def find_handler(self):
handlers = [self.ZipArchive, self.TgzArchive , self.TarArchive , self.TarBzipArchive, self.TarXzArchive]
for handler in handlers:
obj = handler()
if obj:
return
print ("Fail: Failed to find an appropriate handler, check that archive type is supported")
self.module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed.' % (self.src ))
def main():
module = AnsibleModule(
argument_spec=dict(
safe_mode=dict(required=False, default=False),
src=dict(required=True),
dest=dict(required=True),
checksum=dict(required=False),
exclude=dict(required=False, type='list'),
directory_permissions=dict(required=False, default=0775),
extra_opts=dict(required=False),
top_dir=dict(required=False, default= True),
archive_name=dict(required=False),
check_mode=dict(required=False, default= False)
),
supports_check_mode=False
)
archive = unpackage(module)
if not module.params['archive_name']:
archive.strip_archive_name()
if module.params['check_mode'] == "True":
if not os.path.exists(module.params['src']):
print ("Fail: source file not found, checkmode")
module.fail_json(msg='Src does not exist, checkmode')
if not os.path.isdir(module.params['dest']):
archive.changed= True
print ("It would have tryed to make a dir and find handlers, but in check mode and skips these tasks")
elif os.path.exists(module.params['dest'] +"/.checksum_" + archive.name):
checksum_old = open((module.params['dest'] + "/.checksum_" + archive.name), "r")
file_checksum= checksum_old.read()
if module.params['checksum'] in file_checksum:
print ("No Changes: checksum exists and is the same as supplied checksum, checkmode was enabled so no changes")
archive.changed = False
checksum_old.close()
else:
print ("Not Finding Handler... Checkmode does not support checking handlers as doing so causes changes")
checksum_old.close()
archive.changed= True
elif archive.safe_mode == "False":
print ("Not Finding Handler... checkmode does not support finding handler")
archive.changed= True
else:
print ("No changes: Because Directory exists, checksum does not exist, and safe_mode is enabled, checkmode was enabled")
archive.changed = False
result = {}
result['name'] = archive.dest
result['changed'] = archive.changed
result['state'] = ""
module.exit_json(**result)
if not os.path.exists(module.params['src']):
print ("Fail: source file not found")
module.fail_json(msg='Src does not exist')
if not os.path.isdir(module.params['dest']):
archive.make_dir()
archive.find_handler()
archive.changed= True
elif os.path.exists(module.params['dest'] +"/.checksum_" + archive.name):
checksum_old = open((module.params['dest'] + "/.checksum_" + archive.name), "r")
file_checksum= checksum_old.read()
if module.params['checksum'] in file_checksum:
print ("No Changes: checksum exists and is the same as supplied checksum")
archive.changed = False
checksum_old.close()
else:
print ("Finding Handler...")
checksum_old.close()
archive.find_handler()
archive.changed= True
elif archive.safe_mode == "False":
print ("Finding Handler...")
archive.find_handler()
archive.changed= True
else:
print ("No changes: Because Directory exists, checksum does not exist, and safe_mode is enabled")
archive.changed = False
if archive.changed:
if os.path.exists(module.params['dest'] +"/.checksum_" + archive.name):
os.remove(module.params['dest']+ "/.checksum_"+ archive.name)
checksum_new = open(module.params['dest']+ "/.checksum_"+ archive.name , "w+")
checksum_new.write(module.params['checksum'])
checksum_new.close()
else:
checksum_new = open(module.params['dest']+ "/.checksum_" + archive.name , "w+")
checksum_new.write(module.params['checksum'])
checksum_new.close()
result = {}
result['name'] = archive.dest
result['changed'] = archive.changed
result['state'] = ""
module.exit_json(**result)
if __name__ == '__main__':
main()
| 2.171875 | 2 |
tests/core/sources/test_municipality_database.py | pyramidoereb/pyramid_oereb | 2 | 12789041 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def municipality_data(dbsession, transact):
from pyramid_oereb.contrib.data_sources.standard.models.main import Municipality
del transact
municipalities = [
Municipality(**{
'fosnr': 1234,
'name': u'Test',
'published': True,
'geom': 'SRID=2056;MULTIPOLYGON(((0 0, 0 10, 10 10, 10 0, 0 0)))',
})
]
dbsession.add_all(municipalities)
dbsession.flush()
yield municipalities
@pytest.mark.run(order=2)
def test_init(pyramid_oereb_test_config):
from pyramid_oereb.contrib.data_sources.standard.sources.municipality import DatabaseSource
from pyramid_oereb.core.adapter import DatabaseAdapter
from pyramid_oereb.contrib.data_sources.standard.models.main import Municipality
source = DatabaseSource(**pyramid_oereb_test_config.get_municipality_config().get('source').get('params'))
assert isinstance(source._adapter_, DatabaseAdapter)
assert source._model_ == Municipality
def test_read(pyramid_oereb_test_config, municipality_data):
from pyramid_oereb.contrib.data_sources.standard.sources.municipality import DatabaseSource
source = DatabaseSource(**pyramid_oereb_test_config.get_municipality_config().get('source').get('params'))
source.read()
assert isinstance(source.records, list)
assert len(source.records) == len(municipality_data)
assert source.records[0].fosnr == municipality_data[0].fosnr
| 2.015625 | 2 |
fdp/methods/fft_old.py | Fusion-Data-Platform/fdp | 10 | 12789042 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 15 21:00:08 2016
@author: drsmith
"""
from __future__ import division
from builtins import range
from builtins import object
from past.utils import old_div
import numpy as np
from scipy import fftpack
from .globals import FdpError
class Fft(object):
"""
Fft class
Calculates binned ffts for time interval tmin to tmax.
Attributes
fft: complex-valued fft(time, freq),
        single-sided for real input, double-sided for complex input
time: time array for bins [s]
freq: frequency array [kHz]
power2: # of points in fft, power-of-2 (>=) enforced
"""
def __init__(self, signal, power2=None, tmin=0.2, tmax=1.0,
hanning=True, offsetminimum=False, offsetdc=False,
normalizetodc=False):
self.signal = signal
self.signalname = signal._name
self.parentname = signal._parent._name
self.shot = signal.shot
self.power2 = power2 # in None, guess value below
self.hanning = hanning # true for Hann window
self.offsetminimum = offsetminimum # true to shift signal by minimum
self.offsetdc = offsetdc # true to remove DC component
self.normalizetodc = normalizetodc
if self.hanning:
# 50% overlap for Hanning window
self.overlapfactor = 2
else:
# no overlap among bins
self.overlapfactor = 1
if tmax > 10:
# assume ms input and convert to s
tmin = old_div(tmin, 1e3)
tmax = old_div(tmax, 1e3)
self.tmin = tmin
self.tmax = tmax
# single-sided, complex-valued fft(time, freq)
self.fft = None
# frequency array
self.freq = None
# array of center times for bins
self.time = None
self.nbins = None
self.nfft = None
self.window = None
self.iscomplexsignal = None
# real, positive definite power spec. density, psd(freq,time)
self.psd = None
self.logpsd = None
self.binavg_psd = None
self.binavg_logpsd = None
# input signal integrated power, intpower(time)
self.intpower = None
self.maxintpowererror = None
self.loadSignal()
self.makeTimeBins()
if self.offsetminimum:
self.applyMinimumOffset()
if self.offsetdc:
self.normalizetodc = False
self.applyDcOffset()
if self.hanning:
self.applyHanningWindow()
self.calcIntegratedSignalPower()
self.calcFft()
if self.normalizetodc and not self.offsetdc:
self.applyNormalizeToDc()
self.calcPsd()
def loadSignal(self):
self.signal[:]
self.signal.time[:]
# real-valued floating or complex-valued?
if self.signal.dtype.kind == 'f':
self.iscomplexsignal = False
elif self.signal.dtype.kind == 'c':
self.iscomplexsignal = True
else:
raise FdpError('Data must be floating or complex')
def makeTimeBins(self):
self.time = []
self.fft = []
time_indices = np.where(np.logical_and(self.signal.time >= self.tmin,
self.signal.time <= self.tmax
))[0]
istart = time_indices[0]
istop = time_indices[time_indices.size - 1]
if self.power2 is None:
# guess appropriate power2 value
self.power2 = np.int(
np.sqrt((istop - istart + 1) * self.overlapfactor))
self.power2 = nextpow2(self.power2)
self.nfft = self.power2
t = np.mean(self.signal.time[istart:istart + self.power2])
while self.signal.time[istart + self.power2 - 1] <= self.tmax:
self.time.append(t)
self.fft.append(self.signal[istart:istart + self.power2])
# candidate istart and t for next iteration
istart = istart + old_div(self.power2, self.overlapfactor)
t = np.mean(self.signal.time[istart:istart + self.power2])
# convert lists to ndarrays
# at this point, fft contains modified input signals
self.fft = np.array(self.fft)
self.time = np.array(self.time)
self.nbins = self.time.size
def applyMinimumOffset(self):
zerosignal = np.min(self.signal[1000:9000])
self.fft -= zerosignal
def applyDcOffset(self):
# remove DC offset bin-wise
for i in range(self.nbins):
self.fft[i, :] -= np.mean(self.fft[i, :])
def applyHanningWindow(self):
self.window = np.hanning(self.power2)
for i in range(self.nbins):
self.fft[i, :] = np.multiply(self.fft[i, :], self.window)
def calcIntegratedSignalPower(self):
self.intpower = np.sum(np.square(np.absolute(self.fft)), axis=1)
def calcFft(self):
timeint = np.mean(np.diff(self.signal.time[1000:9000]))
# complex-valued, double-sided FFT
self.fft = fftpack.fft(self.fft,
n=self.power2,
axis=1)
# frequency array in kHz
self.freq = old_div(fftpack.fftfreq(self.power2, d=timeint), 1e3)
# check integrated power (bin-wise)
self.checkIntegratedPower()
# if real-valued input, convert to single-sided FFT
if not self.iscomplexsignal:
# complex-valued, single-sided FFT
ssfft = self.fft[:, 0:old_div(self.power2, 2) + 1].copy()
ssfft[:, 1:old_div(self.power2, 2)] *= np.sqrt(2.0)
self.fft = ssfft
self.freq = self.freq[0:old_div(self.power2, 2) + 1].copy()
self.freq[old_div(self.power2, 2)] = -self.freq[old_div(self.power2, 2)]
# check integrated power (bin-wise)
self.checkIntegratedPower()
def applyNormalizeToDc(self):
for i in range(self.nbins):
self.fft[i, :] /= np.real(self.fft[i, 0])
def calcPsd(self):
# PSD in dB: 10*log10 (|FFT|^2)
self.psd = np.square(np.absolute(self.fft))
self.logpsd = 10 * np.log10(self.psd)
# bin-averaged PSD in dB: 10*log10 (|FFT|^2)
self.binavg_psd = np.mean(self.psd, axis=0)
self.binavg_logpsd = 10 * np.log10(self.binavg_psd)
def checkIntegratedPower(self):
intpowercheck = old_div(np.sum(np.square(np.absolute(self.fft)),
axis=1), self.power2)
if not np.allclose(self.intpower, intpowercheck):
raise FdpError('Integrated power mismatch')
def nextpow2(number):
"""Return next power of 2 (>= number)"""
exp = int(np.log2(number - 1)) + 1
return np.power(2, exp)
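# For instance: nextpow2(100) -> 128, nextpow2(128) -> 128, nextpow2(129) -> 256.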
| 1.867188 | 2 |
prompt412/round-105/d.py | honux77/algorithm | 2 | 12789043 | <filename>prompt412/round-105/d.py
n = int(input())
m = list(map(int, input().split()))
m.sort()
ans = 0
if n % 2 != 0:
ans = m[-1]
m.pop()
i = 0
j = len(m) - 1
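# Greedily pair the smallest remaining value with the largest; the answer is the
# largest such pair sum (the single largest element stands alone when n is odd).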
while i < j:
ans = max(ans, m[i] + m[j])
i += 1
j -= 1
print(ans) | 2.6875 | 3 |
build_migrator/common/algorithm.py | alexsharoff/BuildMigrator | 17 | 12789044 | <gh_stars>10-100
from copy import deepcopy
try:
# Python 3
from collections.abc import Hashable, Iterable
except ImportError:
# Python 2
from collections import Hashable, Iterable
# Explanation by example:
# A = 1 2 3 4 5
# B = 2 3 4 5
#
# Optimized:
# common_set C = 2 3 4 5 (+4)
# A = 1 C (-3)
# B = C (-3)
# Shaved off 2 items, hence fitness = 2
def fitness_by_set_length(candidate, sets):
return len(sets) * (len(candidate) - 1) - len(candidate)
# Explanation by example:
# A = abc def 1234567890
# B = abc def
# C = 1234567890
#
# Optimized 1:
# common_set (D): 1234567890 (+10)
# A = abc def D (-9)
# B = abc def
# C = D (-9)
# Shaved off 9 characters, hence fitness = 9
#
# Optimized 2:
# common_set (E): abc def (+6)
# A = E 1234567890 (-5)
# B = E (-5)
# C = 1234567890
# Shaved off 4 characters, hence fitness = 4
#
# Best common_set is D (maximum fitness)
class FitnessByTotalStringLength(object):
def __init__(self, placeholder_length=1):
self._placeholder_length = placeholder_length
def __call__(self, candidate, sets):
candidate_length = sum([len(s) for s in candidate])
return (
len(sets) * (candidate_length - self._placeholder_length) - candidate_length
)
# TODO: describe algorithm?
# Best case complexity: len(union(sets)) * len(sets)
# Worst case complexity: len(union(sets)) ^ 2 * len(sets)
def find_best_common_set(sets, fitness_func=None):
if fitness_func is None:
fitness_func = fitness_by_set_length
sets_with_value = {}
for set_ in sets:
for value in set_:
if value not in sets_with_value:
sets_with_value[value] = [set_]
else:
sets_with_value[value].append(set_)
common_set = None
fitness = 0
for value, sets_ in sorted(
sets_with_value.items(), key=lambda kv: len(kv[1]), reverse=True
):
candidate_set = set.intersection(*sets_)
candidate_fitness = fitness_func(candidate_set, sets_)
if common_set is None or candidate_fitness > fitness:
common_set = candidate_set
fitness = candidate_fitness
if fitness >= fitness_func(sets_with_value.keys(), sets_):
# next iterations won't reach better fitness
break
return common_set, fitness
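# Illustrative call, matching the first example in the comments above:
#   find_best_common_set([{1, 2, 3, 4, 5}, {2, 3, 4, 5}]) -> ({2, 3, 4, 5}, 2)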
def join_nested_lists(flags, delim=" "):
for idx, f in enumerate(flags):
if isinstance(f, list):
flags[idx] = delim.join(f)
return flags
def flatten_list(lst):
result = []
for elem in lst:
if isinstance(elem, list):
result.extend(flatten_list(elem))
else:
result.append(elem)
return result
# Replaces lists with tuples
def make_hashable(v):
if isinstance(v, Hashable):
return v
if isinstance(v, Iterable):
return tuple([make_hashable(i) for i in v])
raise ValueError("Unsupported type: %r", type(v))
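# Appends items from the extra iterables to l1 in order, skipping anything already
# present, and returns a deep copy of the merged list.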
def add_unique_stable(l1, *l2):
seen = set(make_hashable(l1))
for x in l2:
hashable_x = make_hashable(x)
if not (hashable_x in seen or seen.add(hashable_x)):
l1.append(x)
return deepcopy(l1)
def add_unique_stable_by_key(l1, key, *l2):
seen = set([x[key] for x in l1])
for x in l2:
if x[key] not in seen:
seen.add(x[key])
l1.append(x)
return deepcopy(l1)
def intersect_unique_stable(*lists):
lists = list(lists)
while len(lists) > 1:
tmp = list()
for x in lists[0]:
if (x not in tmp) and (x in lists[1]):
tmp.append(x)
lists[0:2] = [tmp]
return deepcopy(lists[0]) if len(lists) == 1 else None
def get_subdict(dictionary, *keys):
return {key: dictionary[key] for key in set(dictionary.keys()) & set(keys)}
| 2.890625 | 3 |
webviz_config/utils/_dash_component_utils.py | anders-kiaer/webviz-conf | 44 | 12789045 | <reponame>anders-kiaer/webviz-conf<filename>webviz_config/utils/_dash_component_utils.py
import math
def calculate_slider_step(
min_value: float, max_value: float, steps: int = 100
) -> float:
"""Calculates a step value for use in e.g. dcc.RangeSlider() component
that will always be rounded.
    The number of steps will be at least the number
of input steps, but might not be precisely the same due to use of the floor function.
This function is necessary since there is currently no precision control in the underlying
React component (https://github.com/react-component/slider/issues/275).
"""
return 10 ** math.floor(math.log10((max_value - min_value) / steps))
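# For instance: calculate_slider_step(0, 1, 100) returns 0.01, so a 0..1 slider
# keeps two-decimal steps.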
| 2.578125 | 3 |
compsim/generate/randgen.py | skyman/CompressionSimulator | 0 | 12789046 | # coding=utf-8
import numpy as np
import random
from compsim.simulate import Grid, ColoredParticle, NewSeparationSimulator
from compsim.io import BRIGHT_COLORS
def generate_random_grid(n_particles, simulator_type, weighted_particle_types, size=None):
# weighted particle types is a list of particle types in (single-param initializer, weight) format
if n_particles <= 0:
raise ValueError("At least 1 particle needs to be generated.")
if size is None:
width_height = int(n_particles ** 0.5) * 4
size = (width_height, width_height)
grid = Grid(size)
print "Initialized a grid of size %d, %d" % size
total_weight = float(sum(wp[1] for wp in weighted_particle_types))
# Choose the classes for our particles:
particle_types = list(np.random.choice([wt[0] for wt in weighted_particle_types], n_particles, True,
[wt[1] / total_weight for wt in weighted_particle_types]))
# Manually add the first particle at the center
p_init = particle_types.pop(0)
particle = p_init((0, 0), 0)
grid.add_particle(particle)
try:
simulator_type.validate_grid(grid)
except:
raise ValueError("The simulation should allow starting at the middle.")
i = 1
for particle_type in particle_types:
while True:
# Choose a random position for the particle
coords = random.choice(grid.get_valid_empty_neighborhoods())
particle = particle_type(coords, i)
grid.add_particle(particle)
try:
simulator_type.validate_grid(grid)
except ValueError:
grid.remove_particle(particle)
continue
print "Successfully inserted %s #%d" % (type(particle).__name__, i)
i += 1
break
print "Random grid generation successful"
return grid
def generate_random_separation_grid(n_particles, n_classes, size=None, simulator_type=NewSeparationSimulator,
base_class=ColoredParticle, colors=BRIGHT_COLORS):
classes = [(type('ColoredParticle_%d' % index, (base_class,), {'COLOR': colors[index]}), 1.0 / n_classes) for index
in xrange(n_classes)]
return generate_random_grid(n_particles, simulator_type, classes, size)
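# Illustrative usage (argument values are placeholders):
#   grid = generate_random_separation_grid(n_particles=50, n_classes=3)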
| 2.84375 | 3 |
scripts/service_server.py | mankutimma/ros_fundamentals | 0 | 12789047 | <gh_stars>0
#!/usr/bin/env python3
import rospy
from ros_fundamentals.srv import AddTwoIntegers, AddTwoIntegersResponse
def main():
    # the node must be initialized before the service is created
    rospy.init_node(name="addition_server")
    # name of service, type of service and callback
    server = rospy.Service(name="addition_service", service_class=AddTwoIntegers, handler=callback)
rospy.spin()
def callback(request):
    # equivalently: return AddTwoIntegersResponse(request.a + request.b)
return request.a + request.b
if __name__ == "__main__":
main()
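# Illustrative command-line call once the node is running (field names follow the
# AddTwoIntegers request used in callback above):
#   rosservice call /addition_service "{a: 2, b: 3}"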
| 2.5625 | 3 |
pwndbg/commands/nearpc.py | ctfhacker/pwndbg | 0 | 12789048 | <filename>pwndbg/commands/nearpc.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from capstone import *
import collections
import gdb
import pwndbg.arguments
import pwndbg.color
import pwndbg.disasm
import pwndbg.disasm.color
import pwndbg.functions
import pwndbg.ida
import pwndbg.regs
import pwndbg.strings
import pwndbg.symbol
import pwndbg.ui
import pwndbg.vmmap
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def nearpc(pc=None, lines=None, to_string=False):
"""
Disassemble near a specified address.
"""
# Fix the case where we only have one argument, and
# it's a small value.
if lines is None and (pc is None or int(pc) < 0x100):
lines = pc
pc = None
if pc is None:
pc = pwndbg.regs.pc
if lines is None:
lines = 5
pc = int(pc)
lines = int(lines)
# # Load source data if it's available
# pc_to_linenos = collections.defaultdict(lambda: [])
# lineno_to_src = {}
# frame = gdb.selected_frame()
# if frame:
# sal = frame.find_sal()
# if sal:
# symtab = sal.symtab
# objfile = symtab.objfile
# sourcefilename = symtab.filename
# with open(sourcefilename, 'r') as sourcefile:
# lineno_to_src = {i:l for i,l in enumerate(sourcefile.readlines())}
# for line in symtab.linetable():
# pc_to_linenos[line.pc].append(line.line)
result = []
instructions = pwndbg.disasm.near(pc, lines)
# In case $pc is in a new map we don't know about,
# this will trigger an exploratory search.
pwndbg.vmmap.find(pc)
# Find all of the symbols for the addresses
symbols = []
for i in instructions:
symbol = pwndbg.symbol.get(i.address)
if symbol:
symbol = '<%s> ' % symbol
symbols.append(symbol)
# Find the longest symbol name so we can adjust
if symbols:
longest_sym = max(map(len, symbols))
else:
longest_sym = ''
# Pad them all out
for i,s in enumerate(symbols):
symbols[i] = s.ljust(longest_sym)
prev = None
# Print out each instruction
for i,s in zip(instructions, symbols):
asm = pwndbg.disasm.color.instruction(i)
prefix = ' =>' if i.address == pc else ' '
pre = pwndbg.ida.Anterior(i.address)
if pre:
result.append(pwndbg.color.bold(pre))
# for line in pc_to_linenos[i.address]:
# result.append('%s %s' % (line, lineno_to_src[line].strip()))
line = ' '.join((prefix, "%#x" % i.address, s or '', asm))
# If there was a branch before this instruction which was not
# contiguous, put in some ellipses.
if prev and prev.address + prev.size != i.address:
result.append('...')
# Otherwise if it's a branch and it *is* contiguous, just put
        # an empty line.
elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)):
result.append('')
# For syscall instructions, put the name on the side
if i.address == pc:
syscall_name = pwndbg.arguments.get_syscall_name(i)
if syscall_name:
line += ' <%s>' % syscall_name
result.append(line)
# For call instructions, attempt to resolve the target and
# determine the number of arguments.
for arg, value in pwndbg.arguments.get(i):
code = False if arg.type == 'char' else True
pretty = pwndbg.chain.format(value, code=code)
result.append('%8s%-10s %s' % ('',arg.name+':', pretty))
prev = i
if not to_string:
print('\n'.join(result))
return result
| 2.59375 | 3 |
storage/models/fighter.py | Some1Nebo/ufcpy | 0 | 12789049 | from storage.models.base import *
from sqlalchemy.orm import validates
class Fighter(Base):
__tablename__ = 'fighters'
id = Column(Integer, primary_key=True)
ref = Column(String(STR_SIZE), unique=True, nullable=False)
name = Column(String(STR_SIZE), nullable=False)
country = Column(String(STR_SIZE))
city = Column(String(STR_SIZE))
birthday = Column(Date, nullable=False)
height = Column(Integer) # centimeters
weight = Column(Float) # kg
reach = Column(Integer) # centimeters
specialization = Column(String(STR_SIZE))
fights = relationship(
"Fight",
primaryjoin="or_(Fighter.id == Fight.fighter1_id, Fighter.id == Fight.fighter2_id)")
@validates('height')
def validate_height(self, key, height):
assert height > 0
return height
@validates('weight')
def validate_weight(self, key, weight):
assert weight > 0
return weight
@validates('reach')
def validate_reach(self, key, reach):
assert reach > 0
return reach | 2.546875 | 3 |
tensorlayerx/nn/core/core_mindspore.py | tensorlayer/TensorLayerX | 34 | 12789050 | <gh_stars>10-100
#! /usr/bin/python
# -*- coding: utf-8 -*-
from .common import check_parameter, processing_act, str2init, random_normal, tolist, construct_graph, ModuleNode, select_attrs
from .common import _save_weights, _load_weights, _save_standard_weights_dict, _load_standard_weights_dict
from mindspore.nn import Cell
import tensorlayerx as tlx
import mindspore as ms
from mindspore import log as logger
import inspect
from mindspore import context
import numpy
from mindspore.common.api import _pynative_executor
from collections import OrderedDict, abc as container_abcs
__all__ = ['Module', 'Sequential', 'ModuleList', 'ModuleDict', 'Parameter', 'ParameterList', 'ParameterDict']
_global_layer_name_dict = {}
_global_layer_node = []
class Module(Cell):
def __init__(self, name=None, act=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# mindspore auto-naming is set to False
self._auto_prefix = False
# Uniform parameter naming
global _global_layer_name_dict
if name is None:
prefix = self.__class__.__name__.lower()
if _global_layer_name_dict.get(prefix) is not None:
_global_layer_name_dict[prefix] += 1
name = prefix + '_' + str(_global_layer_name_dict[prefix])
else:
_global_layer_name_dict[prefix] = 0
name = prefix
while True:
if _global_layer_name_dict.get(name) is None:
break
_global_layer_name_dict[prefix] += 1
name = prefix + '_' + str(_global_layer_name_dict[prefix])
else:
if _global_layer_name_dict.get(name) is not None:
pass
else:
_global_layer_name_dict[name] = 0
self.name = name
self.act = processing_act(act)
# Layer building state
self._built = False
# Layer nodes state
self._nodes_fixed = False
self._build_graph = False
# Layer weight state
self._all_weights = []
self._trainable_weights = []
self._nontrainable_weights = []
# Layer training state
self.is_train = True
# layer forward state
self._forward_state = False
# data_format
self.data_format = "NCHW"
def forward(self, *inputs, **kwargs):
raise Exception("The forward method must be implemented by inherited class")
def construct(self, *inputs, **kwargs):
return self.forward(*inputs, **kwargs)
def build(self, inputs_shape):
raise Exception("The build(self, inputs_shape) method must be implemented by inherited class")
def _get_weights(self, var_name, shape, init=random_normal(), trainable=True, transposed=False, order=False):
""" Get trainable variables. """
var_name = self.name + "/" + var_name
# TODO 2D mindspore weights shape : [out_channel, in_channel, kernel_h, kernel_w]
# TODO 2D mindspore transposed shape [in_channel, out_channel, kernel_h, kernel_w]
if order:
initial_value = init(shape=shape)
return tlx.Variable(initial_value=initial_value, name=var_name, trainable=trainable)
if len(shape) == 3:
shape = shape[::-1]
if len(shape) == 4:
if not transposed and self.data_format in ['NHWC', 'channels_last']:
shape = (shape[3], shape[0], shape[1], shape[2])
else:
shape = (shape[3], shape[2], shape[0], shape[1])
if len(shape) == 5:
shape = (shape[4], shape[3], shape[0], shape[1], shape[2])
initial_value = init(shape=shape)
var = tlx.Variable(initial_value=initial_value, name=var_name, trainable=trainable)
self.trainable = trainable
return var
def save_weights(self, file_path, format=None):
"""Input file_path, save model weights into a file of given format."""
_save_weights(self, file_path, format)
def load_weights(self, file_path, format=None, in_order=True, skip=False):
"""Load model weights from a given file, which should be previously saved by self.save_weights()."""
_load_weights(self, file_path, format, in_order, skip)
def save_standard_weights(self, file_path):
_save_standard_weights_dict(self, file_path)
def load_standard_weights(self, file_path, skip=False, reshape=False, format='npz_dict'):
_load_standard_weights_dict(self, file_path, skip, reshape, format)
@staticmethod
def _compute_shape(tensors):
if isinstance(tensors, list):
shape_mem = [tlx.get_tensor_shape(t) for t in tensors]
else:
shape_mem = tlx.get_tensor_shape(tensors)
return shape_mem
# def __call__(self, *args, **kwargs):
## TODO With MindSpore __call__, refactoring is required when there are special cases to consider
def set_train(self):
"""
Sets the cell to training mode.
The cell itself and all children cells will be set to training mode.
Args:
mode (bool): Specifies whether the model is training. Default: True.
"""
self._phase = 'train'
self.add_flags_recursive(training=True)
return self
def set_eval(self):
"""Set this network in evaluation mode. After calling this method,
all layers in network are in evaluation mode, in particular, BatchNorm, Dropout, etc.
Examples
--------
>>> import tensorlayerx as tlx
>>> net = tlx.model.vgg16()
>>> net.eval()
# do evaluation
"""
self._phase = 'predict'
self.add_flags_recursive(training=False)
return self
def test(self):
"""Set this network in evaluation mode."""
self.eval()
def infer(self):
"""Set this network in evaluation mode."""
self.eval()
@property
def trainable_weights(self):
"""
Returns all trainable weights.
        Returns a list of all trainable parameters.
Args:
recurse (bool): Whether contains the trainable weights of sublayers. Default: True.
Returns:
List, the list of trainable weights.
"""
self._trainable_weights = list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True)))
return self._trainable_weights
@property
def nontrainable_weights(self):
"""
Returns all untrainable weights.
Returns a list of all untrainable weights.
Args:
recurse (bool): Whether contains the untrainable weights of sublayers. Default: True.
Returns:
List, the list of untrainable weights.
"""
return list(filter(lambda x: not x.requires_grad, self.get_parameters(expand=True)))
@property
def all_weights(self):
return list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True))) \
+ list(filter(lambda x: not x.requires_grad, self.get_parameters(expand=True)))
def str_to_init(self, initializer):
return str2init(initializer)
def check_param(self, param, dim='2d'):
return check_parameter(param, dim)
def insert_child_to_layer(self, child_name, child):
"""
Adds a child layer to the current layer.
Parameters
----------
child_name : str
Name of the child layer.
child : Module
The child layer to be inserted.
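Examples
--------
A minimal sketch (the layer used as the child is illustrative):
>>> net.insert_child_to_layer('fc1', tlx.nn.Linear(out_features=10, in_features=100))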
"""
if not child_name or '.' in child_name:
raise KeyError("Child layer name is incorrect.")
if hasattr(self, child_name) and child_name not in self._layers:
raise KeyError("Duplicate child name '{}'.".format(child_name))
if not isinstance(child, Module) and child is not None:
raise TypeError("Child layer type is incorrect.")
self._cells[child_name] = child
def init_build(self, *inputs, **kwargs):
"""
This method must be called when the layer was constructed without
``in_channels``; it runs one forward pass on the example inputs so that
the weight shapes can be inferred automatically.
"""
self.forward(*inputs, **kwargs)
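# Sketch of the intended call pattern for init_build (the layer and shapes are
# illustrative; in_features is deliberately omitted so it can be inferred):
#
#   layer = tlx.nn.Linear(out_features=10)
#   layer.init_build(tlx.nn.Input((8, 784)))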
def build_graph(self, *inputs, **kwargs):
# Add nodes only when the composition is needed.
for layer_name, layer in self._cells.items():
if isinstance(layer, Module):
layer._build_graph = True
self.set_eval()
outputs = self.forward(*inputs, **kwargs)
self.inputs = inputs
self.outputs = outputs
self._node_by_depth, self._all_layers = construct_graph(self.inputs, self.outputs)
return self._node_by_depth, self._all_layers
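# Example sketch of driving build_graph (the model and input shape are
# placeholders; the returned values describe the traced static graph):
#
#   net = tlx.model.vgg16()
#   x = tlx.nn.Input((1, 3, 224, 224))
#   node_by_depth, all_layers = net.build_graph(x)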
def _add_node(self, input_tensors, output_tensors):
"""Add a ModuleNode for this layer given input_tensors, output_tensors.
This function should not be called from outside, it should only be called
in __call__ when building static model.
Parameters
----------
input_tensors : Tensor or a list of tensors
Input tensors to this layer.
output_tensors : Tensor or a list of tensors
Output tensors to this layer.
"""
inputs_list = tolist(input_tensors)
outputs_list = tolist(output_tensors)
if self.__class__.__name__ in tlx.layers.inputs.__all__:
# for InputLayer, there should be no in_nodes
in_nodes = []
in_tensor_idxes = [0]
else:
in_nodes = [tensor._info[0] for tensor in inputs_list]
in_tensor_idxes = [tensor._info[1] for tensor in inputs_list]
node_index = len(_global_layer_node)
new_node = ModuleNode(
self, node_index, in_nodes, inputs_list, outputs_list, in_tensor_idxes, select_attrs(self)
)
_global_layer_node.append(new_node)
for idx, tensor in enumerate(outputs_list):
tensor._info = (new_node, idx)
class Sequential(Module):
"""
The class :class:`Sequential` is a linear stack of layers.
The :class:`Sequential` can be created by passing a list of layer instances.
The given layer instances will be automatically connected one by one.
Parameters
----------
layers: list of Layer
A list of layers.
name : str or None
A unique layer name. If None, a unique name will be automatically assigned.
Methods
---------
__init__()
Initializing the Sequential.
weights()
A collection of weights of all the layer instances.
build()
Build the Sequential. The layer instances will be connected automatically one by one.
forward()
Forward the computation. The computation will go through all layer instances.
Examples
---------
>>> conv = tlx.nn.Conv2d(3, 2, 3, pad_mode='valid')
>>> bn = tlx.nn.BatchNorm2d(2)
>>> seq = tlx.nn.Sequential([conv, bn])
>>> x = tlx.nn.Input((1, 3, 4, 4))
>>> seq(x)
"""
def __init__(self, *args):
super(Sequential, self).__init__()
# self._built = True
if len(args) == 1:
layers = args[0]
if isinstance(layers, list):
for index, layer in enumerate(layers):
self.insert_child_to_layer(str(index), layer)
elif isinstance(layers, OrderedDict):
for name, layer in layers.items():
self.insert_child_to_layer(name, layer)
else:
raise TypeError('Layers must be a list or an OrderedDict')
else:
for index, layer in enumerate(args):
self.insert_child_to_layer(str(index), layer)
self.layer_list = list(self._cells.values())
def __getitem__(self, index):
if isinstance(index, slice):
return self.__class__(OrderedDict(list(self._cells.items())[index]))
index = _valid_index(len(self), index)
return list(self._cells.values())[index]
def __setitem__(self, index, layer):
if _valid_module(layer):
index = _valid_index(len(self), index)
key = list(self._cells.keys())[index]
self._cells[key] = layer
self.layer_list = list(self._cells.values())
def __delitem__(self, index):
if isinstance(index, int):
index = _valid_index(len(self), index)
key = list(self._cells.keys())[index]
del self._cells[key]
elif isinstance(index, slice):
keys = list(self._cells.keys())[index]
for key in keys:
del self._cells[key]
else:
raise TypeError('Index {} is not int type or slice type'.format(index))
self.layer_list = list(self._cells.values())
def __len__(self):
return len(self._cells)
def set_grad(self, flag=True):
self.requires_grad = flag
for layer in self._cells.values():
layer.set_grad(flag)
def append(self, layer):
if _valid_module(layer):
self._cells[str(len(self))] = layer
self.layer_list = list(self._cells.values())
return self
def build(self, inputs_shape):
pass
def forward(self, input_data):
for layer in self.layer_list:
input_data = layer(input_data)
return input_data
class ModuleList(Module):
"""
Holds Modules in a list.
ModuleList can be used like a regular Python list, support
'__getitem__', '__setitem__', '__delitem__', '__len__', '__iter__' and '__iadd__',
but module it contains are properly registered, and will be visible by all Modules methods.
Parameters
----------
args : list
List of subclass of Module.
Methods
---------
__init__()
Initializing the ModuleList.
insert()
Inserts a given layer before a given index in the list.
extend()
Appends layers from a Python iterable to the end of the list.
append()
Appends a given layer to the end of the list.
Examples
---------
>>> from tensorlayerx.nn import Module, ModuleList, Linear
>>> import tensorlayerx as tlx
>>> d1 = Linear(out_features=800, act=tlx.ReLU, in_features=784, name='Linear1')
>>> d2 = Linear(out_features=800, act=tlx.ReLU, in_features=800, name='Linear2')
>>> d3 = Linear(out_features=10, act=tlx.ReLU, in_features=800, name='Linear3')
>>> layer_list = ModuleList([d1, d2])
>>> # Inserts a given d2 before a given index in the list
>>> layer_list.insert(1, d2)
>>> layer_list.insert(2, d2)
>>> # Appends d2 from a Python iterable to the end of the list.
>>> layer_list.extend([d2])
>>> # Appends a given d3 to the end of the list.
>>> layer_list.append(d3)
"""
def __init__(self, modules=None):
super(ModuleList, self).__init__()
if modules is not None:
self.extend(modules)
def __getitem__(self, index):
if isinstance(index, slice):
return self.__class__(list(self._cells.values())[index])
if isinstance(index, int):
index = _valid_index(len(self), index)
return self._cells[str(index)]
raise TypeError('Index {} is not int type or slice type'.format(index))
def __setitem__(self, index, layer):
if not isinstance(index, int):
    raise TypeError('Index {} is not int type'.format(index))
if _valid_module(layer):
    index = _valid_index(len(self), index)
    self._cells[str(index)] = layer
def __delitem__(self, index):
if isinstance(index, int):
index = _valid_index(len(self), index)
del self._cells[str(index)]
elif isinstance(index, slice):
keys = list(self._cells.keys())[index]
for key in keys:
del self._cells[key]
else:
raise TypeError('Index {} is not int type or slice type'.format(index))
temp_dict = OrderedDict()
for idx, layer in enumerate(self._cells.values()):
temp_dict[str(idx)] = layer
self._cells = temp_dict
def __len__(self):
return len(self._cells)
def __iter__(self):
return iter(self._cells.values())
def __iadd__(self, layers):
self.extend(layers)
return self
def insert(self, index, layer):
idx = _valid_index(len(self), index)
_valid_module(layer)
length = len(self)
while length > idx:
self._cells[str(length)] = self._cells[str(length - 1)]
length -= 1
self._cells[str(idx)] = layer
def extend(self, layers):
if not isinstance(layers, list):
raise TypeError('Modules {} should be list of sublayers'.format(layers))
for layer in layers:
if _valid_module(layer):
self._cells[str(len(self))] = layer
return self
def append(self, layer):
if _valid_module(layer):
self._cells[str(len(self))] = layer
def set_grad(self, flag=True):
self.requires_grad = flag
for layer in self._cells.values():
layer.set_grad(flag)
def forward(self, *inputs):
raise NotImplementedError
class ModuleDict(Module):
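"""
Holds Modules in a dictionary.
ModuleDict can be indexed like a regular Python dictionary, and the modules
it contains are properly registered and visible to all Module methods.
Parameters
----------
modules : dict or iterable of (string, Module) pairs, optional
    Modules to be added to the dictionary.
Examples
---------
A minimal sketch (layer names and sizes are illustrative only):
>>> from tensorlayerx.nn import ModuleDict, Linear
>>> layers = ModuleDict({'fc1': Linear(out_features=800, in_features=784),
...                      'fc2': Linear(out_features=10, in_features=800)})
>>> 'fc1' in layers
True
"""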
def __init__(self, modules=None):
super(ModuleDict, self).__init__()
if modules is not None:
self.update(modules)
def __getitem__(self, key):
return self._cells[key]
def __setitem__(self, key, module):
if not isinstance(key, str):
raise TypeError("module name should be a string, but got {}".format(type(key)))
elif '.' in key:
raise KeyError("module name can't contain \".\", got: {}".format(key))
elif key == '':
raise KeyError("module name can't be empty string \"\"")
if _valid_module(module):
self._cells[key] = module
def __delitem__(self, key):
del self._cells[key]
def __len__(self):
return len(self._cells)
def __iter__(self):
return iter(self._cells)
def __contains__(self, key):
return key in self._cells
def clear(self):
self._cells.clear()
def pop(self, key):
temp = self[key]
del self[key]
return temp
def keys(self):
return self._cells.keys()
def items(self):
return self._cells.items()
def values(self):
return self._cells.values()
def update(self, modules):
if not isinstance(modules, container_abcs.Iterable):
raise TypeError(
"ModuleDict.update should be called with an "
"iterable of key/value pairs, but got " + type(modules).__name__
)
if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
for key, module in modules.items():
self[key] = module
else:
for j, m in enumerate(modules):
if not isinstance(m, container_abcs.Iterable):
raise TypeError(
"ModuleDict update sequence element "
"#" + str(j) + " should be Iterable; is" + type(m).__name__
)
if not len(m) == 2:
raise ValueError(
"ModuleDict update sequence element "
"#" + str(j) + " has length " + str(len(m)) + "; 2 is required"
)
self[m[0]] = m[1]
def Parameter(data=None, requires_grad=True, name=None):
return ms.Parameter(default_input=data, requires_grad=requires_grad, name=name)
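# Sketch of the helper above (the tensor value and the name are illustrative):
#
#   import numpy as np
#   import mindspore as ms
#   w = Parameter(ms.Tensor(np.ones((3, 3), dtype=np.float32)), name='w')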
class ParameterList(Module):
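"""
Holds Parameters in a list.
ParameterList can be indexed like a regular Python list; the parameters it
contains are registered under ``self._params`` and can be appended or
extended like ordinary list elements.
Parameters
----------
parameters : iterable, optional
    An iterable of Parameters to add to the list.
"""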
def __init__(self, parameters=None):
super(ParameterList, self).__init__()
if parameters is not None:
self += parameters
def _get_abs_string_index(self, idx):
if not (-len(self) <= idx < len(self)):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx += len(self)
return str(idx)
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.__class__(list(self._params.values())[idx])
else:
idx = self._get_abs_string_index(idx)
return self._params[str(idx)]
def __setitem__(self, idx, parameter):
idx = self._get_abs_string_index(idx)
self._params[str(idx)] = parameter
def __setattr__(self, key, value):
super(ParameterList, self).__setattr__(key, value)
def __len__(self):
return len(self._params)
def __iter__(self):
return iter(self._params.values())
def __iadd__(self, parameters):
return self.extend(parameters)
def __dir__(self):
keys = super(ParameterList, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def append(self, parameter):
self._params[str(len(self))] = parameter
return self
def extend(self, parameters):
if not isinstance(parameters, container_abcs.Iterable):
raise TypeError(
"ParameterList.extend should be called with an "
"iterable, but got " + type(parameters).__name__
)
offset = len(self)
for i, para in enumerate(parameters):
self._params[str(offset + i)] = para
return self
def __call__(self, input):
raise RuntimeError('ParameterList should not be called.')
class ParameterDict(Module):
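"""
Holds Parameters in a dictionary.
ParameterDict can be indexed like a regular Python dictionary; it supports
the usual mapping operations (``keys``, ``items``, ``values``, ``update``,
``pop``, ``get``) over the registered parameters in ``self._params``.
Parameters
----------
parameters : mapping or iterable of (string, Parameter) pairs, optional
    Parameters to be added to the dictionary.
"""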
def __init__(self, parameters=None):
super(ParameterDict, self).__init__()
if parameters is not None:
self.update(parameters)
def __getitem__(self, key):
return self._params[key]
def __setitem__(self, key, parameter):
self._params[key] = parameter
def __delitem__(self, key):
del self._params[key]
def __setattr__(self, key, value):
super(ParameterDict, self).__setattr__(key, value)
def __len__(self) -> int:
return len(self._params)
def __reversed__(self):
return reversed(list(self._params.keys()))
def __iter__(self):
return iter(self._params.keys())
def copy(self):
return ParameterDict(self._params.copy())
def __contains__(self, key):
return key in self._params
def setdefault(self, key, default=None):
if key in self._params:
return self._params[key]
self[key] = default
return self._params[key]
def clear(self):
return self._params.clear()
def pop(self, key):
v = self[key]
del self[key]
return v
def popitem(self):
return self._params.popitem()
def get(self, key, default=None):
return self._params.get(key, default)
def fromkeys(self, keys, default=None):
return ParameterDict(self._params.fromkeys(keys, default))
def keys(self):
return self._params.keys()
def items(self):
return self._params.items()
def values(self):
return self._params.values()
def update(self, parameters):
if not isinstance(parameters, container_abcs.Iterable):
raise TypeError(
"ParametersDict.update should be called with an "
"iterable of key/value pairs, but got " + type(parameters).__name__
)
if isinstance(parameters, (OrderedDict, ParameterDict)):
for key, parameter in parameters.items():
self[key] = parameter
elif isinstance(parameters, container_abcs.Mapping):
for key, parameter in sorted(parameters.items()):
self[key] = parameter
else:
for j, p in enumerate(parameters):
if not isinstance(p, container_abcs.Iterable):
raise TypeError(
"ParameterDict update sequence element "
"#" + str(j) + " should be Iterable; is" + type(p).__name__
)
if not len(p) == 2:
raise ValueError(
"ParameterDict update sequence element "
"#" + str(j) + " has length " + str(len(p)) + "; 2 is required"
)
# parameters as length-2 list too cumbersome to type, see ModuleDict.update comment
self[p[0]] = p[1] # type: ignore[assignment]
def __call__(self, input):
raise RuntimeError('ParameterDict should not be called.')
def _valid_index(layer_num, index):
if not isinstance(index, int):
raise TypeError("Index {} is not int type")
if not -layer_num <= index < layer_num:
raise IndexError("Index should be a number in range [{}, {}), but got {}".format(-layer_num, layer_num, index))
return index % layer_num
def _valid_module(layer):
if issubclass(layer.__class__, Module):
return True
raise TypeError('Module {} is not a subclass of Module'.format(layer))