max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
src/ftd/history.py | FabienTaxil/ftd | 3 | 12788551 | <reponame>FabienTaxil/ftd
"""History related utilities."""
import contextlib
import functools
import logging
import sys
import trace
from maya import cmds
from maya.api import OpenMaya
__all__ = ["repeat", "undo", "undo_chunk", "undo_repeat", "traceit"]
LOG = logging.getLogger(__name__)
def repeat(func):
"""Decorate a function to make it repeatable.
This means that in maya, when the shortcut ``ctrl+G`` is triggered,
the decorated function will be executed again.
"""
@functools.wraps(func)
def _wrapper(*args, **kwargs):
# Store a partial version in the module globals so that it
# can be executed later when the repeat action is triggered.
globals()["_callback"] = functools.partial(func, *args, **kwargs)
# Build the command string that will call the previously stored function
command = "_callback()"
if __name__ != "__main__":
command = "import {0};{0}.{1}".format(__name__, command)
# Add the function to the repeat system of maya
cmds.repeatLast(
addCommandLabel="{f.__module__}.{f.__name__}".format(f=func),
# The `addCommand` flag only accepts mel code :/
addCommand='python("{}")'.format(command),
)
return func(*args, **kwargs)
return _wrapper
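# Hedged usage sketch for the decorator above (run inside a Maya session;
# the function and node name below are illustrative, not part of this module):
#
#     @repeat
#     def make_demo_transform():
#         return cmds.createNode("transform", name="demoTransform")
#
#     make_demo_transform()  # the repeat shortcut (``ctrl+G`` per the docstring) re-runs this call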
def undo(func):
"""The decorator version of the context manager :func:`ftd.context.undo`.
The chunk will be named by the python path of the function
e.g. ``ftd.interaction.undo``.
See the context manager documentation for more information.
"""
@functools.wraps(func)
def _wrapper(*args, **kwargs):
with undo_chunk("{f.__module__}.{f.__name__}".format(f=func)):
return func(*args, **kwargs)
return _wrapper
def undo_repeat(func):
"""Combine :func:`undo` and :func:`repeat` decorators."""
return repeat(undo(func))
@contextlib.contextmanager
def undo_chunk(name=None):
"""Gather all the maya commands under the same undo chunk.
Using the maya :func:`cmds.undoInfo` command to create the chunk can be
dangerous if used incorrectly. If a chunk is opened but never closed
(e.g. an error occurs during execution), the maya undo list may be
corrupted, and some features may not work properly.
This context manager will handle the issue and, like the :func:`open`
function, will ensure that the chunk is properly closed whatever happens
during the execution of the body.
Examples:
First, the default behaviour of Maya. When the undo is performed,
the last node created is correctly undone, but the first one still
exists in the scene:
>>> from maya import cmds
>>> _ = cmds.file(new=True, force=True)
>>> _ = cmds.createNode("transform", name="A")
>>> _ = cmds.createNode("transform", name="B")
>>> cmds.undo()
>>> cmds.objExists("B")
False
>>> cmds.objExists("A")
True
The undo chunk allows a block of commands to be collected within
the same undo chunk, which can be undone at once:
>>> _ = cmds.file(new=True, force=True)
>>> with undo_chunk(name="create_transform"):
... _ = cmds.createNode("transform", name="A")
... _ = cmds.createNode("transform", name="B")
>>> cmds.undoInfo(query=True, undoName=True)
'create_transform'
>>> cmds.undo()
>>> cmds.objExists("B")
False
>>> cmds.objExists("A")
False
Arguments:
name (str): The name with which the chunk can be identified.
"""
try:
cmds.undoInfo(chunkName=name, openChunk=True)
yield
finally:
cmds.undoInfo(chunkName=name, closeChunk=True)
def traceit(func, path):
"""Trace the execution of the given function and make a report.
Arguments:
func (function): The function to execute and trace.
path (str): The filepath where the report will be saved.
"""
stdin = sys.stdin
stdout = sys.stdout
with open(path, "w") as stream:
sys.stdin = stream
sys.stdout = stream
try:
tracer = trace.Trace(count=False, trace=True)
tracer.runfunc(func)
finally:
sys.stdin = stdin
sys.stdout = stdout
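# Hedged usage sketch (the traced callable and output path are illustrative):
#
#     traceit(lambda: cmds.ls(type="transform"), "/tmp/ls_trace.txt")
#
# The report written to the file lists every line executed by the callable.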
| 2.4375 | 2 |
holobot/discord/sdk/servers/models/member_data.py | rexor12/holobot | 1 | 12788552 | from dataclasses import dataclass
from typing import Optional
@dataclass
class MemberData:
user_id: str
avatar_url: str
name: str
nick_name: Optional[str]
@property
def display_name(self) -> str:
return self.nick_name if self.nick_name else self.name
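# Minimal usage sketch (field values are illustrative):
#
#     member = MemberData(user_id="1", avatar_url="", name="Holo", nick_name=None)
#     member.display_name  # -> "Holo"; returns nick_name instead when it is set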
| 3.03125 | 3 |
python/pyutils/pyutils/terminal_color.py | ASMlover/study | 22 | 12788553 | <reponame>ASMlover/study
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2018 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import ctypes
import platform
import sys
__WINDOWS_PLATFORM = platform.system() == 'Windows'
if __WINDOWS_PLATFORM:
__STD_INPUTHANDLE = -10
__STD_OUTPUTHANDLE = -11
__STD_ERRORHANDLE = -12
__FOREGROUND_INTENSITY = 0x08
__FOREGROUND_BLUE = 0x01
__FOREGROUND_GREEN = 0x02
__FOREGROUND_RED = 0x04
__FOREGROUND_YELLOW = 0x06
__FOREGROUND_LIGHTBLUE = __FOREGROUND_BLUE | __FOREGROUND_INTENSITY
__FOREGROUND_LIGHTGREEN = __FOREGROUND_GREEN | __FOREGROUND_INTENSITY
__FOREGROUND_LIGHTRED = __FOREGROUND_RED | __FOREGROUND_INTENSITY
__FOREGROUND_LIGHTYELLOW = __FOREGROUND_YELLOW | __FOREGROUND_INTENSITY
__stdout_handle = ctypes.windll.kernel32.GetStdHandle(__STD_OUTPUTHANDLE)
def __set_terminal_color(c, h=__stdout_handle):
return ctypes.windll.kernel32.SetConsoleTextAttribute(h, c)
def __reset_terminal_color():
__set_terminal_color(__FOREGROUND_RED | __FOREGROUND_GREEN | __FOREGROUND_BLUE)
def color_print(msg, c):
__set_terminal_color(c)
sys.stdout.write(msg + '\n')
__reset_terminal_color()
else:
__FOREGROUND_RESET = '\033[0m'
__FOREGROUND_BLUE = '\033[34m'
__FOREGROUND_GREEN = '\033[32m'
__FOREGROUND_RED = '\033[31m'
__FOREGROUND_YELLOW = '\033[33m'
__FOREGROUND_LIGHTBLUE = '\033[94m'
__FOREGROUND_LIGHTGREEN = '\033[92m'
__FOREGROUND_LIGHTRED = '\033[91m'
__FOREGROUND_LIGHTYELLOW = '\033[93m'
def color_print(msg, c):
sys.stdout.write(c)
sys.stdout.write(msg + '\n')
sys.stdout.write(__FOREGROUND_RESET)
if __name__ == '__main__':
color_print("Hello, world!", __FOREGROUND_RED)
color_print("Hello, world!", __FOREGROUND_GREEN)
color_print("Hello, world!", __FOREGROUND_BLUE)
color_print("Hello, world!", __FOREGROUND_YELLOW)
color_print("Hello, world!", __FOREGROUND_LIGHTRED)
color_print("Hello, world!", __FOREGROUND_LIGHTGREEN)
color_print("Hello, world!", __FOREGROUND_LIGHTBLUE)
color_print("Hello, world!", __FOREGROUND_LIGHTYELLOW)
| 1.148438 | 1 |
Dynamic Programming/741. Cherry Pickup.py | beckswu/Leetcode | 138 | 12788554 | """
741. Cherry Pickup
"""
class Solution:
def cherryPickup(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
n = len(grid)
if grid[0][0] == -1 or grid[n-1][n-1] == -1: return 0
dp = [[-1,]*n for _ in range(n)] # important: must be initialized to -1, not 0
"""
For example, with grid [[1,-1,1],[-1,1,1],[1,1,1]]:
if dp were initialized to 0, then after k=1 we would get dp = [[-1,-1,0],[-1,-1,0],[0,0,0]].
At k=2, i=2, j=2 the only way in is through cells (1,0) and (1,0), i.e. dp[1][1] = -1;
with a 0 initialization the max comparison keeps 0, so the answer wrongly becomes 0 instead of -1.
"""
dp[0][0] = grid[0][0]
for k in range(1,2*n-1):
for i in range(min(k,n-1),max(-1, k-n),-1):
for j in range(min(k,n-1),max(-1, k-n),-1):
if grid[i][k-i] == -1 or grid[j][k-j] == -1:
dp[i][j] = -1
continue
if i>0 : dp[i][j] = max(dp[i][j], dp[i-1][j]) # down, right
if j>0 : dp[i][j] = max(dp[i][j], dp[i][j-1]) # right, down
if i>0 and j>0: dp[i][j] = max(dp[i][j], dp[i-1][j-1]) # down, down
if dp[i][j]<0 :continue
dp[i][j] += grid[i][k-i]
if i!=j:
dp[i][j] += grid[j][k-j]
return max(dp[-1][-1],0)
class Solution:
def cherryPickup(self, grid: List[List[int]]) -> int:
n = len(grid)
if grid[0][0] == -1 or grid[n-1][n-1] == -1: return 0
dp = [[-1,]*n for _ in range(n)]
dp[0][0] = grid[0][0]
for k in range(1,2*n-1):
for i in range(n-1,-1,-1):
for j in range(n-1,-1,-1):
p, q = k-i, k -j
if p < 0 or p >= n or q<0 or q>=n or grid[i][p] == -1 or grid[j][q] == -1:
dp[i][j] = -1
continue
if i>0 : dp[i][j] = max(dp[i][j], dp[i-1][j]) # down, right
if j>0 : dp[i][j] = max(dp[i][j], dp[i][j-1]) # right, down
if i>0 and j>0: dp[i][j] = max(dp[i][j], dp[i-1][j-1]) # down, down
if dp[i][j]<0 :continue
dp[i][j] += grid[i][p]
if i!=j:
dp[i][j] += grid[j][q]
return max(dp[-1][-1],0)
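# Quick sanity check for both solutions above (LeetCode 741, example 1):
#
#     grid = [[0, 1, -1], [1, 0, -1], [1, 1, 1]]
#     Solution().cherryPickup(grid)  # -> 5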
# Top-Down
class Solution:
def cherryPickup(self, grid: List[List[int]]) -> int:
N = len(grid)
lookup = {}
def solve(x1, y1, x2, y2):
# check if we reached bottom right corner
if x1 == N-1 and y1 == N-1:
return grid[x1][y1] if grid[x1][y1] != -1 else float("-inf")
# out of the grid and thorn check
if x1 == N or y1 == N or x2 == N or y2 == N or grid[x1][y1] == -1 or grid[x2][y2] == -1:
return float("-inf")
# memorization check
lookup_key = (x1, y1, x2, y2)
if lookup_key in lookup: return lookup[lookup_key]
# pick your cherries
if x1 == x2 and y1 == y2:
cherries = grid[x1][y1]
else:
cherries = grid[x1][y1] + grid[x2][y2]
res = cherries + max(
solve(x1 + 1, y1, x2 + 1, y2), # right, right
solve(x1, y1 + 1, x2, y2 + 1), # down, down
solve(x1 + 1, y1, x2, y2 + 1), # right, down
solve(x1, y1 + 1, x2 + 1, y2), # down, right
)
lookup[lookup_key] = res
return res
res = solve(0, 0, 0, 0)
return res if res > 0 else 0 | 3.375 | 3 |
Fair_OS.py | Anon-git-site/Fair-Over-Sampling | 0 | 12788555 | # -*- coding: utf-8 -*-
#code adapted from https://github.com/analyticalmindsltd/smote_variants
import numpy as np
import time
import logging
import itertools
from sklearn.neighbors import NearestNeighbors
# setting the _logger format
_logger = logging.getLogger('smote_variants')
_logger.setLevel(logging.DEBUG)
_logger_ch = logging.StreamHandler()
_logger_ch.setFormatter(logging.Formatter(
"%(asctime)s:%(levelname)s:%(message)s"))
_logger.addHandler(_logger_ch)
def mode(data):
values, counts = np.unique(data, return_counts=True)
return values[np.where(counts == max(counts))[0][0]]
class StatisticsMixin:
"""
Mixin to compute class statistics and determine minority/majority labels
"""
def class_label_statistics(self, X, y):
"""
determines class sizes and minority and majority labels
Args:
X (np.array): features
y (np.array): target labels
"""
unique, counts = np.unique(y, return_counts=True)
self.class_stats = dict(zip(unique, counts))
self.min_label = unique[0] if counts[0] < counts[1] else unique[1]
self.maj_label = unique[1] if counts[0] < counts[1] else unique[0]
# shorthands
self.min_label = self.min_label
self.maj_label = self.maj_label
def check_enough_min_samples_for_sampling(self, threshold=2):
if self.class_stats[self.min_label] < threshold:
m = ("The number of minority samples (%d) is not enough "
"for sampling")
m = m % self.class_stats[self.min_label]
_logger.warning(self.__class__.__name__ + ": " + m)
return False
return True
class RandomStateMixin:
"""
Mixin to set random state
"""
def set_random_state(self, random_state):
"""
sets the random_state member of the object
Args:
random_state (int/np.random.RandomState/None): the random state
initializer
"""
self._random_state_init = random_state
if random_state is None:
self.random_state = np.random
elif isinstance(random_state, int):
self.random_state = np.random.RandomState(random_state)
elif isinstance(random_state, np.random.RandomState):
self.random_state = random_state
elif random_state is np.random:
self.random_state = random_state
else:
raise ValueError(
"random state cannot be initialized by " + str(random_state))
class ParameterCheckingMixin:
"""
Mixin to check if parameters come from a valid range
"""
def check_in_range(self, x, name, r):
"""
Check if parameter is in range
Args:
x (numeric): the parameter value
name (str): the parameter name
r (list-like(2)): the lower and upper bound of a range
Throws:
ValueError
"""
if x < r[0] or x > r[1]:
m = ("Value for parameter %s outside the range [%f,%f] not"
" allowed: %f")
m = m % (name, r[0], r[1], x)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_out_range(self, x, name, r):
"""
Check if parameter is outside of range
Args:
x (numeric): the parameter value
name (str): the parameter name
r (list-like(2)): the lower and upper bound of a range
Throws:
ValueError
"""
if x >= r[0] and x <= r[1]:
m = "Value for parameter %s in the range [%f,%f] not allowed: %f"
m = m % (name, r[0], r[1], x)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_less_or_equal(self, x, name, val):
"""
Check if parameter is less than or equal to value
Args:
x (numeric): the parameter value
name (str): the parameter name
val (numeric): value to compare to
Throws:
ValueError
"""
if x > val:
m = "Value for parameter %s greater than %f not allowed: %f > %f"
m = m % (name, val, x, val)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_less_or_equal_par(self, x, name_x, y, name_y):
"""
Check if parameter is less than or equal to another parameter
Args:
x (numeric): the parameter value
name_x (str): the parameter name
y (numeric): the other parameter value
name_y (str): the other parameter name
Throws:
ValueError
"""
if x > y:
m = ("Value for parameter %s greater than parameter %s not"
" allowed: %f > %f")
m = m % (name_x, name_y, x, y)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_less(self, x, name, val):
"""
Check if parameter is less than value
Args:
x (numeric): the parameter value
name (str): the parameter name
val (numeric): value to compare to
Throws:
ValueError
"""
if x >= val:
m = ("Value for parameter %s greater than or equal to %f"
" not allowed: %f >= %f")
m = m % (name, val, x, val)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_less_par(self, x, name_x, y, name_y):
"""
Check if parameter is less than another parameter
Args:
x (numeric): the parameter value
name_x (str): the parameter name
y (numeric): the other parameter value
name_y (str): the other parameter name
Throws:
ValueError
"""
if x >= y:
m = ("Value for parameter %s greater than or equal to parameter"
" %s not allowed: %f >= %f")
m = m % (name_x, name_y, x, y)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_greater_or_equal(self, x, name, val):
"""
Check if parameter is greater than or equal to value
Args:
x (numeric): the parameter value
name (str): the parameter name
val (numeric): value to compare to
Throws:
ValueError
"""
if x < val:
m = "Value for parameter %s less than %f is not allowed: %f < %f"
m = m % (name, val, x, val)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_greater_or_equal_par(self, x, name_x, y, name_y):
"""
Check if parameter is less than or equal to another parameter
Args:
x (numeric): the parameter value
name_x (str): the parameter name
y (numeric): the other parameter value
name_y (str): the other parameter name
Throws:
ValueError
"""
if x < y:
m = ("Value for parameter %s less than parameter %s is not"
" allowed: %f < %f")
m = m % (name_x, name_y, x, y)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_greater(self, x, name, val):
"""
Check if parameter is greater than value
Args:
x (numeric): the parameter value
name (str): the parameter name
val (numeric): value to compare to
Throws:
ValueError
"""
if x <= val:
m = ("Value for parameter %s less than or equal to %f not allowed"
" %f < %f")
m = m % (name, val, x, val)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_greater_par(self, x, name_x, y, name_y):
"""
Check if parameter is greater than or equal to another parameter
Args:
x (numeric): the parameter value
name_x (str): the parameter name
y (numeric): the other parameter value
name_y (str): the other parameter name
Throws:
ValueError
"""
if x <= y:
m = ("Value for parameter %s less than or equal to parameter %s"
" not allowed: %f <= %f")
m = m % (name_x, name_y, x, y)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_equal(self, x, name, val):
"""
Check if parameter is equal to value
Args:
x (numeric): the parameter value
name (str): the parameter name
val (numeric): value to compare to
Throws:
ValueError
"""
if x == val:
m = ("Value for parameter %s equal to parameter %f is not allowed:"
" %f == %f")
m = m % (name, val, x, val)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_equal_par(self, x, name_x, y, name_y):
"""
Check if parameter is equal to another parameter
Args:
x (numeric): the parameter value
name_x (str): the parameter name
y (numeric): the other parameter value
name_y (str): the other parameter name
Throws:
ValueError
"""
if x == y:
m = ("Value for parameter %s equal to parameter %s is not "
"allowed: %f == %f")
m = m % (name_x, name_y, x, y)
raise ValueError(self.__class__.__name__ + ": " + m)
def check_isin(self, x, name, li):
"""
Check if parameter is in list
Args:
x (numeric): the parameter value
name (str): the parameter name
li (list): list to check if parameter is in it
Throws:
ValueError
"""
if x not in li:
m = "Value for parameter %s not in list %s is not allowed: %s"
m = m % (name, str(li), str(x))
raise ValueError(self.__class__.__name__ + ": " + m)
def check_n_jobs(self, x, name):
"""
Check n_jobs parameter
Args:
x (int/None): number of jobs
name (str): the parameter name
Throws:
ValueError
"""
if not ((x is None)
or (x is not None and isinstance(x, int) and not x == 0)):
m = "Value for parameter n_jobs is not allowed: %s" % str(x)
raise ValueError(self.__class__.__name__ + ": " + m)
class ParameterCombinationsMixin:
"""
Mixin to generate parameter combinations
"""
@classmethod
def generate_parameter_combinations(cls, dictionary, raw):
"""
Generates reasonable parameter combinations
Args:
dictionary (dict): dictionary of parameter ranges
raw (bool): if True, return the raw dictionary instead of expanding
it into the list of combinations
"""
if raw:
return dictionary
keys = sorted(list(dictionary.keys()))
values = [dictionary[k] for k in keys]
combinations = [dict(zip(keys, p))
for p in list(itertools.product(*values))]
return combinations
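# Illustrative expansion (parameter names are made up): for
#     {'proportion': [0.5, 1.0], 'n_neighbors': [3, 5]}
# a non-raw call returns the four grid points
#     {'n_neighbors': 3, 'proportion': 0.5}, {'n_neighbors': 3, 'proportion': 1.0},
#     {'n_neighbors': 5, 'proportion': 0.5}, {'n_neighbors': 5, 'proportion': 1.0}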
class NoiseFilter(StatisticsMixin,
ParameterCheckingMixin,
ParameterCombinationsMixin):
"""
Parent class of noise filtering methods
"""
def __init__(self):
"""
Constructor
"""
pass
def remove_noise(self, X, y):
"""
Removes noise
Args:
X (np.array): features
y (np.array): target labels
"""
pass
def get_params(self, deep=False):
"""
Return parameters
Returns:
dict: dictionary of parameters
"""
return {}
def set_params(self, **params):
"""
Set parameters
Args:
params (dict): dictionary of parameters
"""
for key, value in params.items():
setattr(self, key, value)
return self
class OverSampling(StatisticsMixin,
ParameterCheckingMixin,
ParameterCombinationsMixin,
RandomStateMixin):
"""
Base class of oversampling methods
"""
categories = []
cat_noise_removal = 'NR'
cat_dim_reduction = 'DR'
cat_uses_classifier = 'Clas'
cat_sample_componentwise = 'SCmp'
cat_sample_ordinary = 'SO'
cat_sample_copy = 'SCpy'
cat_memetic = 'M'
cat_density_estimation = 'DE'
cat_density_based = 'DB'
cat_extensive = 'Ex'
cat_changes_majority = 'CM'
cat_uses_clustering = 'Clus'
cat_borderline = 'BL'
cat_application = 'A'
def __init__(self):
pass
def det_n_to_sample(self, strategy, n_maj, n_min):
"""
Determines the number of samples to generate
Args:
strategy (str/float): if float, the fraction of the difference
of the minority and majority numbers to
generate, e.g. 0.1 means that 10% of the
difference will be generated; if str,
like 'min2maj', the minority class will
be upsampled to match the cardinality
of the majority class
"""
if isinstance(strategy, float) or isinstance(strategy, int):
return max([0, int((n_maj - n_min)*strategy)])
else:
m = "Value %s for parameter strategy is not supported" % strategy
raise ValueError(self.__class__.__name__ + ": " + m)
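# For example, with n_maj=80, n_min=20 and strategy=1.0 this returns 60,
# i.e. enough synthetic samples to balance the two classes; strategy=0.5
# would return 30.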
def sample_between_points(self, x, y):
"""
Sample randomly along the line between two points.
Args:
x (np.array): point 1
y (np.array): point 2
Returns:
np.array: the new sample
"""
return x + (y - x)*self.random_state.random_sample()
def sample_between_points_componentwise(self, x, y, mask=None):
"""
Sample each dimension separately between the two points.
Args:
x (np.array): point 1
y (np.array): point 2
mask (np.array): array of 0,1s - specifies which dimensions
to sample
Returns:
np.array: the new sample being generated
"""
if mask is None:
return x + (y - x)*self.random_state.random_sample()
else:
return x + (y - x)*self.random_state.random_sample()*mask
def sample_by_jittering(self, x, std):
"""
Sample by jittering.
Args:
x (np.array): base point
std (float): standard deviation
Returns:
np.array: the new sample
"""
return x + (self.random_state.random_sample() - 0.5)*2.0*std
def sample_by_jittering_componentwise(self, x, std):
"""
Sample by jittering componentwise.
Args:
x (np.array): base point
std (np.array): standard deviation
Returns:
np.array: the new sample
"""
return x + (self.random_state.random_sample(len(x))-0.5)*2.0 * std
def sample_by_gaussian_jittering(self, x, std):
"""
Sample by Gaussian jittering
Args:
x (np.array): base point
std (np.array): standard deviation
Returns:
np.array: the new sample
"""
return self.random_state.normal(x, std)
def sample(self, X, y):
"""
The sampling function, reimplemented in child classes
Args:
X (np.matrix): features
y (np.array): labels
Returns:
np.matrix, np.array: sampled X and y
"""
return X, y
def fit_resample(self, X, y):
"""
Alias of the function "sample" for compatibility with imbalanced-learn
pipelines
"""
return self.sample(X, y)
def sample_with_timing(self, X, y):
begin = time.time()
X_samp, y_samp = self.sample(X, y)
_logger.info(self.__class__.__name__ + ": " +
("runtime: %f" % (time.time() - begin)))
return X_samp, y_samp
def preprocessing_transform(self, X):
"""
Transforms new data according to the possible transformation
implemented by the function "sample".
Args:
X (np.matrix): features
Returns:
np.matrix: transformed features
"""
return X
def get_params(self, deep=False):
"""
Returns the parameters of the object as a dictionary.
Returns:
dict: the parameters of the object
"""
pass
def set_params(self, **params):
"""
Set parameters
Args:
params (dict): dictionary of parameters
"""
for key, value in params.items():
setattr(self, key, value)
return self
def descriptor(self):
"""
Returns:
str: JSON description of the current sampling object
"""
return str((self.__class__.__name__, str(self.get_params())))
def __str__(self):
return self.descriptor()
class FOS_1(OverSampling): #F4_SMOTE(OverSampling):
categories = [OverSampling.cat_sample_ordinary,
OverSampling.cat_extensive]
def __init__(self,
proportion=1.0,
n_neighbors=5,
n_jobs=1,
random_state=None):
super().__init__()
self.check_greater_or_equal(proportion, "proportion", 0)
self.check_greater_or_equal(n_neighbors, "n_neighbors", 1)
self.check_n_jobs(n_jobs, 'n_jobs')
self.proportion = proportion
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
self.set_random_state(random_state)
@classmethod
def parameter_combinations(cls, raw=False):
parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,
1.0, 1.1,1.5, 2.0],
'n_neighbors': [3, 5, 7]}
return cls.generate_parameter_combinations(parameter_combinations, raw)
def sample(self, X, y,prot_idx, pv_mid_pt, prot_grp, maj_min, nsamp,
pv_max,pv_min):
_logger.info(self.__class__.__name__ + ": " +
"Running sampling via %s" % self.descriptor())
self.class_label_statistics(X, y)
if not self.check_enough_min_samples_for_sampling():
return X.copy(), y.copy()
y = np.squeeze(y)
n_to_sample = nsamp
if maj_min == 0:
X_min = X[y == self.min_label]
y_min = y[y == self.min_label]
prot = X_min[:,prot_idx]
if prot_grp == 0:
X_min = X_min[prot==prot_grp]
y_min = y_min[prot==prot_grp]
if prot_grp == 1:
X_min = X_min[prot==prot_grp]
y_min = y_min[prot==prot_grp]
if maj_min == 1:
X_min = X[y == self.maj_label]
y_min = y[y == self.maj_label]
prot = X_min[:,prot_idx]
if prot_grp == 0:
X_min = X_min[prot==prot_grp]
y_min = y_min[prot==prot_grp]
if prot_grp == 1:
X_min = X_min[prot==prot_grp]
y_min = y_min[prot==prot_grp]
self.min_label = np.copy(self.maj_label)
if n_to_sample == 0:
return X.copy(), y.copy()
# fitting the model
n_neigh = min([len(X_min), self.n_neighbors+1])
nn = NearestNeighbors(n_neighbors=n_neigh, n_jobs=self.n_jobs)
nn.fit(X_min)
dist, ind = nn.kneighbors(X_min)
if n_to_sample == 0:
return X.copy(), y.copy()
# generating samples
#np.random.seed(seed=1)
base_indices = self.random_state.choice(list(range(len(X_min))),
n_to_sample)
neighbor_indices = self.random_state.choice(list(range(1, n_neigh)),
n_to_sample)
X_base = X_min[base_indices]
X_neighbor = X_min[ind[base_indices, neighbor_indices]]
samples = X_base + np.multiply(self.random_state.rand(n_to_sample,
1),
X_neighbor - X_base)
return (np.vstack([X, samples]),
np.hstack([y, np.hstack([self.min_label]*n_to_sample)]))
def get_params(self, deep=False):
return {'proportion': self.proportion,
'n_neighbors': self.n_neighbors,
'n_jobs': self.n_jobs,
'random_state': self._random_state_init}
class FOS_2(OverSampling): #F3a_SMOTE(OverSampling):
categories = [OverSampling.cat_sample_ordinary,
OverSampling.cat_extensive]
def __init__(self,
proportion=1.0,
n_neighbors=5,
n_jobs=1,
random_state=None):
super().__init__()
self.check_greater_or_equal(proportion, "proportion", 0)
self.check_greater_or_equal(n_neighbors, "n_neighbors", 1)
self.check_n_jobs(n_jobs, 'n_jobs')
self.proportion = proportion
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
self.set_random_state(random_state)
@classmethod
def parameter_combinations(cls, raw=False):
parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,
1.0, 1.1,1.5, 2.0],
'n_neighbors': [3, 5, 7]}
return cls.generate_parameter_combinations(parameter_combinations, raw)
def sample(self, X, y,prot_idx, pv_mid_pt, prot_grp, maj_min, nsamp):
_logger.info(self.__class__.__name__ + ": " +
"Running sampling via %s" % self.descriptor())
self.class_label_statistics(X, y)
if not self.check_enough_min_samples_for_sampling():
return X.copy(), y.copy()
n_to_sample = nsamp
if maj_min == 0:
X_min = X[y == self.min_label]
y_min = y[y == self.min_label]
prot = X_min[:,prot_idx]
if prot_grp == 0:
X_min1 = X_min[prot<pv_mid_pt]
y_min1 = y_min[prot<pv_mid_pt]
if prot_grp == 1:
X_min1 = X_min[prot>pv_mid_pt]
y_min1 = y_min[prot>pv_mid_pt]
if maj_min == 1:
X_min = X[y == self.maj_label]
y_min = y[y == self.maj_label]
prot = X_min[:,prot_idx]
if prot_grp == 0:
X_min1 = X_min[prot<pv_mid_pt]
y_min1 = y_min[prot<pv_mid_pt]
if prot_grp == 1:
X_min1 = X_min[prot>pv_mid_pt]
y_min1 = y_min[prot>pv_mid_pt]
self.min_label = np.copy(self.maj_label)
if n_to_sample == 0:
return X.copy(), y.copy()
# fitting the model
n_neigh = min([len(X_min), self.n_neighbors+1])
nn = NearestNeighbors(n_neighbors=n_neigh, n_jobs=self.n_jobs)
nn.fit(X_min)
dist, ind = nn.kneighbors(X_min1)
if n_to_sample == 0:
return X.copy(), y.copy()
# generating samples
np.random.seed(seed=1)
base_indices = self.random_state.choice(list(range(len(X_min1))),
n_to_sample)
neighbor_indices = self.random_state.choice(list(range(1, n_neigh)),
n_to_sample)
X_base = X_min1[base_indices]
X_neighbor = X_min[ind[base_indices, neighbor_indices]]
samples = X_base + np.multiply(self.random_state.rand(n_to_sample,
1),
X_neighbor - X_base)
return (np.vstack([X, samples]),
np.hstack([y, np.hstack([self.min_label]*n_to_sample)]))
def get_params(self, deep=False):
return {'proportion': self.proportion,
'n_neighbors': self.n_neighbors,
'n_jobs': self.n_jobs,
'random_state': self._random_state_init}
| 2.640625 | 3 |
lambda_function.py | cloudbasic/Lambda-Update-Route53 | 0 | 12788556 | <filename>lambda_function.py
from __future__ import print_function
import boto3, json, re
HOSTED_ZONE_ID = 'YOUR_HOSTED_ZONE_ID'
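# The handler below expects an event shaped like this (values are illustrative):
#     {"name": "api.example.com.", "value": "lb-123456.us-east-1.elb.amazonaws.com"}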
def lambda_handler(event, context):
route53 = boto3.client('route53')
dns_changes = {
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': event['name'],
'Type': 'CNAME',
'ResourceRecords': [
{
'Value': event['value']
}
],
'TTL': 300
}
}
]
}
print("Updating Route53")
response = route53.change_resource_record_sets(
HostedZoneId=HOSTED_ZONE_ID,
ChangeBatch=dns_changes
)
return {'status':response['ChangeInfo']['Status']} | 2.375 | 2 |
lcd.py | zimolzak/Raspberry-Pi-newbie | 0 | 12788557 | #!/usr/bin/env python
from telnetlib import Telnet
import time
tn = Telnet('192.168.1.4', 13666, None)
tn.write("hello\n")
tn.write("screen_add s1\n")
tn.write("screen_set s1 -priority 1\n")
tn.write("widget_add s1 w1 string\n")
tn.write("widget_add s1 w2 string\n")
def lcd_string(x, telnet_obj, delay=2):
L = []
for i in range(len(x)):
if i % (15+16) == 0:
L.append(x[i:i+15+16])
for s in L:
s1 = s[0:15]
s2 = s[15:]
telnet_obj.write("widget_set s1 w1 1 1 {" + s1 + "}\n")
telnet_obj.write("widget_set s1 w2 1 2 {" + s2 + "}\n")
time.sleep(delay)
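# Illustrative call (assumes the LCDd server configured above is reachable
# and drives a two-row character display):
#     lcd_string("Hello from the Pi, scrolling across both rows of the LCD", tn)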
| 2.84375 | 3 |
HackerRank/10 Days of Statistics/Day4B.py | ShubhamJagtap2000/competitive-programming-1 | 1 | 12788558 | import math
print(round((pow(0.88, 10) + (1.2*pow(0.88, 9)) + (45*pow(0.88, 8)*pow(0.12, 2))), 3))
print(round((1 - (pow(0.88, 10) + (1.2*pow(0.88, 9)))), 3))
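# Equivalent, more general form (sketch; math.comb needs Python 3.8+):
#     def binom_cdf(k, n, p):
#         return sum(math.comb(n, i) * p**i * (1 - p)**(n - i) for i in range(k + 1))
#     print(round(binom_cdf(2, 10, 0.12), 3))      # P(X <= 2), same as the first line
#     print(round(1 - binom_cdf(1, 10, 0.12), 3))  # P(X >= 2), same as the second line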
| 3.5 | 4 |
tests/api/views/v1/test_api_execution.py | angry-tony/ceph-lcm-decapod | 41 | 12788559 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for /v1/execution API endpoint."""
import hashlib
import unittest.mock
import gridfs.grid_file
import pytest
from decapod_common.models import cluster
from decapod_common.models import execution
from decapod_common.models import execution_step
from decapod_common.models import role
from decapod_common.models import task
@pytest.fixture
def clean_execution_collection(configure_model, pymongo_connection):
pymongo_connection.db.execution.remove({})
@pytest.fixture
def valid_post_request(new_pcmodel):
return {
"playbook_configuration": {
"id": new_pcmodel.model_id,
"version": new_pcmodel.version
}
}
@pytest.fixture
def sudo_client(sudo_client_v1, public_playbook_name, sudo_role):
role.PermissionSet.add_permission("playbook", public_playbook_name)
sudo_role.add_permissions("playbook", [public_playbook_name])
sudo_role.save()
return sudo_client_v1
@pytest.fixture
def mock_task_class(monkeypatch):
mocked = unittest.mock.MagicMock()
monkeypatch.setattr(task, "PlaybookPluginTask", mocked)
return mocked
@pytest.fixture
def new_execution_with_logfile(new_execution, execution_log_storage):
def side_effect(model_id):
if model_id != new_execution.model_id:
return None
mock = unittest.mock.MagicMock(spec=gridfs.grid_file.GridOut)
mock.read.side_effect = b"LOG", b""
mock.__iter__.return_value = [b"LOG"]
mock.content_type = "text/plain"
mock.filename = "filename.log"
mock.md5 = hashlib.md5(b"LOG").hexdigest()
return mock
execution_log_storage.get.side_effect = side_effect
return new_execution
def create_execution_step(execution_id, srv, state):
db_model = {
"execution_id": execution_id,
"role": pytest.faux.gen_alpha(),
"name": pytest.faux.gen_alpha(),
"result": state.value,
"error": {},
"server_id": srv.model_id,
"time_started": pytest.faux.gen_integer(1, 100),
"time_finished": pytest.faux.gen_integer(101)
}
execution_step.ExecutionStep.collection().insert_one(db_model)
def test_post_access(sudo_client, client_v1, sudo_user, freeze_time,
normal_user, valid_post_request):
response = client_v1.post("/v1/execution/", data=valid_post_request)
assert response.status_code == 401
assert response.json["error"] == "Unauthorized"
client_v1.login(normal_user.login, "qwerty")
response = client_v1.post("/v1/execution/", data=valid_post_request)
assert response.status_code == 403
assert response.json["error"] == "Forbidden"
response = sudo_client.post("/v1/execution/", data=valid_post_request)
assert response.status_code == 200
def test_post_result(sudo_client, new_pcmodel, freeze_time,
valid_post_request):
response = sudo_client.post("/v1/execution/", data=valid_post_request)
assert response.json["data"]["playbook_configuration"]["id"] \
== new_pcmodel.model_id
assert response.json["data"]["playbook_configuration"]["version"] \
== new_pcmodel.version
assert response.json["data"]["playbook_configuration"]["playbook_name"] \
== new_pcmodel.playbook_id
assert response.json["data"]["state"] \
== execution.ExecutionState.created.name
tsk = task.Task.get_by_execution_id(
response.json["id"], task.TaskType.playbook.name)
assert tsk
assert not tsk.time_started
assert not tsk.time_completed
assert not tsk.time_failed
assert not tsk.time_cancelled
assert tsk.time_updated == int(freeze_time.return_value)
assert tsk.time_created == int(freeze_time.return_value)
def test_post_result_deleted_cluster(sudo_client, new_pcmodel, freeze_time,
valid_post_request):
clus = cluster.ClusterModel.create(pytest.faux.gen_alpha())
clus.delete()
new_pcmodel.cluster = clus
new_pcmodel.save()
valid_post_request["playbook_configuration"]["version"] = \
new_pcmodel.version
response = sudo_client.post("/v1/execution/", data=valid_post_request)
assert response.status_code == 400
@pytest.mark.parametrize("what", ("id", "version"))
def test_post_fake_playbook_configuration(what, sudo_client,
valid_post_request):
if what == "id":
valid_post_request["playbook_configuration"]["id"] \
= pytest.faux.gen_uuid()
else:
valid_post_request["playbook_configuration"]["version"] \
= pytest.faux.gen_integer(3)
response = sudo_client.post("/v1/execution/", data=valid_post_request)
assert response.status_code == 400
assert response.json["error"] == "UnknownPlaybookConfiguration"
def test_post_cannot_create_task(sudo_client, mock_task_class,
valid_post_request, pymongo_connection,
clean_execution_collection):
mock_task_class.side_effect = Exception
response = sudo_client.post("/v1/execution/", data=valid_post_request)
assert response.status_code == 400
db_model = pymongo_connection.db.execution.find({})
db_model = list(db_model)
assert len(db_model) == 2
db_model = max((mdl for mdl in db_model), key=lambda x: x["version"])
assert db_model["state"] == execution.ExecutionState.failed.name
def test_delete_access(sudo_client, client_v1, sudo_user, freeze_time,
normal_user, valid_post_request):
resp = sudo_client.post("/v1/execution/", data=valid_post_request)
assert resp.status_code == 200
response = client_v1.delete(
"/v1/execution/{0}/".format(resp.json["id"]))
assert response.status_code == 401
assert response.json["error"] == "Unauthorized"
client_v1.login(normal_user.login, "qwerty")
response = client_v1.delete(
"/v1/execution/{0}/".format(resp.json["id"]))
assert response.status_code == 403
assert response.json["error"] == "Forbidden"
response = sudo_client.delete(
"/v1/execution/{0}/".format(resp.json["id"]))
assert response.status_code == 200
def test_delete_not_started(sudo_client, valid_post_request):
resp = sudo_client.post("/v1/execution/", data=valid_post_request)
resp = sudo_client.delete("/v1/execution/{0}/".format(resp.json["id"]))
excmodel = execution.ExecutionModel.find_by_model_id(resp.json["id"])
assert excmodel.state == execution.ExecutionState.canceled
def test_delete_started(sudo_client, valid_post_request):
resp = sudo_client.post("/v1/execution/", data=valid_post_request)
tsk = task.Task.get_by_execution_id(resp.json["id"],
task.TaskType.playbook)
tsk.start()
excmodel = execution.ExecutionModel.find_by_model_id(resp.json["id"])
assert excmodel.state == execution.ExecutionState.started
resp = sudo_client.delete("/v1/execution/{0}/".format(resp.json["id"]))
assert resp.status_code == 200
excmodel = execution.ExecutionModel.find_by_model_id(resp.json["id"])
assert excmodel.state == execution.ExecutionState.canceling
tsk = task.Task.get_by_execution_id(resp.json["id"], task.TaskType.cancel)
assert tsk
def test_api_get_access(sudo_client, client_v1, normal_user):
response = client_v1.get("/v1/execution/")
assert response.status_code == 401
assert response.json["error"] == "Unauthorized"
client_v1.login(normal_user.login, "qwerty")
response = client_v1.get("/v1/execution/")
assert response.status_code == 403
assert response.json["error"] == "Forbidden"
response = sudo_client.get("/v1/execution/")
assert response.status_code == 200
def test_get(sudo_client, clean_execution_collection, valid_post_request):
resp = sudo_client.post("/v1/execution/", data=valid_post_request)
model_id = resp.json["id"]
resp = sudo_client.get("/v1/execution/")
assert resp.status_code == 200
assert resp.json["total"] == 1
assert len(resp.json["items"]) == 1
resp = sudo_client.get("/v1/execution/{0}/".format(model_id))
assert resp.status_code == 200
resp = sudo_client.get("/v1/execution/{0}/version/".format(model_id))
assert resp.status_code == 200
assert resp.json["total"] == 1
assert len(resp.json["items"]) == 1
resp = sudo_client.get("/v1/execution/{0}/version/1/".format(model_id))
assert resp.status_code == 200
@pytest.mark.parametrize("state", execution_step.ExecutionStepState)
def test_get_execution_steps(state, sudo_client, new_server,
valid_post_request):
resp = sudo_client.post("/v1/execution/", data=valid_post_request)
model_id = resp.json["id"]
for _ in range(5):
create_execution_step(model_id, new_server, state)
resp = sudo_client.get("/v1/execution/{0}/steps/".format(model_id))
assert resp.status_code == 200
assert resp.json["total"] == 5
assert len(resp.json["items"]) == 5
assert all((item["data"]["execution_id"] == model_id)
for item in resp.json["items"])
assert all((item["data"]["result"] == state.name)
for item in resp.json["items"])
def test_get_execution_log_fail(sudo_client, client_v1, normal_user,
new_execution_with_logfile):
response = client_v1.get(
"/v1/execution/{0}/log/".format(new_execution_with_logfile.model_id))
assert response.status_code == 401
assert response.json["error"] == "Unauthorized"
client_v1.login(normal_user.login, "qwerty")
response = client_v1.get(
"/v1/execution/{0}/log/".format(new_execution_with_logfile.model_id))
assert response.status_code == 403
assert response.json["error"] == "Forbidden"
response = sudo_client.get(
"/v1/execution/{0}/log/".format(new_execution_with_logfile.model_id))
assert response.status_code == 200
@pytest.mark.parametrize("download", (True, False))
def test_get_execution_plain_text_log(download, sudo_client,
new_execution_with_logfile):
query = "?download=yes" if download else ""
response = sudo_client.get(
"/v1/execution/{0}/log/{1}".format(
new_execution_with_logfile.model_id, query))
assert response.status_code == 200
assert response.headers.get("Content-Type").startswith("text/plain")
assert response.headers.get("ETag") == "\"{0}\"".format(
hashlib.md5(b"LOG").hexdigest()
)
assert response.data == b"LOG"
if download:
assert response.headers["Content-Disposition"] == \
"attachment; filename=filename.log"
else:
assert "Content-Disposition" not in response.headers
@pytest.mark.parametrize("download", (False,))
def test_get_execution_json_log(download, sudo_client,
new_execution_with_logfile):
query = "?download=yes" if download else ""
response = sudo_client.get(
"/v1/execution/{0}/log/{1}".format(
new_execution_with_logfile.model_id, query),
content_type="application/json"
)
assert response.status_code == 200
if download:
assert response.headers.get("Content-Type").startswith("text/plain")
else:
assert response.headers.get("Content-Type").startswith(
"application/json")
assert response.json == {"data": "LOG"}
if download:
assert response.headers["Content-Disposition"] == \
"attachment; filename=filename.log"
else:
assert "Content-Disposition" not in response.headers
| 1.71875 | 2 |
torchnlp/samplers/noisy_sorted_sampler.py | MPetrochuk/PyTorch-NLP | 2,125 | 12788560 | <reponame>MPetrochuk/PyTorch-NLP
import random
from torch.utils.data.sampler import Sampler
from torchnlp.utils import identity
def _uniform_noise(_):
return random.uniform(-1, 1)
class NoisySortedSampler(Sampler):
""" Samples elements sequentially with noise.
**Background**
``NoisySortedSampler`` is similar to a ``BucketIterator`` found in popular libraries like
`AllenNLP` and `torchtext`. A ``BucketIterator`` pools together examples with a similar size
length to reduce the padding required for each batch. ``BucketIterator`` also includes the
ability to add noise to the pooling.
**AllenNLP Implementation:**
https://github.com/allenai/allennlp/blob/e125a490b71b21e914af01e70e9b00b165d64dcd/allennlp/data/iterators/bucket_iterator.py
**torchtext Implementation:**
https://github.com/pytorch/text/blob/master/torchtext/data/iterator.py#L225
Args:
data (iterable): Data to sample from.
sort_key (callable): Specifies a function of one argument that is used to extract a
numerical comparison key from each list element.
get_noise (callable): Noise added to each numerical ``sort_key``.
Example:
>>> from torchnlp.random import set_seed
>>> set_seed(123)
>>>
>>> import random
>>> get_noise = lambda i: round(random.uniform(-1, 1))
>>> list(NoisySortedSampler(range(10), sort_key=lambda i: i, get_noise=get_noise))
[0, 1, 2, 3, 5, 4, 6, 7, 9, 8]
"""
def __init__(self, data, sort_key=identity, get_noise=_uniform_noise):
super().__init__(data)
self.data = data
self.sort_key = sort_key
self.get_noise = get_noise
def __iter__(self):
zip_ = []
for i, row in enumerate(self.data):
value = self.get_noise(row) + self.sort_key(row)
zip_.append(tuple([i, value]))
zip_ = sorted(zip_, key=lambda r: r[1])
return iter([item[0] for item in zip_])
def __len__(self):
return len(self.data)
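# Typical use with a DataLoader (sketch; the dataset and sort key are illustrative):
#
#     from torch.utils.data import DataLoader
#     sampler = NoisySortedSampler(dataset, sort_key=lambda row: len(row["text"]))
#     loader = DataLoader(dataset, batch_size=32, sampler=sampler)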
| 3.359375 | 3 |
gawseed/threatfeed/search/ssh.py | gawseed/threat-feed-tools | 2 | 12788561 | from gawseed.threatfeed.search.ip import IPSearch
class SSHSearch(IPSearch):
"""Searches data for threats, but requires the auth_success field to
be True. IE, only successful logins will be considered a match.
Use the 'ip' module if you don't want this restriction applied."""
def __init__(self, conf, search_list, data_iterator, binary_search):
super().__init__(conf, search_list, data_iterator, binary_search)
# Really need a tri-nary option for this
self._auth_success_key = self.config('auth_success_key', 'auth_success',
help="When searching for authenticated ssh connections, use this column name to determine if authentication suceeded")
self._auth_success_value = self.config('auth_success_value', True,
help="The value that should match the authentication field identified by auth_success_key")
def initialize(self):
super().initialize()
self._auth_success_key = self._data_iterator.encode_item(self._auth_success_key)
if type(self._auth_success_value) == str:
self._auth_success_value = self._data_iterator.encode_item(self._auth_success_value)
def search(self, row):
if not self._auth_success_key or self._auth_success_key not in row:
return None
if row[self._auth_success_key] != self._auth_success_value:
return None
return super().search(row) # pass the rest to the IP searcher
| 2.765625 | 3 |
survey/mixins/data_mixins.py | vahndi/quant-survey | 2 | 12788562 | from typing import Callable, Optional
from numpy import nan
from pandas import Series, isnull, Interval
from pandas.core.dtypes.inference import is_number
class ObjectDataMixin(object):
_data: Optional[Series]
_validate_data: Callable[[Series], None]
def _set_data(self, data: Series):
self.data = data
@property
def data(self) -> Series:
return self._data
@data.setter
def data(self, data: Series):
if data is None:
self._data = None
else:
self._validate_data(data)
self._data = data
class NumericDataMixin(object):
_data: Optional[Series]
_validate_data: Callable[[Series], None]
def _set_data(self, data: Series):
self.data = data
@property
def data(self) -> Series:
return self._data
@data.setter
def data(self, data: Series):
if data is None:
self._data = None
else:
self._validate_data(data)
try:
data = data.astype(int)
except ValueError:
data = data.astype(float)
self._data = data
class SingleCategoryDataMixin(object):
_data: Optional[Series]
name: str
_validate_data: Callable[[Series], None]
def _set_data(self, data: Series):
if data is not None:
self.data = data
@property
def data(self) -> Series:
return self._data
@data.setter
def data(self, data: Series):
if data is None:
self._data = None
else:
data = Series(
index=data.index,
data=[nan if isnull(d)
else d if type(d) is str
else d if type(d) is Interval
else str(int(d)) if is_number(d) and d == int(d)
else str(d)
for d in data.values],
name=self.name
).astype('category')
self._validate_data(data)
self._data = data
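# Note on the setter above: whole-number floats such as 2.0 become the string
# category '2', strings and Intervals are kept as-is, and nulls stay NaN.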
class MultiCategoryDataMixin(object):
_data: Optional[Series]
name: str
_validate_data: Callable[[Series], None]
def _set_data(self, data: Series):
self.data = data
@property
def data(self) -> Series:
return self._data
@data.setter
def data(self, data: Series):
if data is None:
self._data = None
else:
data = Series(
index=data.index,
data=[
nan if isnull(d)
else nan if type(d) is str and d == ''
else d if type(d) is str
else str(d)
for d in data.values
],
name=self.name
)
self._validate_data(data)
self._data = data
| 2.765625 | 3 |
tests/comply/__init__.py | repsistance/keripy | 26 | 12788563 | """
Compliance test package
"""
| 0.953125 | 1 |
src/gae_flask_boilerplate/app.py | euri10/python-gae_flask_boilerplate | 0 | 12788564 | <filename>src/gae_flask_boilerplate/app.py<gh_stars>0
from gae_flask_boilerplate import create_app
app = create_app('default')
| 1.390625 | 1 |
20-hs-redez-sem/groups/05-decentGames/src/DontGetAngry.py | Kyrus1999/BACnet | 8 | 12788565 | <gh_stars>1-10
import copy
import json
import os
import random
import socket
import sys
import xmlrpc.client
import State
from AbsGame import AbsGame, MY_IP
from DGA import DGA
from Exceptions import FileAlreadyExists
from GameInformation import GameInformation
class DontGetAngry(AbsGame):
is_looping = True
def _sync_log(self) -> None:
pass
def fetch(self):
n = self.__ginfo.get_seq()
self._fetch_lines(self.__game_path, n, self.__ip1, self.__ip2)
def refresh(self):
print('Refreshing')
pass
# with xmlrpc.client.ServerProxy("http://%s:8001/" % self.__ip1) as proxy:
# file_string = proxy.is_even(self.__game_path)
#
# # Only refresh if it is the next sequence number
# if DGA(json.loads(file_string)).get_seq() == self.__ginfo.get_seq() + 1:
# with open(self.__game_path, 'w') as f:
# f.write(file_string + '\n')
# f.close()
# return
#
# with xmlrpc.client.ServerProxy("http://%s:8001/" % self.__ip2) as proxy:
# file_string = proxy.is_even(self.__game_path)
#
# # Only refresh if it is the next sequence number
# if DGA(json.loads(file_string)).get_seq() == self.__ginfo.get_seq() + 1:
# with open(self.__game_path, 'w') as f:
# f.write(file_string + '\n')
# f.close()
def __init__(self, game_id: str, ip1: str, ip2):
self.__game_id = game_id
self.__game_path = 'games/%s.dga' % game_id
self.__ip1 = ip1
self.__ip2 = ip2
self.__playable = False
self.__game_is_updated = False
if game_id is not None:
with open(self.__game_path, 'r') as f:
time, game_info = f.read().splitlines()[-1].split('$')
self.__ginfo: DGA = DGA(json.loads(game_info))
self.__curr_game = self.__ginfo.get_board()
if self._validate(self.__curr_game):
if not self.__ginfo.game_is_initiated():
if self.__ginfo.can_i_update():
self._update()
print('Game is leaving the loop')
self.is_looping = False
if self.__ginfo.get_player(self._get_turn_of()) == self.get_who_am_i()\
and self.get_ginfo().get_status() == State.ONGOING:
self.__playable = True
else:
print('Not validated?')
def get_turn_of(self) -> str:
p = self._get_turn_of()
return p + ': ' + self.__ginfo.get_player(p)
def get_who_am_i(self) -> str:
return list(self.__ginfo.get_dic().keys())[list(self.__ginfo.get_dic().values()).index(self.__ginfo.get_mac())]
def get_allowed_moves(self):
return [1, 2, 3, 4, 5, 6]
def move(self, move: str):
move = random.randint(1, 6)
if self._get_playable():
self.__ginfo.apply_move(move)
self.get_ginfo().inc_seq()
self._update()
self._set_playable(False)
else:
print('You cannot make a move.')
def get_ginfo(self):
return self.__ginfo
def forfeit(self):
return 'Not possible in this game'
def _get_playable(self):
return self.__playable
def _set_playable(self, state: bool):
self.__playable = state
def _update(self) -> None:
with open(self.__game_path, 'a') as f:
f.write(self.get_time() + str(self.__ginfo) + '\n')
f.close()
self.ping_the_updates(self.__game_path, self.__ip1, self.__ip2, MY_IP)
def _validate(self, curr_board: dict) -> bool:
with open(self.__game_path, 'r')as f:
lines = f.read().splitlines()
second_last_line = lines[-2]
prev_ginfo = DGA(json.loads(second_last_line.split('$')[1]))
# Check if same file/string
if str(self.__ginfo) == str(prev_ginfo):
print('Absolute same string')
self.__game_is_updated = False
return True
prev_board = prev_ginfo.get_board()
# Check only board
if str(prev_board) == str(curr_board):
print('Same board, but other things changed')
self.__game_is_updated = True
return True
# Check if moves before were legit
for move in self.get_allowed_moves():
tmp: DGA = copy.deepcopy(prev_ginfo)
tmp.apply_move(str(move))
if str(tmp.get_board()) == str(curr_board):
self.__game_is_updated = True
print('Valid move was made: %s' % move)
return True
print('An opponent seems to be cheating... Game aborted.')
self.__ginfo.set_status(State.CHEATED)
self.__ginfo.inc_seq()
self._update()
print(self.__ginfo.get_status())
return False
def _get_turn_of(self) -> str:
return self.__ginfo.get_playing_rn()
def _get_game_id(self) -> str:
return self.__game_id
def get_board(self) -> dict:
return self.__curr_game
| 2.40625 | 2 |
JDComment/JDComment/spiders/JDCommentSpider.py | Dengqlbq/JDSpider | 6 | 12788566 | <filename>JDComment/JDComment/spiders/JDCommentSpider.py<gh_stars>1-10
from scrapy_redis.spiders import RedisSpider
from JDComment.items import JDCommentItem
from scrapy.utils.project import get_project_settings
import scrapy
import json
import re
class JDCommentSpider(RedisSpider):
# Fetch the comments of a specified product (full comments, not summaries)
name = 'JDCommentSpider'
allow_domains = ['www.jd.com']
redis_key = 'JDCommentSpider'
settings = get_project_settings()
comment_url = settings['COMMENT_URL']
def parse(self, response):
try:
comment_json = json.loads(response.text)
except json.decoder.JSONDecodeError:
return
good_number = re.findall(r'productId=(\d+)', response.url)[0]
max_page_num = comment_json['maxPage']
for com in comment_json['comments']:
item = JDCommentItem()
item['good_num'] = good_number
item['content'] = com['content']
yield item
for i in range(2, max_page_num):
yield scrapy.Request(self.comment_url.format(good_number, i), callback=self.get_leftover)
def get_leftover(self, response):
try:
comment_json = json.loads(response.text)
except json.decoder.JSONDecodeError:
return
good_number = re.findall(r'productId=(\d+)', response.url)[0]
for com in comment_json['comments']:
item = JDCommentItem()
item['good_num'] = good_number
item['content'] = com['content']
yield item
| 2.515625 | 3 |
interactive/shell.py | vodik/pytest-interactive | 0 | 12788567 | """
An extended shell for test selection
"""
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.core.magic import (Magics, magics_class, line_magic)
from IPython.core.history import HistoryManager
class PytestShellEmbed(InteractiveShellEmbed):
"""Custom ip shell with a slightly altered exit message
"""
def init_history(self):
"""Sets up the command history, and starts regular autosaves.
.. note::
A separate history db is allocated for this plugin separate
from regular ip shell sessions such that only relevant
commands are retained.
"""
self.history_manager = HistoryManager(
shell=self, parent=self, hist_file=self.pytest_hist_file)
self.configurables.append(self.history_manager)
def exit(self):
"""Handle interactive exit.
This method calls the ``ask_exit`` callback and if applicable prompts
the user to verify the current test selection
"""
if getattr(self, 'selection', None):
print(" \n".join(self.selection.keys()))
msg = "\nYou have selected the above {} test(s) to be run."\
"\nWould you like to run pytest now? ([y]/n)?"\
.format(len(self.selection))
else:
msg = 'Do you really want to exit ([y]/n)?'
if self.ask_yes_no(msg, 'y'):
self.ask_exit()
@magics_class
class SelectionMagics(Magics):
"""Custom magics for performing multiple test selections
within a single session
"""
def ns_eval(self, line):
'''Evaluate line in the embedded ns and return result
'''
ns = self.shell.user_ns
return eval(line, ns)
@property
def tt(self):
return self.ns_eval('tt')
@property
def selection(self):
return self.tt._selection
@property
def tr(self):
return self.tt._tr
def err(self, msg="No tests selected"):
self.tr.write("ERROR: ", red=True)
self.tr.write_line(msg)
@line_magic
def add(self, line):
'''Add tests from a test set to the current selection.
Usage:
add tt : add all tests in the current tree
add tt[4] : add 5th test in the current tree
add tt.tests[1:10] : add tests 1-9 found under the 'tests' module
'''
if line:
ts = self.ns_eval(line)
if ts:
self.selection.addtests(ts)
else:
raise TypeError("'{}' is not a test set".format(ts))
else:
print("No test set provided?")
@line_magic
def remove(self, line, delim=','):
"""Remove tests from the current selection using a slice syntax
using a ',' delimiter instead of ':'.
Usage:
remove : remove all tests from the current selection
remove -1 : remove the last item from the selection
remove 1, : remove all but the first item (same as [1:])
remove ,,-3 : remove every third item (same as [::-3])
"""
selection = self.selection
if not self.selection:
self.err()
return
if not line:
selection.clear()
return
# parse out slice
if delim in line:
slc = slice(*map(lambda x: int(x.strip()) if x.strip() else None,
line.split(delim)))
for item in selection[slc]:
selection.remove(item)
else: # just an index
try:
selection.remove(selection[int(line)])
except ValueError:
self.err("'{}' is not and index or slice?".format(line))
@line_magic
def show(self, test_set):
'''Show all currently selected test by pretty printing
to the console.
Usage:
show: print currently selected tests
'''
items = self.selection.values()
if items:
self.tt._tprint(items)
else:
self.err()
| 2.765625 | 3 |
vmtkScripts/contrib/vmtksurfacetagger.py | michelebucelli/vmtk | 1 | 12788568 | #!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtksurfaceclipper.py,v $
## Language: Python
## Date: $Date: 2006/05/26 12:35:13 $
## Version: $Revision: 1.9 $
## Copyright (c) <NAME>, <NAME>. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this class was contributed by
## <NAME> (<EMAIL>)
## Politecnico di Milano
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
import sys
import math
from vmtk import vmtkrenderer
from vmtk import pypes
class vmtkSurfaceTagger(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Surface = None
self.Method = 'cliparray'
self.CellEntityIdsArrayName = 'CellEntityIds'
self.CellEntityIdsArray = None
self.ArrayName = None
self.Array = None
self.Value = None
self.Range = None
self.InsideTag = 2
self.OutsideTag = 1
self.OverwriteOutsideTag = 0
self.InsideOut = 0
self.TagsToModify = None
self.ConnectivityOffset = 1
self.TagSmallestRegion = 1
self.CleanOutput = 1
self.PrintTags = 1
self.Tags = None
self.HarmonicRadius = 1.0
self.HarmonicGenerateTag = 0
self.HarmonicCleaningFixPoints = 0
self.SetScriptName('vmtksurfacetagger')
self.SetScriptDoc('tag a surface exploiting an array defined on it')
self.SetInputMembers([
['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
['Method','method','str',1,'["cliparray","array","harmonic","connectivity","constant","drawing"]','tagging method (cliparray: exploit an array to clip the surface at a certain value tagging the two parts, it creates skew triangles that need a successive remeshing; array: the same of cliparray, but without clipping the original triangles, thus creating a zig-zag tag; harmonic: move harmonically the original points of the input surface toward the array value in order to be able to obtain a precise tag also with a successive call of the array method without the need of remeshing; connectivity: given an already tagged surface, tag disconnected part of each input tag; constant: assign a constant tag to the input surface; drawing: interactive drawing a region)'],
['CellEntityIdsArrayName','entityidsarray','str',1,'','name of the array where the tags are stored'],
['ArrayName','array','str',1,'','name of the array with which to define the boundary between tags'],
['Value','value','float',1,'','scalar value of the array identifying the boundary between tags'],
['Range','range','float',2,'','range scalar values of the array identifying the region for the new tag (alternative to value, only array method)'],
['InsideTag','inside','int',1,'','tag of the inside region (i.e. where the Array is lower than Value; used also in case of "constant" method)'],
['HarmonicRadius','harmonicradius','float',1,'','buffer zone radius for the harmonic method beyond which the points are not moved'],
            ['HarmonicGenerateTag','harmonicgeneratetag','float',1,'','toggle tagging with the array method after the harmonic movement; it is suggested not to tag the surface directly, but to recompute the array on the warped surface and to use the array method on the recomputed array'],
['HarmonicCleaningFixPoints','harmoniccleanfixpoints','bool',1,'','toggle if the cleaning harmonic method has to fix the points or to leave them free'],
['OverwriteOutsideTag','overwriteoutside','bool',1,'','overwrite outside value also when the CellEntityIdsArray already exists in the input surface'],
['OutsideTag','outside','int',1,'','tag of the outside region (i.e. where the Array is greater than Value)'],
['InsideOut','insideout','bool',1,'','toggle switching inside and outside tags ("cliparray" and "array" methods, only when specifying value and not range)'],
['TagsToModify','tagstomodify','int',-1,'','if set, new tag is created only in this subset of existing tags ("cliparray" only)'],
['ConnectivityOffset','offset','int',1,'','offset added to the entityidsarray of each disconnected parts of each input tag ("connectivity" only)'],
['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or the largest region (drawing only)'],
['CleanOutput','cleanoutput','bool',1,'','toggle cleaning the unused points'],
['PrintTags','printtags','bool',1,'','toggle printing the set of tags']
])
self.SetOutputMembers([
['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'],
['CellEntityIdsArray','oentityidsarray','vtkIntArray',1,'','the output entity ids array']
])
def CleanSurface(self):
cleaner = vtk.vtkCleanPolyData()
cleaner.SetInputData(self.Surface)
cleaner.Update()
self.Surface = cleaner.GetOutput()
self.CellEntityIdsArray = self.Surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
def ClipArrayTagger(self,onlyRing=False):
from vmtk import vmtkscripts
# clip the surface according to the Array
clipper = vmtkscripts.vmtkSurfaceClipper()
clipper.Surface = self.Surface
clipper.Interactive = False
clipper.InsideOut = 1-self.InsideOut # inside means regions where the Array is lower than Value
clipper.CleanOutput = self.CleanOutput
clipper.ClipArrayName = self.ArrayName
clipper.ClipValue = self.Value
clipper.Execute()
if onlyRing:
return clipper.CutLines
else:
insideSurface = clipper.Surface
outsideSurface = clipper.ClippedSurface
# change values of the inside tags
insideCellEntityIdsArray = insideSurface.GetCellData().GetArray( self.CellEntityIdsArrayName )
outsideCellEntityIdsArray = outsideSurface.GetCellData().GetArray( self.CellEntityIdsArrayName )
if self.TagsToModify!=None:
for i in range(insideCellEntityIdsArray.GetNumberOfTuples()):
if insideCellEntityIdsArray.GetValue(i) in self.TagsToModify:
insideCellEntityIdsArray.SetValue(i,self.InsideTag)
else:
insideCellEntityIdsArray.FillComponent(0,self.InsideTag)
# merge the inside and the outside surfaces
mergeSurface = vtk.vtkAppendPolyData()
mergeSurface.AddInputData(insideSurface)
mergeSurface.AddInputData(outsideSurface)
mergeSurface.Update()
self.Surface = mergeSurface.GetOutput()
self.CellEntityIdsArray = self.Surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
def ArrayTagger(self,surface=None,arrayName=None,insideTag=None,rangeValues=[]):
if surface == None:
surface = self.Surface
if arrayName == None:
arrayName = self.ArrayName
if insideTag == None:
insideTag = self.InsideTag
if rangeValues == []:
rangeValues = self.Range
pointsToCells = vtk.vtkPointDataToCellData()
pointsToCells.SetInputData(surface)
pointsToCells.PassPointDataOn()
pointsToCells.Update()
surface = pointsToCells.GetPolyDataOutput()
cellEntityIdsArray = surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
cellArray = surface.GetCellData().GetArray(arrayName)
for i in range(surface.GetNumberOfCells()):
if cellArray.GetValue(i) > rangeValues[0] and cellArray.GetValue(i) < rangeValues[1]:
cellEntityIdsArray.SetValue(i,insideTag)
return surface
def CleanPreciseRingDistance(self,ring):
from vmtk import vmtkscripts
def nextPointId(ring,cellId,currentPointId):
idList = vtk.vtkIdList()
ring.GetCellPoints(cellId,idList)
if idList.GetId(0) == currentPointId:
return idList.GetId(1)
else:
return idList.GetId(0)
def nextCellId(ring,pointId,currentCellId):
idList = vtk.vtkIdList()
ring.GetPointCells(pointId,idList)
if idList.GetId(0) == currentCellId:
return idList.GetId(1)
else:
return idList.GetId(0)
def checkThreeConsecutivePointsOnATriangle(lastThreeCellIdLists):
for item in lastThreeCellIdLists[2]:
if item in lastThreeCellIdLists[1]:
if item in lastThreeCellIdLists[0]:
return True
return False
nP = ring.GetNumberOfPoints()
nC = ring.GetNumberOfCells()
# print ("points and cells: ", nP, ", ", nC)
pointLocator = vtk.vtkPointLocator()
pointLocator.SetDataSet(self.Surface)
pointLocator.BuildLocator()
lastThreePointsIds = []
lastThreeCellIdLists = []
distanceCleaned = [0, 0, 0, 0]
cleanRingPointsIds = set()
currentCellId = 0
pointIdList = vtk.vtkIdList()
cellIdList = vtk.vtkIdList()
ring.GetCellPoints(currentCellId,pointIdList)
currentRingPointId = pointIdList.GetId(0)
for i in range(nP):
lastThreePointsIds.append(currentRingPointId)
currentSurfPointId = pointLocator.FindClosestPoint(ring.GetPoint(currentRingPointId))
self.Surface.GetPointCells(currentSurfPointId,cellIdList)
cellIds=[]
for k in range(cellIdList.GetNumberOfIds()):
cellIds.append(cellIdList.GetId(k))
lastThreeCellIdLists.append(cellIds)
currentCellId = nextCellId(ring,currentRingPointId,currentCellId)
currentRingPointId = nextPointId(ring,currentCellId,currentRingPointId)
if i > 1:
# print("last three points: ",lastThreePointsIds)
# print("last three cell id Lists: ",lastThreeCellIdLists)
answer = checkThreeConsecutivePointsOnATriangle(lastThreeCellIdLists)
# print("answer: ", answer)
if answer:
if distanceCleaned[1] == 0:
distanceCleaned[2] = 1
cleanRingPointsIds.add(lastThreePointsIds[1])
else:
distanceCleaned[1] = 1
cleanRingPointsIds.add(lastThreePointsIds[0])
# print("distance cleaned: ", distanceCleaned)
# print("")
lastThreePointsIds.pop(0)
lastThreeCellIdLists.pop(0)
distanceCleaned.append(0)
distanceCleaned.pop(0)
cleanRingPointsIds = sorted(cleanRingPointsIds)
print(cleanRingPointsIds)
if self.HarmonicCleaningFixPoints:
outputRing = ring
distanceArray = outputRing.GetPointData().GetArray('PreciseRingDistance')
else:
outputRing = vtk.vtkPolyData()
distanceArray = vtk.vtkDoubleArray()
if self.HarmonicCleaningFixPoints:
for pointId in cleanRingPointsIds:
distanceArray.SetComponent(pointId,0,0.0)
distanceArray.SetComponent(pointId,1,0.0)
distanceArray.SetComponent(pointId,2,0.0)
else:
points = vtk.vtkPoints()
lines = vtk.vtkCellArray()
countCleanedPoints = 0
ko = 1
firstInsertedId = 0
while ko:
if firstInsertedId not in cleanRingPointsIds:
ko = 0
else:
firstInsertedId = firstInsertedId + 1
lastInsertedId = firstInsertedId
for i in range(lastInsertedId+1,nP):
# currentSurfPointId = pointLocator.FindClosestPoint(ring.GetPoint(i))
if i not in cleanRingPointsIds:
# print ('IDS: ring = ',i,'; surface = ',currentSurfPointId)
points.InsertNextPoint(ring.GetPoint(i))
line = vtk.vtkLine()
line.GetPointIds().SetId(0,lastInsertedId)
line.GetPointIds().SetId(1,i)
lines.InsertNextCell(line)
lastInsertedId = i
else:
countCleanedPoints = countCleanedPoints + 1
# print ('IDS: ring = ',i,'; surface = ',currentSurfPointId)
print ('IDS: ring = ',i)
            # close the loop
line = vtk.vtkLine()
line.GetPointIds().SetId(0,lastInsertedId)
line.GetPointIds().SetId(1,firstInsertedId)
lines.InsertNextCell(line)
print('\ncleaned points: ',countCleanedPoints,'/',nP,'\n')
outputRing.SetPoints(points)
outputRing.SetLines(lines)
surfaceProjection = vmtkscripts.vmtkSurfaceProjection()
surfaceProjection.Surface = outputRing
surfaceProjection.ReferenceSurface = ring
surfaceProjection.Execute()
outputRing = surfaceProjection.Surface
# FIRST AND LAST POINTS NOT YET CHECKED
return outputRing
def HarmonicTagger(self):
from vmtk import vmtkscripts
from vmtk import vmtkcontribscripts
from vmtk import vtkvmtk
def zigZagRingExtractor(surface,arrayname,tag,rangevalues):
surf = vtk.vtkPolyData()
surf.DeepCopy(surface)
surf = self.ArrayTagger(surf,arrayname,tag,rangevalues)
th = vmtkcontribscripts.vmtkThreshold()
th.Surface = surf
th.ArrayName = self.CellEntityIdsArrayName
th.CellData = True
th.LowThreshold = tag
th.HighThreshold = tag
th.Execute()
surf = th.Surface
# boundaryExtractor = vtkvmtk.vtkvmtkPolyDataBoundaryExtractor()
# boundaryExtractor.SetInputData(surf)
# boundaryExtractor.Update()
# zigZagRing = boundaryExtractor.GetOutput()
featureEdges = vtk.vtkFeatureEdges()
featureEdges.SetInputData(surf)
featureEdges.BoundaryEdgesOn()
featureEdges.FeatureEdgesOff()
featureEdges.NonManifoldEdgesOff()
featureEdges.ManifoldEdgesOff()
featureEdges.ColoringOff()
featureEdges.CreateDefaultLocator()
featureEdges.Update()
zigZagRing = featureEdges.GetOutput()
return zigZagRing
tags = set()
for i in range(self.Surface.GetNumberOfCells()):
tags.add(self.CellEntityIdsArray.GetComponent(i,0))
tags = sorted(tags)
# use clip-array method only to extract the ring
preciseRing = self.ClipArrayTagger(True)
if self.HarmonicGenerateTag:
self.ArrayTagger()
zigZagRing = zigZagRingExtractor(self.Surface,self.ArrayName,12345,[-math.inf, self.Value])
surfaceDistance = vmtkscripts.vmtkSurfaceDistance()
surfaceDistance.Surface = zigZagRing
surfaceDistance.ReferenceSurface = preciseRing
surfaceDistance.DistanceVectorsArrayName = 'PreciseRingDistance'
surfaceDistance.Execute()
zigZagRing = surfaceDistance.Surface
passArray = vtk.vtkPassArrays()
passArray.SetInputData(zigZagRing)
passArray.AddPointDataArray('PreciseRingDistance')
passArray.Update()
zigZagRing = passArray.GetOutput()
zigZagRing = self.CleanPreciseRingDistance(zigZagRing)
writer = vtk.vtkXMLPolyDataWriter()
writer.SetInputData(zigZagRing)
writer.SetFileName('zigZagRing.vtp')
writer.SetDataModeToBinary()
writer.Write()
surfaceDistance2 = vmtkscripts.vmtkSurfaceDistance()
surfaceDistance2.Surface = self.Surface
surfaceDistance2.ReferenceSurface = zigZagRing
surfaceDistance2.DistanceArrayName = 'ZigZagRingDistance'
surfaceDistance2.Execute()
self.Surface = surfaceDistance2.Surface
print('OK!')
homogeneousBoundaries = zigZagRingExtractor(self.Surface,'ZigZagRingDistance',2435,[-math.inf,self.HarmonicRadius])
pointLocator = vtk.vtkPointLocator()
pointLocator.SetDataSet(self.Surface)
pointLocator.BuildLocator()
for k in range(3):
print("Harmonic extension of component ",k)
boundaryIds = vtk.vtkIdList()
temperature = vtk.vtkDoubleArray()
temperature.SetNumberOfComponents(1)
for i in range(homogeneousBoundaries.GetNumberOfPoints()):
idb = pointLocator.FindClosestPoint(homogeneousBoundaries.GetPoint(i))
boundaryIds.InsertNextId(idb)
temperature.InsertNextTuple1(0.0)
warpArray = zigZagRing.GetPointData().GetArray('PreciseRingDistance')
for i in range(zigZagRing.GetNumberOfPoints()):
idb = pointLocator.FindClosestPoint(zigZagRing.GetPoint(i))
boundaryIds.InsertNextId(idb)
#temperature.InsertNextTuple1(1.0)
temperature.InsertNextTuple1(warpArray.GetComponent(i,k))
# perform harmonic mapping using temperature as boundary condition
harmonicMappingFilter = vtkvmtk.vtkvmtkPolyDataHarmonicMappingFilter()
harmonicMappingFilter.SetInputData(self.Surface)
harmonicMappingFilter.SetHarmonicMappingArrayName('WarpVector'+str(k))
harmonicMappingFilter.SetBoundaryPointIds(boundaryIds)
harmonicMappingFilter.SetBoundaryValues(temperature)
harmonicMappingFilter.SetAssemblyModeToFiniteElements()
harmonicMappingFilter.Update()
self.Surface = harmonicMappingFilter.GetOutput()
warpVector = vtk.vtkDoubleArray()
warpVector.SetNumberOfComponents(3)
warpVector.SetNumberOfTuples(self.Surface.GetNumberOfPoints())
warpVector.SetName('WarpVector')
warpVectorX = self.Surface.GetPointData().GetArray('WarpVector0')
warpVectorY = self.Surface.GetPointData().GetArray('WarpVector1')
warpVectorZ = self.Surface.GetPointData().GetArray('WarpVector2')
for i in range(self.Surface.GetNumberOfPoints()):
warpVector.SetComponent(i,0,warpVectorX.GetComponent(i,0))
warpVector.SetComponent(i,1,warpVectorY.GetComponent(i,0))
warpVector.SetComponent(i,2,warpVectorZ.GetComponent(i,0))
self.Surface.GetPointData().AddArray(warpVector)
warper = vtk.vtkWarpVector()
warper.SetInputData(self.Surface)
warper.SetInputArrayToProcess(0,0,0,0,'WarpVector')
warper.SetScaleFactor(1.)
warper.Update()
self.Surface = warper.GetOutput()
def ConnectivityTagger(self):
self.CleanSurface()
tags = set()
for i in range(self.Surface.GetNumberOfCells()):
tags.add(self.CellEntityIdsArray.GetComponent(i,0))
tags = sorted(tags)
if self.PrintTags:
self.PrintLog('Initial tags: '+str(tags))
surface = []
mergeTags = vtk.vtkAppendPolyData()
for k, item in enumerate(tags):
th = vtk.vtkThreshold()
th.SetInputData(self.Surface)
th.SetInputArrayToProcess(0, 0, 0, 1, self.CellEntityIdsArrayName)
th.ThresholdBetween(item-0.001,item+0.001)
th.Update()
gf = vtk.vtkGeometryFilter()
gf.SetInputConnection(th.GetOutputPort())
gf.Update()
surface.append(gf.GetOutput())
connectivityFilter = vtk.vtkConnectivityFilter()
connectivityFilter.SetInputData(surface[k])
connectivityFilter.SetExtractionModeToAllRegions()
connectivityFilter.ColorRegionsOn()
connectivityFilter.Update()
surface[k] = connectivityFilter.GetOutput()
cellEntityIdsArray = surface[k].GetCellData().GetArray(self.CellEntityIdsArrayName)
regionIdArray = surface[k].GetCellData().GetArray('RegionId')
for i in range(surface[k].GetNumberOfCells()):
tag = cellEntityIdsArray.GetComponent(i,0) +regionIdArray.GetComponent(i,0)*self.ConnectivityOffset
cellEntityIdsArray.SetComponent(i,0,tag)
mergeTags.AddInputData(surface[k])
mergeTags.Update()
self.Surface = mergeTags.GetOutput()
self.CellEntityIdsArray = self.Surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
def DrawingTagger(self):
from vmtk import vmtkscripts
drawer = vmtkscripts.vmtkSurfaceRegionDrawing()
drawer.Surface = self.Surface
drawer.InsideValue = self.InsideTag
drawer.OutsideValue = self.OutsideTag
drawer.OverwriteOutsideValue = self.OverwriteOutsideTag
drawer.ArrayName = self.CellEntityIdsArrayName
drawer.TagSmallestRegion = self.TagSmallestRegion
drawer.CellData = 1
drawer.ComputeDisance = 0
drawer.Execute()
self.Surface = drawer.Surface
def Execute(self):
if self.Surface == None:
self.PrintError('Error: no Surface.')
self.CellEntityIdsArray = self.Surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
# initialize the CellEntityIdsArray with OutsideTag in some cases
if self.CellEntityIdsArray == None or (self.OverwriteOutsideTag and self.Method != "connectivity"):
self.CellEntityIdsArray = vtk.vtkIntArray()
self.CellEntityIdsArray.SetName(self.CellEntityIdsArrayName)
self.CellEntityIdsArray.SetNumberOfComponents(1)
self.CellEntityIdsArray.SetNumberOfTuples(self.Surface.GetNumberOfCells())
self.Surface.GetCellData().AddArray(self.CellEntityIdsArray)
self.CellEntityIdsArray.FillComponent(0,self.OutsideTag)
if self.Method in ['array','harmonic']: # to be extended also to other method ['cliparray','array','harmonic']:
if self.Value == None and self.Range == None:
self.PrintError("This method need the definition of a value or a range")
elif self.Range == None:
if self.InsideOut:
self.Range = [self.Value, math.inf]
else:
self.Range = [-math.inf, self.Value]
# print("range: ",self.Range)
if self.Method == 'cliparray':
self.ClipArrayTagger()
elif self.Method == 'array':
self.ArrayTagger()
elif self.Method == 'harmonic':
self.HarmonicTagger()
elif self.Method == 'connectivity':
self.ConnectivityTagger()
elif self.Method == 'constant':
self.CellEntityIdsArray.FillComponent(0,self.InsideTag)
elif self.Method == 'drawing':
self.DrawingTagger()
else:
self.PrintError("Method unknown (available: cliparray, array, connectivity, constant, drawing)")
if self.CleanOutput:
self.CleanSurface()
if self.PrintTags:
self.CellEntityIdsArray = self.Surface.GetCellData().GetArray(self.CellEntityIdsArrayName)
self.Tags = set()
for i in range(self.Surface.GetNumberOfCells()):
self.Tags.add(self.CellEntityIdsArray.GetComponent(i,0))
self.Tags = sorted(self.Tags)
self.PrintLog('Tags of the output surface: '+str(self.Tags))
# useless, already triangulated
# if self.Triangulate:
# triangleFilter = vtk.vtkTriangleFilter()
# triangleFilter.SetInputData(self.Surface)
# triangleFilter.Update()
# self.Surface = triangleFilter.GetOutput()
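# Usage sketch (illustrative): the option flags below map to the input members
# declared in __init__; the file names and the array name are hypothetical.
#
#   vmtksurfacetagger -ifile surface.vtp -array DistanceArray -value 1.5 \
#       -method cliparray -inside 2 -outside 1 -ofile tagged.vtp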
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
| 2.1875 | 2 |
KalmanMachine/Kalman4LogisticReg.py | marc-h-lambert/L-RVGA | 0 | 12788569 | <filename>KalmanMachine/Kalman4LogisticReg.py
###################################################################################
# THE KALMAN MACHINE LIBRARY #
# Code supported by <NAME> #
###################################################################################
# Online second order method for logistic regression : #
# The extended Kalman filter = online natural gradient #
# --> see "Online natural gradient as a Kalman filter, <NAME> 2018" #
# The quadratic Kalman filter= online version of the bounded variational approach #
# --> see "A variational approach to Bayesian logistic regression models \ #
# and their extensions, Jaakkola and Jordan 1997" #
# The recursive VGA implicit #
# The recursive VGA explicit #
# --> see "The recursive variational Gaussian approximation (R-VGA), #
# <NAME>, <NAME> and <NAME> 2020" #
# The recursive VGA explicit with extragrad #
# --> see "The limited memory recursive variational Gaussian approximation(L-RVGA)#
# <NAME>, <NAME> and <NAME> 2021" #
###################################################################################
from .KUtils import sigmoid, sigp, sigpp, graphix, negbayesianlogisticPdf
import numpy as np
import numpy.random
import numpy.linalg as LA
from .KBayesianReg import BayesianRegression, OnlineBayesianRegression
import math
from math import log, exp
from scipy import optimize
class LogisticPredictor(object):
def __init__(self):
super().__init__()
def predict(self,X):
return np.multiply(self.predict_proba(X)>0.5,1)
#prediction of N outputs for inputs X=(N,d)
#use approximation of the integration over a Gaussian
def predict_proba(self,X):
N,d=X.shape
beta=math.sqrt(8/math.pi)
vec_nu=np.diag(X.dot(self.Cov).dot(X.T))
k=beta/np.sqrt(vec_nu+beta**2).reshape(N,1)
return sigmoid(k*X.dot(self._theta))
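    # The rescaling k = beta / sqrt(nu + beta**2) with beta = sqrt(8/pi) is the
    # usual moment-matching (probit) approximation of the logistic-Gaussian
    # integral: E[sigmoid(x.theta)] ~ sigmoid(x.mu / sqrt(1 + pi*nu/8)),
    # where nu = x^T Cov x is the predictive variance of the activation.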
def plotPredictionMap(self,ax,size=6):
N=100
x=np.zeros([2,1])
theta1=np.linspace(-size/2,size/2,N)
theta2=np.linspace(-size/2,size/2,N)
probaOutput=np.zeros((N,N))
xv,yv=np.meshgrid(theta1,theta2)
for i in np.arange(0,N):
for j in np.arange(0,N):
x[0]=xv[i,j]
x[1]=yv[i,j]
probaOutput[i,j]=self.predict_proba(x.T)
contr=ax.contourf(xv,yv,probaOutput,20,zorder=1,cmap='jet')
ax.set_xlim(-size/2, size/2)
ax.set_ylim(-size/2, size/2)
return contr
import time, tracemalloc
# Batch Laplace version of Bayesian Logistic Regression (with a Gaussian model)
class LaplaceLogisticRegression(BayesianRegression, LogisticPredictor):
    # !!! implemented only for theta0=0 and Cov0=sigma0^2 I,
    # otherwise using the scikit-learn logreg method produces a biased maximum posterior
def __init__(self, theta0, Cov0):
super().__init__(theta0)
self._Cov0=Cov0 # the initial covariance (ie uncertainty on the initial guess)
self._Cov=np.copy(self._Cov0)
def fit(self,X,y):
N,d=X.shape
(sign, logdetCov) = LA.slogdet(self._Cov0)
logdetCov0=logdetCov
invCov0=LA.inv(self._Cov0)
tracemalloc.start()
tic=time.perf_counter()
sol=optimize.minimize(negbayesianlogisticPdf, self._theta0, args=(self._theta0,invCov0,logdetCov0,X,y.reshape(N,),1,),method='L-BFGS-B')
self._theta=sol.x
toc=time.perf_counter()
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
memoryUsedLAP=current/10**6
timeCostLAP=toc-tic
        print('Compute MAP with scikit-learn/LBFGS... in {0:.2} s'.format(timeCostLAP))
        print('Memory cost for MAP with scikit-learn/LBFGS... is {0:.2} MB'.format(memoryUsedLAP))
tracemalloc.start()
tic=time.perf_counter()
# the Hessian
L=sigmoid(X.dot(self._theta))
K=(L*(1-L)).reshape(N,1,1)
# Tensor version
#A=X[...,None]*X[:,None]
#H=np.sum(K*A,axis=0)+LA.inv(self._Cov0)
# Memory free version
H=invCov0
for i in range(0,N):
xt=X[i,:].reshape(d,1)
H=H+K[i]*xt.dot(xt.T)
self._Cov=LA.inv(H)
toc=time.perf_counter()
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
self.memoryUsedLAP=current/10**6
self.timeCostLAP=toc-tic
print('Compute LAP with inversion of Hessian... in {0:.2} s'.format(self.timeCostLAP))
print('Memory cost for LAP ... is {0:.2} MB'.format(self.memoryUsedLAP))
return self
def plotEllipsoid(self,ax,nbLevels=1,u=0,v=1,labelize=True):
d=self._theta.shape[0]
thetaproj,Covproj=graphix.projEllipsoid(self._theta,self._Cov.reshape(d,d),u,v)
if labelize:
graphix.plot_ellipsoid2d(ax,thetaproj,Covproj,col='r',linewidth=1.2,zorder=3,linestyle='-',label='Laplace')
else:
graphix.plot_ellipsoid2d(ax,thetaproj,Covproj,col='r',linewidth=1.2,zorder=3,linestyle='-')
ax.scatter(self._theta[0],self._theta[1],color='r')
@property
def Cov(self):
return self._Cov
@property
def maxP(self):
return self._theta
# The natural gradient or EKF
class EKFLogReg(OnlineBayesianRegression, LogisticPredictor):
def __init__(self, theta0, Cov0, sigma=1, passNumber=1):
super().__init__(theta0, Cov0, sigma, passNumber)
def update(self,xt,yt):
# intermediate variables
nu=xt.T.dot(self._Cov.dot(xt))
Pu=self._Cov.dot(xt)
a=xt.T.dot(self._theta)
m=sigp(a)
m=max(m,1e-100)
# update state
self._Cov=self._Cov-np.outer(Pu,Pu)/(1/m+nu)
self._theta=self._theta+self._Cov.dot(xt)*(yt-sigmoid(a))
# equivalent formulas using the Kalman form
# def update(self,xt,yt):
# # compute R
# mu=sigmoid(xt.T.dot(self._theta))
# R=max(mu*(1-mu),1e-100)
# H=R*xt.T
# # prediction error
# err=yt-mu
# # computation of optimal gain
# S=R+H.dot(self._Cov).dot(H.T)
# K=self._Cov.dot(H.T).dot(LA.inv(S))
# # update state and covariance of state
# self._theta=self._theta+K.dot(err)
# self._Cov=self._Cov-K.dot(H).dot(self._Cov)
# The local variational Kalman or quadratic Kalman
class QKFLogReg(OnlineBayesianRegression,LogisticPredictor):
def __init__(self, theta0, Cov0, sigma=1, passNumber=1):
super().__init__(theta0, Cov0, sigma, passNumber)
@staticmethod
def eta(x):
return -1/(2*x)*(sigmoid(x)-0.5)
def update(self,xt,yt):
# compute matrix R
ksi=math.sqrt(xt.T.dot(self._Cov+np.outer(self._theta,self._theta)).dot(xt))
invR=np.ones([1,1])*(-2*QKFLogReg.eta(ksi))
R=LA.inv(invR)
# compute gain K
H=xt.T
S=R+H.dot(self._Cov).dot(H.T)
K=self._Cov.dot(H.T).dot(LA.inv(S))
#update theta
self._theta=self._theta+K.dot(R.dot(yt-0.5)-H.dot(self._theta))
#update Cov
self._Cov=self._Cov-K.dot(H).dot(self._Cov)
# equivalent formulas from Jordan paper:
# def update(self,xt,yt):
# ksi=math.sqrt(xt.T.dot(self._Cov+np.outer(self._theta,self._theta)).dot(xt))
# print(ksi)
# P=LA.inv(LA.inv(self._Cov)-2*QKFLogReg2.eta(ksi)*np.outer(xt,xt))
# self._theta=P.dot(LA.inv(self._Cov).dot(self._theta)+(yt-0.5)*xt)
# self._Cov=P
# The implicit RVGA (with a Newton solver)
class RVGALogReg(OnlineBayesianRegression, LogisticPredictor):
def __init__(self, theta0, Cov0, sigma=1,passNumber=1):
super().__init__(theta0, Cov0, sigma, passNumber)
beta=math.sqrt(8/math.pi)
def fun2D(x,alpha0,nu0,y):
k=RVGALogReg.beta/math.sqrt(exp(x[1])+RVGALogReg.beta**2)
f=x[0]+nu0*sigmoid(x[0]*k)-alpha0-nu0*y
g=exp(x[1])-nu0/(1+nu0*k*sigp(x[0]*k))
return [f,g]
def jac(x,alpha0,nu0,y):
k=RVGALogReg.beta/math.sqrt(exp(x[1])+RVGALogReg.beta**2)
kp=-0.5*RVGALogReg.beta*exp(x[1])/((exp(x[1])+RVGALogReg.beta**2)**(3/2))
f_a=1+nu0*k*sigp(x[0]*k)
f_gamma=nu0*x[0]*kp*sigp(x[0]*k)
g_a=nu0**2*k**2*sigpp(x[0]*k)/((1+nu0*k*sigp(x[0]*k))**2)
g_gamma=exp(x[1])+nu0**2*kp*(sigp(x[0]*k)+k*x[0]*sigpp(x[0]*k))/((1+nu0*k*sigp(x[0]*k))**2)
return np.array([[f_a,f_gamma],[g_a,g_gamma]])
def optim2D(alpha0,nu0,y):
alphaMin=alpha0+nu0*y-nu0
alphaMax=alpha0+nu0*y
nuMin=nu0*(1-nu0/(4+nu0))
nuMax=nu0
a=(alphaMin+alphaMax)/2
gamma=log((nuMin+nuMax)/2)
#a,nu=alpha0,nu0
sol=optimize.root(RVGALogReg.fun2D, [a,gamma], tol=1e-6, args=(alpha0,nu0,y,),jac=RVGALogReg.jac,method='hybr')
return sol.x[0],exp(sol.x[1]) ,sol.nfev
def update(self,xt,yt):
# init parameters
nu0=xt.T.dot(self._Cov.dot(xt))
alpha0=xt.T.dot(self._theta)
a,nu,nbInnerLoop=RVGALogReg.optim2D(np.asscalar(alpha0), np.asscalar(nu0), np.asscalar(yt))
#updates
k=RVGALogReg.beta/math.sqrt(nu+RVGALogReg.beta**2)
self._theta=self._theta+self._Cov.dot(xt)*(yt-sigmoid(k*a))
s=1/(nu0+1/(k*sigp(k*a)))
self._Cov=self._Cov-s*np.outer(self._Cov.dot(xt),self._Cov.dot(xt))
# The explicit RVGA (without Newton solver)
class RVGALogRegExplicit(OnlineBayesianRegression, LogisticPredictor):
def __init__(self, theta0, Cov0, sigma=1, passNumber=1):
super().__init__(theta0, Cov0, sigma, passNumber)
def update(self,xt,yt):
beta=RVGALogReg.beta # beta = math.sqrt(8/math.pi)
# intermediate variables
nu=xt.T.dot(self._Cov.dot(xt))
# compute sigma(a)
k=beta/math.sqrt(nu+beta**2)
a=xt.T.dot(self._theta)
m=k*sigp(k*a)
m=max(m,1e-100)
# update covariance
Pu=self._Cov.dot(xt)
self._Cov=self._Cov-np.outer(Pu,Pu)/(1/m+nu)
self._theta=self._theta+self._Cov.dot(xt)*(yt-sigmoid(k*a))
# update state
# The explicit RVGA with one extragrad step (aka mirror prox)
# the mean is always updated two times, the covariance is updated two times
# if updateCovTwoTimes=True
class RVGALogRegIterated(OnlineBayesianRegression, LogisticPredictor):
def __init__(self, theta0, Cov0, sigma=1, passNumber=1,updateCovTwoTimes=True):
super().__init__(theta0, Cov0, sigma, passNumber)
self.updateCovTwoTimes=updateCovTwoTimes
@staticmethod
def updateExpectationParameters(xt,theta,Cov):
beta=RVGALogReg.beta
nu=xt.T.dot(Cov.dot(xt))
k=beta/math.sqrt(nu+beta**2)
a=xt.T.dot(theta)
return k,a,nu
def update(self,xt,yt):
Cov=self._Cov
theta=self._theta
nuOld=xt.T.dot(self._Cov.dot(xt))
PuOld=self._Cov.dot(xt)
k,a,nu=RVGALogRegIterated.updateExpectationParameters(xt,theta,Cov)
        # update covariance: this could be moved inside the iteration, but doing
        # so causes problems with the condition number
m=k*sigp(k*a)
if m<1e-100:
print(m)
m=max(m,1e-100)
# update state
Cov=self._Cov-np.outer(PuOld,PuOld)/(1/m+nuOld)
theta=self._theta+Cov.dot(xt)*(yt-sigmoid(k*a))
k,a,nu=RVGALogRegIterated.updateExpectationParameters(xt,theta,Cov)
m=k*sigp(k*a)
if m<1e-100:
m=max(m,1e-100)
if self.updateCovTwoTimes:
Cov=self._Cov-np.outer(PuOld,PuOld)/(1/m+nuOld)
theta=self._theta+Cov.dot(xt)*(yt-sigmoid(k*a))
self._Cov=Cov
self._theta=theta
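# Usage sketch (illustrative; the data names X, y and the dimension d are
# hypothetical, with y holding 0/1 labels and each row of X a feature vector):
#
#   theta0 = np.zeros(d)
#   Cov0 = np.eye(d)
#   ekf = EKFLogReg(theta0, Cov0)
#   for xt, yt in zip(X, y):
#       ekf.update(xt, yt)          # one online natural-gradient step
#   probas = ekf.predict_proba(X)   # Gaussian-approximated predictive probabilities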
| 1.890625 | 2 |
day15_p1.py | venomousmoog/adventofcode2021 | 0 | 12788570 | <filename>day15_p1.py
import heapq
import sys
def compute(data):
map = [[int(e) for e in l] for l in data.split('\n')]
w = len(map)
h = len(map[0])
end = (w-1, h-1)
# pq format is [cost, length, counter, position]
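    # the running counter is a tie-breaker so heapq never has to compare the
    # position tuples when two queued entries share the same cost and length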
q = [[0, 0, 0, (0,0)]]
counter = 1
visited = set([(0,0)])
while len(q) > 0:
cost, length, _, p = heapq.heappop(q)
# print(f'{cost}, {length}, {_}, {p}')
if p == end:
print(cost)
return
# add neighbors if they aren't in the path already:
neighbors = [(p[0], p[1]+1), (p[0], p[1]-1), (p[0]+1, p[1]), (p[0]-1, p[1])]
for n in neighbors:
if not n in visited and not (n[0] < 0 or n[0] >= w or n[1] < 0 or n[1] >= h):
# print(f'n = ({n[0]}, {n[1]})')
heapq.heappush(q, [cost + map[n[0]][n[1]], length+1, counter, n])
counter = counter + 1
visited.add(n)
test_data = """1163751742
1381373672
2136511328
3694931569
7463417111
1319128137
1359912421
3125421639
1293138521
2311944581"""
data="""4395194575929822238989941988598994946581953236424841813955769288219282998336161199274582725132193662
1715719228759138638397684864996679719167258159599658826174926447916886499139963731181569842792979836
5641957947399279962631596818774779918898868615738299911674929785663922472189972649893918935926989965
2585497851318911812329521892518876883669883794214786673934129871993567882759933254875129861732949942
6326121293629918828545199576444799485199997872984968987116189717399321966789925619859799917896971919
1197891193163157699998465137799499428622885997715629793728799611991922169771186415959393796833328797
9971941176236517711968176833694122359799994332972933319744917758263654615939671218999722759897946883
8999294422711677672138234871949951867361994331595339178487499997983949131794946775549222972399289896
3349291493887252933563637858998292429461181979119894978282958316785791165839546981395193797183659157
7189439835616936936452916284895714389978973983979946899115361688238479929831181859649911588342717174
9919687931991844963923118455824186376858799159986659568599889942949751127924998243532126861192227813
8556752119948211196297781958736978151712384292658126551391659754132796214991112228251319499881769877
1979947819698244184997611639891169294112917898297946132711657818979459522579998285949272941189651219
8526183359487314556663171586821716949298464911898919115161199393481771956882329857552411954641939441
1781919896684119688349391339192196799588645991242746996794955718159288456491121922268271789679196351
1197567977371916661139687299725917634957272938365923151889699976438687129398228799597921729929992714
4612932372899778339369954694952999219672973397661799332114944198582981641791452579999989699742756964
6999162799681597281641651777891979395397835152949992426916756338121612297639319849519239816897924832
2929523873329969919578959424699989299758674993329912629488597872529465316591581194999996221889681928
8228656689838938962979299798868965176111786181396381619674278311396838891887432267914997982584599249
3291189727977984366996998628998999313854947791298837533988923314799893553121114286295795551978824232
1969899739799949932911885277583879342673999999696225694755379718993111283179891261733382476218558941
4139678172235724118199726211119671191998665963518891164122317193835651793851195981494569791148964119
8491859444877458918921985959989568279219722914284989216186773895898218317795329987999133881714858193
2771894559749588998196893858679239272818243739928684589924781981773791798991892831999869587961311167
8698775892993989977451194464865721268484216875311866222111987933418291995941958137194897996649415669
9815831789813838789211849713941639192861814785979938811825669343313889446468439829866799197356199957
8391217969784868593822994733189558658223987532137877169778153978669662784849729448991221591737912928
1286923398138799667979629228969771916586553188988688124597199878412159468838995398755911831124947458
1114829212648462813962865127721383493959596596219921932831948169799141594693396881528179473168999246
8229274989319485131759119628887993878981894123939971871579472951958128273735938172877719459979616966
9499516139937388199431887411757519697113346973918231323926539878654177977781846978129796428916386862
4976998292388572314456265734789687366686748398788899297684913137779698579739992995231592719964862978
2298177775583527949898397959425767832825991552239939999392925878819892226485971811999876289184259875
3598947472448691635139959991352673979496318718231961399989962929352899269143994934946299795998128783
7112174898192377964819299748639161162263488897467958394886818188981289197818113459389985525596199819
9987787819769819287382267885519776534891123996871992292115788186562861498191537481789591841846186882
4932928968769839163293479931958989358947715254682158299764179952675896919281473293998826971956719781
9992216325832365211598714373826949876761198899344963795899592692291996816722739297999357184959969777
8299492186897712559157176181191385891631253997468944991976578985988414979191999779899999718894199893
9599634119932298479281989351167939998231691111219295175188529818284912884666292812879581588751975666
9893411889151989895121929229639869648431989868714855378743883971235628399646641369715828182779286514
8939515728674611191619883917891998789585382765378965697452669997798898499199181191539751395986113886
9985666981399318453128358762184119989474811319212718761249919671981988848392275694674398146181416198
8193892754125669889688695711759861961188261942984936153189888489199611811681269985959959968811384341
8299917437379188117917628771687562449889987984991225519931596159725913681979787939969976651741749698
8285297977988918992157859998969934917144295338397889967636236876746919995248975499929976279814914931
9129499129593186731916693856178488768855292651679735946159317969489119952269991769981967997489693511
7186637914671953873792124571983949693887419197239989992917386832793799278886872648567913786197639677
9667487981719871696217751899991188952177479933799965849967487849683858162517919957637748993181344613
4798696188279263112441176132921127173319858683256972916675925279917946959468161918992879929186429996
1875628214911419676889344457796569819463131975985822779517679476958819822619947289779578492991712138
9729987223699868162597477587276969892989969399943929635225993221991199187491492967948629623789668744
9916111911755127191948896779129983784992661981998299971599989198761289123181443629292169995715974949
1291917878889332658369889135368111113759781552671763529821597651871398558593277892399994199844188899
8857659891128611558849143993177991511814858869793389498556946631129988799922937638978992359857839357
9498927473319717382175369484488993971151116118963956979372884748999382475159478948297342397888172486
8838311993693897959989699851182958819758515849648216159982472145775919486496793748386329827875114795
2646323635456985931835993825299725819616464197571881778646919914938572377378887919941193912366189981
4192777568853894179559191115999836896832987112829422358328999616261299876112799681459689496973841216
4182522211326928934549829947681273461197958221628297222967289879895821718559188491763567998193425175
9941452899981779489899453416943116824565113896695891691999972969869313999989231351588275691638731173
4279199328962391513117193251941896142249199541185874189116658753849872967489875833815972485753775799
2692166517168484891439119972374885161996829937192277189246449949998287236914964598317589854294191818
9691899795175315938351268169518149836746199789392955191246577759192964575629739479919199793594669346
4928854341918568913973278223498427458884195453535861967991191927617293996995778496126372492999583688
1869735122593925853281477159999674511464912381748647211595414615939131393112325927225913832758726989
8673156999937894893938221988172251959669284384873549417558434149199712793826799147878341238872169799
3786244397665631994388328169996499172551393184212436839161983999116249888235795513969896377319987648
3299841254618376148274689819516491144697712179196959898422484829331951929556251191292922792943475111
8979685787892899891511168966657683893794989939899716589793811126757987574288699799754629917678927199
5291969299298572611431791117595444556315389932888323856938713114932929293397712995429591559999339269
5571919139695338299758473188558593632658195263118579497481394132986981877919799978847911917349971951
9447171299311825169951839995999391971743173914736767913282767941796338297879485196369168312316114619
8965937476731877998867589955999781971348978541526728915989299973637949395212351839999632681871119852
7154818129168364927991293583982982964961899491189199159915998987259614876919693993277919976424821339
5863889995599382179127198593932798869731787297368813494929776247986339942885147596969931861841289387
8291299218914728523912933158961272669279911995972979992183926289243118311811961558131192613197929186
8961913468988371188879695178682994979962878269287243998918854923819591848296141587688276923892888765
5496139281898589971291971391186884911787687869879279111147189911627194976288937484497924811159764722
9875395289318577859812192389977827241588661341589921792229379945659193616899945199691585158611997752
9897872534836989139768375684846127999667957297219249999952812149314798361868111441889987449192925623
9138263544984999193841859937147586599945426897796171621827994461911885818295944939999799882269659399
6914856489683269528879792799877759191976961973971938559173939387419513299922889959976555221521424697
1715191986151162997669282366919988483318991958172785998996643612786988727959277619145619891677993797
9942283447167676428491385853611565159691399722133269739927118911631657887991431834279716429273499866
4799972923699768188516165542947747617263791192642512198299894199889995551779128118616736724861699788
4958919625759529361425491889628879199991912868912939317496198948944989139197771291619455967185949537
2223977898998997958945226793679996855922957491361985987152262111657894546195945683493497719917994917
2169115388563391991925213886793982979499187969173993849967129291479453796965869487632218484794186428
8181979148789991463199414217818677797879614992639757619958818656897787992873919194129981917198598753
7298898662149861558411999137899133141939277162229327197891618169842134976193137994322715445297289799
3295636966178697931489122998118891198718116639897914658911799996899497168942241323161993721164174647
1198816571471558697411816978791898982893995992519493175459877178916533784561911924112689959897991892
5618962846185938989183714393155171999657459292691693826899689462277599981997875426218165946485993981
9347291669523992969899819288568678793199991148799181869131292289712895996712494789999957779656718174
1211541591292966678912459115993944851245448935997663676619889855226196181522818261837465192373166996
1797339189173753393939899772787971418172195729879466938989599928482964996915184691991154998224136189
3421551897114792849579376681776925441448992599913934985689149424792976946888488111919491919499858968
1971473988843177925911719936118862197889673997928889139314775919195258317837961344298715732432565457"""
compute(data) | 3.0625 | 3 |
bp/common/maths/Maths.py | JAlvarezJarreta/pecan | 5 | 12788571 | <gh_stars>1-10
#!/usr/bin/env python
#Copyright (C) 2006-2011 by <NAME> (<EMAIL>)
#
#Released under the MIT license, see LICENSE.txt
#!/usr/bin/env python
import sys
import os
import re
import math
NEG_INFINITY = -1e30000
def exp(x):
#return math.exp(x)
if x > -2:
if x > -0.5:
if x > 0:
return math.exp(x)
return ((((0.03254409303190190000 * x + 0.16280432765779600000)\
* x + 0.49929760485974900000)\
* x + 0.99995149601363700000)\
* x + 0.99999925508501600000)
if x > -1:
return ((((0.01973899026052090000 * x + 0.13822379685007000000)\
* x + 0.48056651562365000000)\
* x + 0.99326940370383500000)\
* x + 0.99906756856399500000)
return ((((0.00940528203591384000 * x + 0.09414963667859410000)\
* x + 0.40825793595877300000)\
* x + 0.93933625499130400000)\
* x + 0.98369508190545300000)
if x > -8:
if x > -4:
return ((((0.00217245711583303000 * x + 0.03484829428350620000)\
* x + 0.22118199801337800000)\
* x + 0.67049462206469500000)\
* x + 0.83556950223398500000)
return ((((0.00012398771025456900 * x + 0.00349155785951272000)\
* x + 0.03727721426017900000)\
* x + 0.17974997741536900000)\
* x + 0.33249299994217400000)
if x > -16:
return ((((0.00000051741713416603 * x + 0.00002721456879608080)\
* x + 0.00053418601865636800)\
* x + 0.00464101989351936000)\
* x + 0.01507447981459420000)
return 0
def log(x):
return math.log(x)
def logAddQuality(x, y):
if x < y:
if x <= NEG_INFINITY:
return y
return math.log(math.exp(x - y) + 1) + y
if y <= NEG_INFINITY:
return x
return math.log(math.exp(y - x) + 1) + x
__LOG_UNDERFLOW_THRESHOLD = 7.5
__LOG_ZERO = -2e20
# three decimal places
def logAdd(x, y):
"""
if x < y:
if x <= __LOG_ZERO or y - x >= __LOG_UNDERFLOW_THRESHOLD:
return y
return lookup(y - x) + x
if y <= __LOG_ZERO or x - y >= __LOG_UNDERFLOW_THRESHOLD:
return x
return lookup(x - y) + y;
"""
return logAddQuality(x, y)
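# logAdd(x, y) returns log(exp(x) + exp(y)) computed stably, e.g.
# logAdd(log(0.2), log(0.3)) is approximately log(0.5); the polynomial
# lookup() below approximates log(exp(x) + 1) to about three decimal places.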
def lookup(x):
#return (float)Math.log (Math.exp(x) + 1);
if x <= 2.50:
if x <= 1.00:
return ((-0.009350833524763 * x + 0.130659527668286)\
* x + 0.498799810682272)\
* x + 0.693203116424741
return ((-0.014532321752540 * x + 0.139942324101744)\
* x + 0.495635523139337)\
* x + 0.692140569840976
if x <= 4.50:
return ((-0.004605031767994 * x + 0.063427417320019)\
* x + 0.695956496475118)\
* x + 0.514272634594009
return ((-0.000458661602210 * x + 0.009695946122598) * x + 0.930734667215156)\
* x + 0.168037164329057
def main():
pass
def _test():
import doctest
return doctest.testmod()
if __name__ == '__main__':
_test()
main() | 2.546875 | 3 |
gehomesdk/erd/converters/laundry/tank_status_converter.py | bendavis/gehome | 0 | 12788572 | import logging
from gehomesdk.erd.converters.abstract import ErdReadOnlyConverter
from gehomesdk.erd.converters.primitives import *
from gehomesdk.erd.values.laundry import ErdTankStatus, TankStatus, TANK_STATUS_MAP
_LOGGER = logging.getLogger(__name__)
class TankStatusConverter(ErdReadOnlyConverter[TankStatus]):
def erd_decode(self, value: str) -> TankStatus:
try:
om = ErdTankStatus(erd_decode_int(value))
return TANK_STATUS_MAP[om].value
except (KeyError, ValueError):
return ErdTankStatus.NA
| 2.171875 | 2 |
pdc/apps/common/serializers.py | hluk/product-definition-center | 18 | 12788573 | <filename>pdc/apps/common/serializers.py
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from rest_framework import serializers
from contrib.drf_introspection.serializers import StrictSerializerMixin
from .models import Label, Arch, SigKey
class LabelSerializer(StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
"""
Label Serializer
"""
class Meta:
model = Label
fields = ('url', 'name', 'description')
class ArchSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = Arch
fields = ('name',)
class SigKeySerializer(StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
name = serializers.CharField(default=None)
description = serializers.CharField(required=False, default="")
class Meta:
model = SigKey
fields = ('name', 'key_id', 'description')
| 2.171875 | 2 |
generator/make-permutations.py | Badisches-Landesmuseum/also-known-as | 2 | 12788574 | <gh_stars>1-10
import argparse
def main ():
parser = argparse.ArgumentParser()
parser.add_argument("inputFilePath", help="path of input file")
parser.add_argument("outputFilePath", help="path of output file")
parser.add_argument("--german", help="add -es suffix behind german base adjectives", action="store_true")
args = parser.parse_args()
make_permutations(args.inputFilePath, args.outputFilePath, args.german)
def make_permutations(inputFilePath, outputFilePath, german=False):
with open(inputFilePath, 'r') as readFile:
lines = readFile.readlines()
lines2 = lines[:]
print("read " + str(len(lines)) + " lines from " + inputFilePath)
suffix = "es" if german else ""
permutations = [w1.strip()+suffix+'-'+w2.strip()+suffix for w1 in lines for w2 in lines2 if w1 != w2]
with open(outputFilePath, 'w') as writeFile:
for perm in permutations:
writeFile.write(perm+"\n")
print("written " + str(len(permutations)) + " permutations to " + outputFilePath)
if __name__ == '__main__':
main() | 3.5 | 4 |
web/impact/impact/v1/helpers/criterion_option_spec_helper.py | masschallenge/impact-api | 5 | 12788575 | <reponame>masschallenge/impact-api
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from accelerator.models import CriterionOptionSpec
from impact.v1.helpers.model_helper import (
ModelHelper,
OPTIONAL_FLOAT_FIELD,
OPTIONAL_INTEGER_FIELD,
PK_FIELD,
REQUIRED_INTEGER_FIELD,
REQUIRED_STRING_FIELD,
)
from impact.v1.helpers.criterion_helper import CriterionHelper
from impact.v1.helpers.validators import (
validate_string,
validate_integer,
validate_float,
)
CRITERION_OPTION_SPEC_FIELDS = {
"id": PK_FIELD,
"option": REQUIRED_STRING_FIELD,
"count": OPTIONAL_INTEGER_FIELD,
"weight": OPTIONAL_FLOAT_FIELD,
"criterion_id": REQUIRED_INTEGER_FIELD,
}
class CriterionOptionSpecHelper(ModelHelper):
'''Encapsulates business logic for CriterionOptionSpecs, including logic
around allocation, analysis, and cloning.
'''
model = CriterionOptionSpec
VALIDATORS = {
"option": validate_string,
"count": validate_integer,
"weight": validate_float,
"criterion_id": validate_integer,
}
REQUIRED_KEYS = [
"option",
"criterion_id",
]
ALL_KEYS = REQUIRED_KEYS + [
"count",
"weight",
]
INPUT_KEYS = ALL_KEYS
def __init__(self, subject, criterion_helpers=None):
super().__init__(subject)
criterion = subject.criterion
if criterion_helpers is not None:
self.criterion_helper = criterion_helpers.get(criterion.id)
else:
CriterionHelper.find_helper(criterion)
def options(self, apps):
return self.criterion_helper.options(self.subject, apps)
def app_count(self, apps, option_name):
return self.criterion_helper.app_count(apps, option_name)
@classmethod
def fields(cls):
return CRITERION_OPTION_SPEC_FIELDS
@classmethod
def clone_option_specs(cls, clones):
for original_id, copy_id in clones:
cls.clone_options(original_id, copy_id)
@classmethod
def clone_options(cls, original_id, copy_id):
option_specs = cls.model.objects.filter(criterion_id=original_id)
cls.model.objects.bulk_create([
cls.model(option=spec.option,
count=spec.count,
weight=spec.weight,
criterion_id=copy_id)
for spec in option_specs])
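    # Illustrative call: clone_option_specs([(original.id, copy.id), ...])
    # bulk-copies every option spec of each original criterion onto its clone.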
| 1.96875 | 2 |
rlscore/measure/fscore_measure.py | vishalbelsare/RLScore | 61 | 12788576 | <reponame>vishalbelsare/RLScore<filename>rlscore/measure/fscore_measure.py
#
# The MIT License (MIT)
#
# This file is part of RLScore
#
# Copyright (c) 2010 - 2016 <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from .measure_utilities import multitask
from rlscore.utilities import array_tools
from rlscore.measure.measure_utilities import UndefinedPerformance
def fscore_singletask(Y, P):
correct = Y
predictions = P
if not np.all((Y==1) + (Y==-1)):
raise UndefinedPerformance("fscore accepts as Y-values only 1 and -1")
assert len(correct) == len(predictions)
TP = 0
FP = 0
FN = 0
for i in range(len(correct)):
if correct[i] == 1:
if predictions[i] > 0.:
TP += 1
else:
FN += 1
elif correct[i] == -1:
if predictions[i] > 0.:
FP += 1
else:
assert False
P = float(TP)/(TP+FP)
R = float(TP)/(TP+FN)
F = 2.*(P*R)/(P+R)
return F
def fscore_multitask(Y, P):
return multitask(Y, P, fscore_singletask)
def fscore(Y, P):
"""F1-Score.
A performance measure for binary classification problems.
F1 = 2*(Precision*Recall)/(Precision+Recall)
If 2-dimensional arrays are supplied as arguments, then macro-averaged
F-score is computed over the columns.
Parameters
----------
Y : {array-like}, shape = [n_samples] or [n_samples, n_labels]
Correct labels, must belong to set {-1,1}
P : {array-like}, shape = [n_samples] or [n_samples, n_labels]
Predicted labels, can be any real numbers. P[i]>0 is treated
as a positive, and P[i]<=0 as a negative class prediction.
Returns
-------
fscore : float
number between 0 and 1
"""
Y = array_tools.as_2d_array(Y)
P = array_tools.as_2d_array(P)
if not Y.shape == P.shape:
raise UndefinedPerformance("Y and P must be of same shape")
return np.mean(fscore_multitask(Y,P))
fscore.iserror = False
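if __name__ == "__main__":
    # Illustrative check: one true positive, one false negative and one false
    # positive give precision = recall = 0.5.
    Y_demo = np.array([1, 1, -1, -1])
    P_demo = np.array([0.5, -0.5, 0.5, -0.5])
    print(fscore(Y_demo, P_demo))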
| 1.617188 | 2 |
cve-manager/cve_manager/tests/test_function/test_cache.py | seandong37tt4qu/jeszhengq | 0 | 12788577 | #!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN 'AS IS' BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Time:
Author:
Description:
"""
import unittest
from cve_manager.function.cache import LRUCache
class TestCache(unittest.TestCase):
def setUp(self):
self.cache = LRUCache(2)
def test_common(self):
self.cache.put('a', [1])
self.cache.put('b', 2)
self.cache.put('a', 3)
self.assertEqual(len(self.cache.queue), 2)
res = self.cache.get('a')
self.assertEqual(res, 3)
| 2.265625 | 2 |
src/app.py | tdmalone/bitbucket-approvals-to-jira | 0 | 12788578 | <filename>src/app.py<gh_stars>0
# get PR details from incoming approve webhook
# quit if all approvers have not approved
# look for ticket ID in branch name
# look up ticket for any other unapproved PRs (if not possible thru Jira API, use bitbucket API - will have to spread across every repo in the team, though!) - quit if any
# make a request to an AfJ webhook to transition ticket from Code Review to Code Review Passed
# bonus: work out which jira user should be the actor of the transition change (if that's possible) OR just set a custom field to the user - if we can find them based on the bitbucket user, that is
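# Sketch of the "look for ticket ID in branch name" step from the notes above;
# the Jira key pattern (e.g. "ABC-123" in "feature/ABC-123-approvals") is an
# assumption, not something dictated by the webhook payload.
import re


def extract_ticket_id(branch_name):
    """Return the first Jira-style ticket key found in branch_name, or None."""
    match = re.search(r'[A-Z][A-Z0-9]+-\d+', branch_name)
    return match.group(0) if match else None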
from flask import Flask, request
from os import getenv
app = Flask(__name__)
@app.route('/', methods=['POST'])
def handle_webhook():
payload = request.get_json()
return 'Hello, world! I don\'t do a lot just yet.'
| 2.203125 | 2 |
unit_testing/test_transect_methods.py | British-Oceanographic-Data-Centre/NEMO-ENTRUST | 0 | 12788579 | """
"""
# IMPORT modules. Must have unittest, and probably coast.
import coast
from coast import general_utils
import unittest
import numpy as np
import os.path as path
import xarray as xr
import matplotlib.pyplot as plt
import unit_test_files as files
class test_transect_methods(unittest.TestCase):
def test_determine_extract_transect_indices(self):
nemo_t = coast.Gridded(files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid)
yt, xt, length_of_line = nemo_t.transect_indices([51, -5], [49, -9])
# Test transect indices
yt_ref = [
164,
163,
162,
162,
161,
160,
159,
158,
157,
156,
156,
155,
154,
153,
152,
152,
151,
150,
149,
148,
147,
146,
146,
145,
144,
143,
142,
142,
141,
140,
139,
138,
137,
136,
136,
135,
134,
]
xt_ref = [
134,
133,
132,
131,
130,
129,
128,
127,
126,
125,
124,
123,
122,
121,
120,
119,
118,
117,
116,
115,
114,
113,
112,
111,
110,
109,
108,
107,
106,
105,
104,
103,
102,
101,
100,
99,
98,
]
length_ref = 37
check1 = xt == xt_ref
check2 = yt == yt_ref
check3 = length_of_line == length_ref
self.assertTrue(check1, msg="check1")
self.assertTrue(check2, msg="check2")
self.assertTrue(check3, msg="check3")
def test_calculate_transport_velocity_and_depth(self):
with self.subTest("Calculate_transports and velocties and depth"):
nemo_t = coast.Gridded(
fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid
)
nemo_u = coast.Gridded(
fn_data=files.fn_nemo_grid_u_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_u_grid
)
nemo_v = coast.Gridded(
fn_data=files.fn_nemo_grid_v_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_v_grid
)
nemo_f = coast.Gridded(fn_domain=files.fn_nemo_dom, config=files.fn_config_f_grid)
tran_f = coast.TransectF(nemo_f, (54, -15), (56, -12))
tran_f.calc_flow_across_transect(nemo_u, nemo_v)
cksum1 = tran_f.data_cross_tran_flow.normal_velocities.sum(dim=("t_dim", "z_dim", "r_dim")).item()
cksum2 = tran_f.data_cross_tran_flow.normal_transports.sum(dim=("t_dim", "r_dim")).item()
check1 = np.isclose(cksum1, -253.6484375)
check2 = np.isclose(cksum2, -48.67562136873888)
self.assertTrue(check1, msg="check1")
self.assertTrue(check2, msg="check2")
with self.subTest("plot_transect_on_map"):
fig, ax = tran_f.plot_transect_on_map()
ax.set_xlim([-20, 0]) # Problem: nice to make the land appear.
ax.set_ylim([45, 65]) # But can not call plt.show() before adjustments are made...
# fig.tight_layout()
fig.savefig(files.dn_fig + "transect_map.png")
plt.close("all")
with self.subTest("plot_normal_velocity"):
plot_dict = {"fig_size": (5, 3), "title": "Normal velocities"}
fig, ax = tran_f.plot_normal_velocity(time=0, cmap="seismic", plot_info=plot_dict, smoothing_window=2)
fig.tight_layout()
fig.savefig(files.dn_fig + "transect_velocities.png")
plt.close("all")
with self.subTest("plot_depth_integrated_transport"):
plot_dict = {"fig_size": (5, 3), "title": "Transport across AB"}
fig, ax = tran_f.plot_depth_integrated_transport(time=0, plot_info=plot_dict, smoothing_window=2)
fig.tight_layout()
fig.savefig(files.dn_fig + "transect_transport.png")
plt.close("all")
def test_transect_density_and_pressure(self):
nemo_t = coast.Gridded(
fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid
)
tran_t = coast.TransectT(nemo_t, (54, -15), (56, -12))
tran_t.construct_pressure()
cksum1 = tran_t.data.density_zlevels.sum(dim=["t_dim", "r_dim", "depth_z_levels"]).compute().item()
cksum2 = tran_t.data.pressure_h_zlevels.sum(dim=["t_dim", "r_dim", "depth_z_levels"]).compute().item()
cksum3 = tran_t.data.pressure_s.sum(dim=["t_dim", "r_dim"]).compute().item()
check1 = np.isclose(cksum1, 23800545.87457855)
check2 = np.isclose(cksum2, 135536478.93335825)
check3 = np.isclose(cksum3, -285918.5625)
self.assertTrue(check1, msg="check1")
self.assertTrue(check2, msg="check2")
self.assertTrue(check3, msg="check3")
def test_cross_transect_geostrophic_flow(self):
nemo_f = coast.Gridded(fn_domain=files.fn_nemo_dom, config=files.fn_config_f_grid)
tran_f = coast.TransectF(nemo_f, (54, -15), (56, -12))
nemo_t = coast.Gridded(
fn_data=files.fn_nemo_grid_t_dat, fn_domain=files.fn_nemo_dom, config=files.fn_config_t_grid
)
tran_f.calc_geostrophic_flow(nemo_t, config_u=files.fn_config_u_grid, config_v=files.fn_config_v_grid)
cksum1 = tran_f.data_cross_tran_flow.normal_velocity_hpg.sum(dim=("t_dim", "depth_z_levels", "r_dim")).item()
cksum2 = tran_f.data_cross_tran_flow.normal_velocity_spg.sum(dim=("t_dim", "r_dim")).item()
cksum3 = tran_f.data_cross_tran_flow.normal_transport_hpg.sum(dim=("t_dim", "r_dim")).item()
cksum4 = tran_f.data_cross_tran_flow.normal_transport_spg.sum(dim=("t_dim", "r_dim")).item()
check1 = np.isclose(cksum1, 84.8632969783)
check2 = np.isclose(cksum2, -5.09718418121)
check3 = np.isclose(cksum3, 115.2587369660)
check4 = np.isclose(cksum4, -106.7897376093)
self.assertTrue(check1, msg="check1")
self.assertTrue(check2, msg="check2")
self.assertTrue(check3, msg="check3")
self.assertTrue(check4, msg="check4")
| 2.5 | 2 |
DataReader.py | DezhengLee/Labster | 1 | 12788580 | import numpy as np
from csv import reader
from decimal import *
def SDM(datalist):
"""
逐差法
:param datalist:
:return:
"""
length = len(datalist)
resultlist = []
halfLen = int(length/2)
for i in range(0, halfLen):
resultlist.append((Decimal(datalist[i+halfLen]) - Decimal(datalist[i])).to_eng_string())
return resultlist
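# For example, SDM(['1', '2', '3', '4']) pairs each reading with the one half
# a period later and returns ['2', '2'].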
class DataReader:
def __init__(self, filename):
self.filename = filename
with open(filename, 'rt', encoding='UTF-8') as raw_data:
readers = reader(raw_data, delimiter=',')
overx = list(readers)
data = np.array(overx)
self.data = data
self.resultVar = data[0][1]
self.resultUnit = data[0][3]
self.function = data[1][1]
self.P = float(data[0][5])
if data[1][3] == 'Y':
self.flag = True
elif data[1][3] == 'N':
self.flag = False
else:
raise IOError('Y or N wanted, not ' + data[1][3])
experimentdata = data[4:len(data)]
tempvarlist = []
tempunitlist = []
tempdatalist = []
tempUblist = []
tempSDMflag = []
tempUbFunclist = []
tempfunctionlist = []
for item in experimentdata:
tempvarlist.append(item[0])
tempunitlist.append(item[1])
tempUbFunclist.append(item[2])
temptempdata = []
for j in range(3, len(item)):
if j == 3:
tempUblist.append(item[j])
elif j == 4:
tempSDMflag.append(item[j])
elif j == 5:
tempfunctionlist.append(item[j])
else:
if not item[j] == '':
temptempdata.append(item[j])
tempdatalist.append(temptempdata)
self.varList = tempvarlist
self.unitList = tempunitlist
self.UbList = tempUblist
self.UbFuncList = tempUbFunclist
self.SDMflagList = tempSDMflag
self.TempFunctionList = tempfunctionlist
for i in range(0, len(tempSDMflag)):
if tempSDMflag[i] == 'Y':
tempdatalist[i] = SDM(tempdatalist[i])
self.dataList = tempdatalist
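# Illustrative usage sketch, assuming made-up sample values; only SDM() is
# exercised here because the DataReader CSV layout above is repo-specific.
if __name__ == '__main__':
    sample = ['1.0', '2.0', '3.0', '4.1', '5.1', '6.1']
    # halves are (1.0, 2.0, 3.0) and (4.1, 5.1, 6.1), so every difference is 3.1
    print(SDM(sample))  # -> ['3.1', '3.1', '3.1']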
| 2.984375 | 3 |
CodeGen/hmlFhirConverterCodeGenerator/codegen/PathYamlGenerator.py | nmdp-bioinformatics/service-hmlFhirConverter | 1 | 12788581 | from swagger import RoutingSpecGenerator
import os
def replace_class_instances(className, fileContents):
fileContents = replace_class_named_instances(className, fileContents)
fileContents = str(fileContents).replace('**CLASSNAME**', className)
return replace_class_plural_instances(className, fileContents)
def replace_class_named_instances(className, fileContents):
return str(fileContents).replace('**classname**', js_camel_case_string(className))
def replace_class_plural_instances(className, fileContents):
pluralUpperClass = transform_class_name_to_plural(className)
pluralLowerClass = transform_class_name_to_plural(className.lower())
fileContents = str(fileContents).replace('**PLURAL**', pluralUpperClass)
return str(fileContents).replace('**plural**', pluralLowerClass)
def transform_class_name_to_plural(className):
if className[len(className) - 1] == "y":
return str(className)[:(len(className) - 1)] + "ies"
return className + "s"
def js_camel_case_string(string):
return str(string[0]).lower() + string[1:]
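# Illustrative behaviour of the helpers above, assuming hypothetical class names:
#   transform_class_name_to_plural('Sample')   -> 'Samples'
#   transform_class_name_to_plural('Antibody') -> 'Antibodies'  (trailing 'y' -> 'ies')
#   js_camel_case_string('HmlFhirModel')       -> 'hmlFhirModel'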
swagger_paths_spec_dir = r'/Source/API/service-hmlFhirConverter/src/main/resources/swagger/paths'
routingSpecGenerator = RoutingSpecGenerator.RoutingSpecGenerator()
modelNames = routingSpecGenerator.get_model_names()
for model in modelNames:
modelTemplate = routingSpecGenerator.get_template()
modelTemplate = replace_class_instances(model, modelTemplate)
routingSpecGenerator.write_file(os.path.join(swagger_paths_spec_dir, model + '.yaml'), modelTemplate) | 2.421875 | 2 |
tondo/tests.py | feliperuhland/tondo-api | 0 | 12788582 | # -*- coding: utf-8 -*-
import random
import unittest
import tondo
class TestSequenceFunctions(unittest.TestCase):
JSON_ITEMS = [
'content',
'contributor',
'type',
'url'
]
def setUp(self):
self.json = tondo.loadjsons()
def test_json_integrity(self):
for json_file in self.json:
for item in self.json[json_file]:
self.assertListEqual(item.keys(), self.JSON_ITEMS)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
| 2.734375 | 3 |
migrations/versions/a7acd67386c9_users_table.py | Armalon/PizzaTask | 0 | 12788583 | """users table
Revision ID: a7acd67386c9
Revises:
Create Date: 2020-07-21 16:17:36.492935
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('address', sa.String(length=256), nullable=True),
sa.Column('phone', sa.String(length=20), nullable=True),
sa.Column('last_password_hash', sa.String(length=32), nullable=True),
sa.PrimaryKeyConstraint('id')
)
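# Illustrative examples, assuming arbitrary place names:
#   remove_accents('Zürich') -> 'Zuerich'   (umlauts map to 'ue'/'ae'/'oe')
#   remove_accents('Genève') -> 'Geneve'    (other accents are stripped)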
op.create_index(op.f('ix_user_address'), 'user', ['address'], unique=True)
op.create_index(op.f('ix_user_phone'), 'user', ['phone'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_phone'), table_name='user')
op.drop_index(op.f('ix_user_address'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
| 1.867188 | 2 |
tobiko/openstack/stacks/_ubuntu.py | 4383/tobiko | 0 | 12788584 | # Copyright 2019 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import tobiko
from tobiko import config
from tobiko.openstack import glance
from tobiko.openstack.stacks import _nova
CONF = config.CONF
UBUNTU_IMAGE_URL = \
('http://cloud-images.ubuntu.com/bionic/current/'
'bionic-server-cloudimg-amd64.img')
class UbuntuImageFixture(glance.URLGlanceImageFixture):
image_url = CONF.tobiko.ubuntu.image_url or UBUNTU_IMAGE_URL
image_name = CONF.tobiko.ubuntu.image_name
image_file = CONF.tobiko.ubuntu.image_file
disk_format = CONF.tobiko.ubuntu.disk_format or "qcow2"
container_format = CONF.tobiko.ubuntu.container_format or "bare"
username = CONF.tobiko.ubuntu.username or 'ubuntu'
password = <PASSWORD>.password
class UbuntuFlavorStackFixture(_nova.FlavorStackFixture):
ram = 512
class UbuntuServerStackFixture(_nova.ServerStackFixture):
#: Glance image used to create a Nova server instance
image_fixture = tobiko.required_setup_fixture(UbuntuImageFixture)
#: Flavor used to create a Nova server instance
flavor_stack = tobiko.required_setup_fixture(UbuntuFlavorStackFixture)
| 1.4375 | 1 |
backend/tests/wallet_tests/services/offchain/test_offchain.py | tanshuai/reference-wallet | 14 | 12788585 | # Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
import dataclasses
import context
import wallet.services.offchain.p2p_payment as pc_service
import wallet.services.offchain.utils as utils
from diem import identifier, LocalAccount, jsonrpc
from diem_utils.types.currencies import DiemCurrency
from tests.wallet_tests.resources.seeds.one_user_seeder import OneUser
from wallet import storage
from wallet.services import offchain as offchain_service
from wallet.services.account import (
generate_new_subaddress,
)
from wallet.services.offchain import offchain as offchain_service
from wallet.storage import db_session
from wallet import storage
import offchain
from wallet.types import TransactionStatus
CID = "35a1b548-3170-438f-bf3a-6ca0fef85d15"
currency = DiemCurrency.XUS
def test_save_outbound_payment_command(monkeypatch):
user = OneUser.run(
db_session, account_amount=100_000_000_000, account_currency=currency
)
amount = 10_000_000_000
receiver = LocalAccount.generate()
sub_address = identifier.gen_subaddress()
cmd = pc_service.save_outbound_payment_command(
user.account_id, receiver.account_address, sub_address, amount, currency
)
assert cmd is not None
assert cmd.reference_id() is not None
model = storage.get_payment_command(cmd.reference_id())
assert model is not None
assert model.reference_id is not None
assert model.status == TransactionStatus.OFF_CHAIN_OUTBOUND
with monkeypatch.context() as m:
m.setattr(
context.get().offchain_client,
"send_command",
lambda c, _: offchain.reply_request(c.cid),
)
offchain_service.process_offchain_tasks()
db_session.refresh(model)
assert model.status == TransactionStatus.OFF_CHAIN_WAIT
def test_process_inbound_payment_command(monkeypatch):
hrp = context.get().config.diem_address_hrp()
user = OneUser.run(
db_session, account_amount=100_000_000_000, account_currency=currency
)
amount = 10_000_000_000
sender = LocalAccount.generate()
sender_sub_address = identifier.gen_subaddress()
receiver_sub_address = generate_new_subaddress(user.account_id)
cmd = offchain.PaymentCommand.init(
sender_account_id=identifier.encode_account(
sender.account_address, sender_sub_address, hrp
),
sender_kyc_data=utils.user_kyc_data(user.account_id),
receiver_account_id=identifier.encode_account(
context.get().config.vasp_address, receiver_sub_address, hrp
),
amount=amount,
currency=currency.value,
inbound=True,
)
with monkeypatch.context() as m:
client = context.get().offchain_client
m.setattr(
client,
"deserialize_jws_request",
lambda _, c: client.create_inbound_payment_command(c.cid, c.payment),
)
m.setattr(
client,
"process_inbound_request",
lambda c, _: client.create_inbound_payment_command(c.cid, c.payment),
)
m.setattr(
client,
"send_command",
lambda c, _: offchain.reply_request(c.cid),
)
code, resp = offchain_service.process_inbound_command(
cmd.payment.sender.address, cmd
)
assert code == 200
assert resp
model = storage.get_payment_command(cmd.reference_id())
assert model
assert model.status == TransactionStatus.OFF_CHAIN_RECEIVER_OUTBOUND
assert model.inbound, str(cmd)
def test_submit_txn_when_both_ready(monkeypatch):
user = OneUser.run(
db_session, account_amount=100_000_000_000, account_currency=currency
)
amount = 10_000_000_000
receiver = LocalAccount.generate()
sub_address = identifier.gen_subaddress()
cmd = pc_service.save_outbound_payment_command(
user.account_id, receiver.account_address, sub_address, amount, currency
)
receiver_cmd = dataclasses.replace(
cmd, my_actor_address=cmd.payment.receiver.address
)
receiver_ready_cmd = receiver_cmd.new_command(
recipient_signature=b"recipient_signature".hex(),
status=offchain.Status.ready_for_settlement,
kyc_data=utils.user_kyc_data(user.account_id),
)
model = storage.get_payment_command(cmd.reference_id())
assert model
assert model.status == TransactionStatus.OFF_CHAIN_OUTBOUND
assert not model.inbound, str(model)
with monkeypatch.context() as m:
client = context.get().offchain_client
m.setattr(
context.get().offchain_client,
"deserialize_jws_request",
lambda _, c: client.create_inbound_payment_command(c.cid, c.payment),
)
m.setattr(
client,
"process_inbound_request",
lambda c, _: client.create_inbound_payment_command(c.cid, c.payment),
)
code, resp = offchain_service.process_inbound_command(
cmd.payment.receiver.address, receiver_ready_cmd
)
assert code == 200
assert resp
model = storage.get_payment_command(cmd.reference_id())
assert model
assert model.status == TransactionStatus.OFF_CHAIN_INBOUND
assert model.inbound, str(model)
# sync command and submit
with monkeypatch.context() as m:
m.setattr(
context.get().offchain_client,
"send_command",
lambda c, _: offchain.reply_request(c.cid),
)
m.setattr(
context.get(),
"p2p_by_travel_rule",
jsonrpc_txn_sample,
)
offchain_service.process_offchain_tasks()
model = storage.get_payment_command(cmd.reference_id())
assert model.status == TransactionStatus.COMPLETED, model.reference_id
tx = storage.get_transaction_by_reference_id(model.reference_id)
assert tx.status == TransactionStatus.COMPLETED
assert tx.sequence == 5
assert tx.blockchain_version == 3232
def jsonrpc_txn_sample(*args):
return jsonrpc.Transaction(
version=3232,
transaction=jsonrpc.TransactionData(sequence_number=5),
hash="3232-hash",
)
| 1.789063 | 2 |
slack_bot/bot.py | dietrichsimon/slack-bot | 0 | 12788586 | import logging
import sqlalchemy as db
import pandas as pd
from slack import WebClient
import config
# establish connection to postgres database
try:
engine = db.create_engine("postgres://postgres:1234@postgresdb:5432")
except:
logging.critical('Could not establish connection to postgres database.')
# initialize connection with slack
client = WebClient(token=config.ACCESS_TOKEN_SLACK)
# select most recent tweet from database
query = f"""SELECT * FROM tweets;"""
df = pd.read_sql(query, engine)
tweet = str(df['tweet'].iloc[-1])  # take just the value, not the Series repr with index/dtype
# post tweet on Slack
response = client.chat_postMessage(
channel="#bot_playground",
text=f"Time for a tweet: {tweet}")
| 2.9375 | 3 |
cra_helper/server_check.py | Maronato/django-cra-helper | 54 | 12788587 | from urllib import request, error as url_error
from django.conf import settings
from cra_helper.logging import logger
def hosted_by_liveserver(file_url: str) -> bool:
# Ignore the server check if we're in production
if settings.DEBUG:
try:
resp = request.urlopen(file_url)
if resp.status == 200:
logger.debug('{} is being hosted by liveserver'.format(file_url))
return True
else:
logger.warning('Create-React-App liveserver is up but not serving files')
return False
except url_error.URLError as err:
logger.debug('{} is not being hosted by liveserver'.format(file_url))
return False
else:
logger.debug('Liveserver host check disabled in production')
return False
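# Minimal usage sketch, assuming a hypothetical CRA bundle URL; the check only
# ever returns True while Django's DEBUG setting is enabled:
#     bundle = 'http://localhost:3000/static/js/bundle.js'
#     use_liveserver = hosted_by_liveserver(bundle)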
| 2.34375 | 2 |
dist/Basilisk/fswAlgorithms/inertial3DSpin/inertial3DSpin.py | ian-cooke/basilisk_mag | 0 | 12788588 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_inertial3DSpin')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_inertial3DSpin')
_inertial3DSpin = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_inertial3DSpin', [dirname(__file__)])
except ImportError:
import _inertial3DSpin
return _inertial3DSpin
try:
_mod = imp.load_module('_inertial3DSpin', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_inertial3DSpin = swig_import_helper()
del swig_import_helper
else:
import _inertial3DSpin
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
def new_doubleArray(nelements):
return _inertial3DSpin.new_doubleArray(nelements)
new_doubleArray = _inertial3DSpin.new_doubleArray
def delete_doubleArray(ary):
return _inertial3DSpin.delete_doubleArray(ary)
delete_doubleArray = _inertial3DSpin.delete_doubleArray
def doubleArray_getitem(ary, index):
return _inertial3DSpin.doubleArray_getitem(ary, index)
doubleArray_getitem = _inertial3DSpin.doubleArray_getitem
def doubleArray_setitem(ary, index, value):
return _inertial3DSpin.doubleArray_setitem(ary, index, value)
doubleArray_setitem = _inertial3DSpin.doubleArray_setitem
def new_longArray(nelements):
return _inertial3DSpin.new_longArray(nelements)
new_longArray = _inertial3DSpin.new_longArray
def delete_longArray(ary):
return _inertial3DSpin.delete_longArray(ary)
delete_longArray = _inertial3DSpin.delete_longArray
def longArray_getitem(ary, index):
return _inertial3DSpin.longArray_getitem(ary, index)
longArray_getitem = _inertial3DSpin.longArray_getitem
def longArray_setitem(ary, index, value):
return _inertial3DSpin.longArray_setitem(ary, index, value)
longArray_setitem = _inertial3DSpin.longArray_setitem
def new_intArray(nelements):
return _inertial3DSpin.new_intArray(nelements)
new_intArray = _inertial3DSpin.new_intArray
def delete_intArray(ary):
return _inertial3DSpin.delete_intArray(ary)
delete_intArray = _inertial3DSpin.delete_intArray
def intArray_getitem(ary, index):
return _inertial3DSpin.intArray_getitem(ary, index)
intArray_getitem = _inertial3DSpin.intArray_getitem
def intArray_setitem(ary, index, value):
return _inertial3DSpin.intArray_setitem(ary, index, value)
intArray_setitem = _inertial3DSpin.intArray_setitem
def new_shortArray(nelements):
return _inertial3DSpin.new_shortArray(nelements)
new_shortArray = _inertial3DSpin.new_shortArray
def delete_shortArray(ary):
return _inertial3DSpin.delete_shortArray(ary)
delete_shortArray = _inertial3DSpin.delete_shortArray
def shortArray_getitem(ary, index):
return _inertial3DSpin.shortArray_getitem(ary, index)
shortArray_getitem = _inertial3DSpin.shortArray_getitem
def shortArray_setitem(ary, index, value):
return _inertial3DSpin.shortArray_setitem(ary, index, value)
shortArray_setitem = _inertial3DSpin.shortArray_setitem
def getStructSize(self):
try:
return eval('sizeof_' + repr(self).split(';')[0].split('.')[-1])
except (NameError) as e:
typeString = 'sizeof_' + repr(self).split(';')[0].split('.')[-1]
raise NameError(e.message + '\nYou tried to get this size macro: ' + typeString +
'\n It appears to be undefined. \nYou need to run the SWIG GEN_SIZEOF' +
' SWIG macro against the class/struct in your SWIG file if you want to ' +
' make this call.\n')
def protectSetAttr(self, name, value):
if(hasattr(self, name) or name == 'this'):
object.__setattr__(self, name, value)
else:
raise ValueError('You tried to add this variable: ' + name + '\n' +
'To this class: ' + str(self))
def protectAllClasses(moduleType):
import inspect
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
for member in clsmembers:
try:
exec(str(member[0]) + '.__setattr__ = protectSetAttr')
exec(str(member[0]) + '.getStructSize = getStructSize')
except (AttributeError, TypeError) as e:
pass
Update_inertial3DSpin = _inertial3DSpin.Update_inertial3DSpin
SelfInit_inertial3DSpin = _inertial3DSpin.SelfInit_inertial3DSpin
CrossInit_inertial3DSpin = _inertial3DSpin.CrossInit_inertial3DSpin
Reset_inertial3DSpin = _inertial3DSpin.Reset_inertial3DSpin
sizeof_inertial3DSpinConfig = _inertial3DSpin.sizeof_inertial3DSpinConfig
sizeof_AttRefFswMsg = _inertial3DSpin.sizeof_AttRefFswMsg
class inertial3DSpinConfig(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, inertial3DSpinConfig, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, inertial3DSpinConfig, name)
__repr__ = _swig_repr
__swig_setmethods__["sigma_RN"] = _inertial3DSpin.inertial3DSpinConfig_sigma_RN_set
__swig_getmethods__["sigma_RN"] = _inertial3DSpin.inertial3DSpinConfig_sigma_RN_get
if _newclass:
sigma_RN = _swig_property(_inertial3DSpin.inertial3DSpinConfig_sigma_RN_get, _inertial3DSpin.inertial3DSpinConfig_sigma_RN_set)
__swig_setmethods__["omega_spin"] = _inertial3DSpin.inertial3DSpinConfig_omega_spin_set
__swig_getmethods__["omega_spin"] = _inertial3DSpin.inertial3DSpinConfig_omega_spin_get
if _newclass:
omega_spin = _swig_property(_inertial3DSpin.inertial3DSpinConfig_omega_spin_get, _inertial3DSpin.inertial3DSpinConfig_omega_spin_set)
__swig_setmethods__["priorTime"] = _inertial3DSpin.inertial3DSpinConfig_priorTime_set
__swig_getmethods__["priorTime"] = _inertial3DSpin.inertial3DSpinConfig_priorTime_get
if _newclass:
priorTime = _swig_property(_inertial3DSpin.inertial3DSpinConfig_priorTime_get, _inertial3DSpin.inertial3DSpinConfig_priorTime_set)
__swig_setmethods__["outputDataName"] = _inertial3DSpin.inertial3DSpinConfig_outputDataName_set
__swig_getmethods__["outputDataName"] = _inertial3DSpin.inertial3DSpinConfig_outputDataName_get
if _newclass:
outputDataName = _swig_property(_inertial3DSpin.inertial3DSpinConfig_outputDataName_get, _inertial3DSpin.inertial3DSpinConfig_outputDataName_set)
__swig_setmethods__["outputMsgID"] = _inertial3DSpin.inertial3DSpinConfig_outputMsgID_set
__swig_getmethods__["outputMsgID"] = _inertial3DSpin.inertial3DSpinConfig_outputMsgID_get
if _newclass:
outputMsgID = _swig_property(_inertial3DSpin.inertial3DSpinConfig_outputMsgID_get, _inertial3DSpin.inertial3DSpinConfig_outputMsgID_set)
__swig_setmethods__["inputRefName"] = _inertial3DSpin.inertial3DSpinConfig_inputRefName_set
__swig_getmethods__["inputRefName"] = _inertial3DSpin.inertial3DSpinConfig_inputRefName_get
if _newclass:
inputRefName = _swig_property(_inertial3DSpin.inertial3DSpinConfig_inputRefName_get, _inertial3DSpin.inertial3DSpinConfig_inputRefName_set)
__swig_setmethods__["inputRefID"] = _inertial3DSpin.inertial3DSpinConfig_inputRefID_set
__swig_getmethods__["inputRefID"] = _inertial3DSpin.inertial3DSpinConfig_inputRefID_get
if _newclass:
inputRefID = _swig_property(_inertial3DSpin.inertial3DSpinConfig_inputRefID_get, _inertial3DSpin.inertial3DSpinConfig_inputRefID_set)
__swig_setmethods__["attRefOut"] = _inertial3DSpin.inertial3DSpinConfig_attRefOut_set
__swig_getmethods__["attRefOut"] = _inertial3DSpin.inertial3DSpinConfig_attRefOut_get
if _newclass:
attRefOut = _swig_property(_inertial3DSpin.inertial3DSpinConfig_attRefOut_get, _inertial3DSpin.inertial3DSpinConfig_attRefOut_set)
def __init__(self):
this = _inertial3DSpin.new_inertial3DSpinConfig()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _inertial3DSpin.delete_inertial3DSpinConfig
__del__ = lambda self: None
inertial3DSpinConfig_swigregister = _inertial3DSpin.inertial3DSpinConfig_swigregister
inertial3DSpinConfig_swigregister(inertial3DSpinConfig)
def computeReference_inertial3DSpin(ConfigData, omega_R0N_N, domega_R0N_N, omega_RR0_R, dt):
return _inertial3DSpin.computeReference_inertial3DSpin(ConfigData, omega_R0N_N, domega_R0N_N, omega_RR0_R, dt)
computeReference_inertial3DSpin = _inertial3DSpin.computeReference_inertial3DSpin
class AttRefFswMsg(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, AttRefFswMsg, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, AttRefFswMsg, name)
__repr__ = _swig_repr
__swig_setmethods__["sigma_RN"] = _inertial3DSpin.AttRefFswMsg_sigma_RN_set
__swig_getmethods__["sigma_RN"] = _inertial3DSpin.AttRefFswMsg_sigma_RN_get
if _newclass:
sigma_RN = _swig_property(_inertial3DSpin.AttRefFswMsg_sigma_RN_get, _inertial3DSpin.AttRefFswMsg_sigma_RN_set)
__swig_setmethods__["omega_RN_N"] = _inertial3DSpin.AttRefFswMsg_omega_RN_N_set
__swig_getmethods__["omega_RN_N"] = _inertial3DSpin.AttRefFswMsg_omega_RN_N_get
if _newclass:
omega_RN_N = _swig_property(_inertial3DSpin.AttRefFswMsg_omega_RN_N_get, _inertial3DSpin.AttRefFswMsg_omega_RN_N_set)
__swig_setmethods__["domega_RN_N"] = _inertial3DSpin.AttRefFswMsg_domega_RN_N_set
__swig_getmethods__["domega_RN_N"] = _inertial3DSpin.AttRefFswMsg_domega_RN_N_get
if _newclass:
domega_RN_N = _swig_property(_inertial3DSpin.AttRefFswMsg_domega_RN_N_get, _inertial3DSpin.AttRefFswMsg_domega_RN_N_set)
def __init__(self):
this = _inertial3DSpin.new_AttRefFswMsg()
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _inertial3DSpin.delete_AttRefFswMsg
__del__ = lambda self: None
AttRefFswMsg_swigregister = _inertial3DSpin.AttRefFswMsg_swigregister
AttRefFswMsg_swigregister(AttRefFswMsg)
import sys
protectAllClasses(sys.modules[__name__])
# This file is compatible with both classic and new-style classes.
| 1.695313 | 2 |
Source/Squiddy/src/tema-android-adapter-3.2-sma/AndroidAdapter/screencapture.py | samini/gort-public | 1 | 12788589 | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Uses the android screenshot application to capture device screen
Screenshot is a Java-application delivered with the android source and must be
compiled and set up with the following instructions:
1. copy <androidsource>\sdk\screenshot\src\com folder to <androidsdk>\tools
2. with command line, go to <androidsdk>\tools
3. compile the code: javac -classpath lib\ddmlib.jar com\android\screenshot\Screenshot.java
4. set the sdk's tools folder and the <androidsdk>\tools\lib\ddmlib.jar to the CLASSPATH environment variable
5. You should now be able to run the application from any path with the command: java com.android.screenshot.Screenshot
"""
import subprocess
def captureScreen(out_file, serial_id):
retcode = subprocess.call(["java", "com.android.screenshot.Screenshot", "-s", serial_id , out_file],stdout = subprocess.PIPE,stderr = subprocess.PIPE)
if retcode != 0:
print "Error: screenshot application not configured!"
if __name__ == "__main__":
captureScreen("pic.png", "emulator-5554")
| 1.6875 | 2 |
dynamic_programming/fibonacci_modified.py | laanak08/algorithms | 0 | 12788590 | def main():
A,B,N = raw_input().split(" ")
print T(int(A),int(B),int(N))
def T(A,B,N):
if N == 1:
return A
elif N == 2:
return B
else:
return ( T(A,B,(N-1)) ** 2 ) + T(A,B,(N-2))
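# Illustrative memoised variant (a sketch, not part of the original submission):
# the plain recursion above recomputes subproblems exponentially, while caching
# keeps the same recurrence T(n) = T(n-1)**2 + T(n-2) linear in N.
def T_memo(A, B, N, cache=None):
    if cache is None:
        cache = {1: A, 2: B}
    if N not in cache:
        cache[N] = T_memo(A, B, N - 1, cache) ** 2 + T_memo(A, B, N - 2, cache)
    return cache[N]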
main() | 3.515625 | 4 |
care/facility/migrations/0190_auto_20201001_1134.py | gigincg/care | 189 | 12788604 |
# Generated by Django 2.2.11 on 2020-10-01 06:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facility', '0189_auto_20200929_1258'),
]
operations = [
migrations.AlterField(
model_name='shiftingrequest',
name='status',
field=models.IntegerField(choices=[(10, 'PENDING'), (15, 'ON HOLD'), (20, 'APPROVED'), (30, 'REJECTED'), (40, 'DESTINATION APPROVED'), (50, 'DESTINATION REJECTED'), (60, 'AWAITING TRANSPORTATION'), (70, 'TRANSFER IN PROGRESS'), (80, 'COMPLETED')], default=10),
),
]
| 1.554688 | 2 |
nuplan/planning/metrics/evaluation_metrics/common/ego_lat_jerk.py | motional/nuplan-devkit | 128 | 12788592 |
from typing import List
from nuplan.planning.metrics.evaluation_metrics.base.within_bound_metric_base import WithinBoundMetricBase
from nuplan.planning.metrics.metric_result import MetricStatistics
from nuplan.planning.metrics.utils.state_extractors import extract_ego_jerk
from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario
from nuplan.planning.simulation.history.simulation_history import SimulationHistory
class EgoLatJerkStatistics(WithinBoundMetricBase):
"""Ego lateral jerk metric."""
def __init__(self, name: str, category: str) -> None:
"""
Initializes the EgoLatJerkStatistics class
:param name: Metric name
:param category: Metric category.
"""
super().__init__(name=name, category=category)
def compute(self, history: SimulationHistory, scenario: AbstractScenario) -> List[MetricStatistics]:
"""
Returns the lateral jerk metric
:param history: History from a simulation engine
:param scenario: Scenario running this metric
:return the estimated lateral jerk metric.
"""
return self._compute_statistics( # type: ignore
history=history,
scenario=scenario,
statistic_unit_name='meters_per_second_cubed',
extract_function=extract_ego_jerk,
extract_function_params={'acceleration_coordinate': 'y'},
)
| 2.34375 | 2 |
examples/many_frames.py | RcSepp/asyncframes | 2 | 12788593 | # -*- coding: utf-8 -*-
# Copyright (c) <NAME>. All Rights Reserved.
# Distributed under the MIT License. See LICENSE file for more info.
import time
from asyncframes import Frame, PFrame, sleep, all_
from asyncframes.asyncio_eventloop import EventLoop
@Frame
async def main_frame():
subframes = [sub_frame(i) for i in range(10000)]
print(sum(1 if result == True else 0 for result in await all_(*subframes)))
@PFrame
async def sub_frame(i):
time.sleep(0.001)
return i % 2 == 0
loop = EventLoop()
loop.run(main_frame)
| 3.21875 | 3 |
backend/FlaskAPI/mf_insert_table_data.py | steruel/CovalentMFFPrototype | 0 | 12788594 |
# coding: utf-8
# In[1]:
##Includes
from flask import request, url_for
from flask_api import FlaskAPI, status, exceptions
from flask_cors import CORS, cross_origin
from flask import Blueprint, render_template, abort
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle
import json
import sqlite3
mf_insert_table_data = Blueprint('mf_insert_table_data', __name__)
CORS(mf_insert_table_data)
CORS(mf_insert_table_data,resources={r"/mf_insert_table_data/*/": {"origins": "*"}})
#cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
######flask_api
##########
##Pass header=0 if first line in text file contains header
def read_csv_file(filename, seperator, header):
df = pd.read_csv(filename,sep=seperator,header=header)
return df
def df_give_columnnames(df,colnames_array):
df.columns = colnames_array
return df
def write_json_file(df,filename):
df.to_json(filename,orient='records')
#http://localhost:5000/mf_insert_table_data/model_metadata/model_id=m1&model_description=testmodel
#http://localhost:5000/mf_insert_table_data/model_metadata/
@mf_insert_table_data.route("/mf_insert_table_data/model_metadata/", methods=['GET','POST'])
@cross_origin()
def insertinto_model_metadata():
print ('innnn1')
list_records = []
dbname = 'mf'
db_file = '../database/'+dbname+'.db'
tablename = 'model_metadata'
model_id = request.json['model_id']
model_description = request.json['model_description']
datasetid = request.json['datasetid']
print (model_id,model_description,datasetid)
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
status = "ok"
try:
sql = "insert into model_metadata(model_id, model_description,datasetid) VALUES ('"+model_id+"','"+model_description+"',"+str(datasetid)+")"
print sql
cursor.execute(sql)
print cursor.lastrowid
except:
print "error"
status = "error"
conn.commit()
conn.close()
json = {'status':status}
return json
| 2.203125 | 2 |
abc/abc063/abc063c.py | c-yan/atcoder | 1 | 12788595 | N = int(input())
s = [int(input()) for _ in range(N)]
result = sum(s)
if result % 10 != 0:
print(result)
exit()
t = [i for i in s if i % 10 != 0]
if len(t) == 0:
print(0)
else:
result -= min(t)
print(result)
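# Approach: the answer is the largest total not divisible by 10. If the full
# sum already qualifies it is printed; otherwise the smallest element that is
# not a multiple of 10 is removed (0 is printed when no such element exists).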
| 2.6875 | 3 |
scale/job/messages/failed_jobs.py | kaydoh/scale | 121 | 12788596 |
"""Defines a command message that sets FAILED status for job models"""
from __future__ import unicode_literals
import logging
from collections import namedtuple
from django.db import transaction
from error.models import get_error
from job.models import Job
from messaging.messages.message import CommandMessage
from util.parse import datetime_to_string, parse_datetime
from util.retry import retry_database_query
# This is the maximum number of job models that can fit in one message. This maximum ensures that every message of this
# type is less than 25 KiB long.
MAX_NUM = 100
FailedJob = namedtuple('FailedJob', ['job_id', 'exe_num', 'error_id'])
logger = logging.getLogger(__name__)
def create_failed_jobs_messages(failed_jobs, when):
"""Creates messages to fail the given jobs
:param failed_jobs: The failed jobs
:type failed_jobs: :func:`list`
:param when: When the jobs failed
:type when: :class:`datetime.datetime`
:return: The list of messages
:rtype: :func:`list`
"""
messages = []
message = None
for failed_job in failed_jobs:
if not message:
message = FailedJobs()
message.ended = when
elif not message.can_fit_more():
messages.append(message)
message = FailedJobs()
message.ended = when
message.add_failed_job(failed_job)
if message:
messages.append(message)
return messages
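# Batching sketch, assuming hypothetical job ids, exe_num and error id: with
# MAX_NUM = 100, 250 failures are split into 3 messages.
#     failed = [FailedJob(job_id=i, exe_num=1, error_id=7) for i in range(250)]
#     messages = create_failed_jobs_messages(failed, when)  # 'when' is any datetime
#     assert len(messages) == 3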
class FailedJobs(CommandMessage):
"""Command message that sets FAILED status for job models
"""
def __init__(self):
"""Constructor
"""
super(FailedJobs, self).__init__('failed_jobs')
self._count = 0
self._failed_jobs = {} # {Error ID: [FailedJob]}
self.ended = None
def add_failed_job(self, failed_job):
"""Adds the given failed job to this message
:param failed_job: The failed job
:type failed_job: :class:`job.messages.failed_jobs.FailedJob`
"""
self._count += 1
if failed_job.error_id in self._failed_jobs:
self._failed_jobs[failed_job.error_id].append(failed_job)
else:
self._failed_jobs[failed_job.error_id] = [failed_job]
def can_fit_more(self):
"""Indicates whether more failed jobs can fit in this message
:return: True if more failed jobs can fit, False otherwise
:rtype: bool
"""
return self._count < MAX_NUM
def to_json(self):
"""See :meth:`messaging.messages.message.CommandMessage.to_json`
"""
error_list = []
for error_id, job_list in self._failed_jobs.items():
jobs_list = []
for failed_job in job_list:
jobs_list.append({'id': failed_job.job_id, 'exe_num': failed_job.exe_num})
error_list.append({'id': error_id, 'jobs': jobs_list})
return {'ended': datetime_to_string(self.ended), 'errors': error_list}
@staticmethod
def from_json(json_dict):
"""See :meth:`messaging.messages.message.CommandMessage.from_json`
"""
message = FailedJobs()
message.ended = parse_datetime(json_dict['ended'])
for error_dict in json_dict['errors']:
error_id = error_dict['id']
for job_dict in error_dict['jobs']:
job_id = job_dict['id']
exe_num = job_dict['exe_num']
message.add_failed_job(FailedJob(job_id, exe_num, error_id))
return message
@retry_database_query(max_tries=5, base_ms_delay=1000, max_ms_delay=5000)
def execute(self):
"""See :meth:`messaging.messages.message.CommandMessage.execute`
"""
from queue.messages.queued_jobs import create_queued_jobs_messages, QueuedJob
job_ids = []
for job_list in self._failed_jobs.values():
for failed_job in job_list:
job_ids.append(failed_job.job_id)
root_recipe_ids = set()
with transaction.atomic():
# Retrieve locked job models
job_models = {}
for job in Job.objects.get_locked_jobs(job_ids):
job_models[job.id] = job
if job.root_recipe_id:
root_recipe_ids.add(job.root_recipe_id)
# Get job models with related fields
# TODO: once long running job types are gone, the related fields are not needed
for job in Job.objects.get_jobs_with_related(job_ids):
job_models[job.id] = job
jobs_to_retry = []
all_failed_job_ids = []
for error_id, job_list in self._failed_jobs.items():
error = get_error(error_id)
jobs_to_fail = []
for failed_job in job_list:
job_model = job_models[failed_job.job_id]
# If job cannot be failed or execution number does not match, then this update is obsolete
if not job_model.can_be_failed() or job_model.num_exes != failed_job.exe_num:
# Ignore this job
continue
# Re-try job if error supports re-try and there are more tries left
retry = error.should_be_retried and job_model.num_exes < job_model.max_tries
# Also re-try long running jobs
retry = retry or job_model.job_type.is_long_running
# Do not re-try superseded jobs
retry = retry and not job_model.is_superseded
if retry:
jobs_to_retry.append(QueuedJob(job_model.id, job_model.num_exes))
else:
jobs_to_fail.append(job_model)
# Update jobs that failed with this error
if jobs_to_fail:
failed_job_ids = Job.objects.update_jobs_to_failed(jobs_to_fail, error_id, self.ended)
logger.info('Set %d job(s) to FAILED status with error %s', len(failed_job_ids), error.name)
all_failed_job_ids.extend(failed_job_ids)
# Need to update recipes of failed jobs so that dependent jobs are BLOCKED
if root_recipe_ids:
from recipe.messages.update_recipe import create_update_recipe_messages_from_node
self.new_messages.extend(create_update_recipe_messages_from_node(root_recipe_ids))
# Place jobs to retry back onto the queue
if jobs_to_retry:
self.new_messages.extend(create_queued_jobs_messages(jobs_to_retry, requeue=True))
# Send messages to update recipe metrics
from recipe.messages.update_recipe_metrics import create_update_recipe_metrics_messages_from_jobs
self.new_messages.extend(create_update_recipe_metrics_messages_from_jobs(job_ids))
return True
| 2.65625 | 3 |
hoc/5_clear.py | DolceVii/astropi | 2 | 12788597 | from sense_hat import SenseHat
sense = SenseHat()
sense.set_rotation(270)
magenta=(255,0,255)
sense.clear(magenta)
| 1.5625 | 2 |
sources/algorithms/sweepln/rstdregioncyclesweep.py | tipech/OverlapGraph | 0 | 12788598 | #!/usr/bin/env python
"""
Restricted Cyclic Multi-Pass Sweep-line Algorithm for RegionSet
Implements an cyclic multi-pass sweep-line algorithm over a set of Regions,
within a restricting Region (all Begin and End events must have context Regions
that intersect with the specified restricting Region) and within a specified
subset of Regions.
Implements RestrictedRegionCycleSweep class that executes the specific details
and actions of the sweep-line algorithm, when encountering: Init, Begin, End or
Done events.
Classes:
- RestrictedRegionCycleSweep
"""
from typing import List
from sources.core import RegionEvent
from .regioncyclesweep import RegionCycleSweep
from .rstdregionsweep import RestrictedRegionSweep
class RestrictedRegionCycleSweep(RestrictedRegionSweep, RegionCycleSweep):
"""
A cyclic multi-pass sweep-line algorithm over a set of Regions with
restricting Region and subset of Regions. Subscribes to and is evaluated by
the cyclic multi-pass sweep-line algorithm along a dimension on the set of
Regions.
Extends:
RestrictedRegionSweep
RegionCycleSweep
"""
def __init__(self, *args, **kwargs):
"""
Initialize the cyclic multi-pass sweep-line algorithm over a
restricted set of Regions.
Args:
args, kwargs:
Arguments for RestrictedRegionSweep.initialize().
"""
RestrictedRegionSweep.initialize(self, *args, **kwargs)
RegionCycleSweep.__init__(self, self.regions)
### Methods: Event Handlers
def on_begin(self, event: RegionEvent):
"""
Handle Event when cyclic multi-pass sweep-line algorithm
encounters the beginning of a Region.
Overrides:
RestrictedRegionSweep.on_begin
RegionSweep.on_begin
Args:
event: The Region beginning Event.
"""
if self._should_process(event):
RegionCycleSweep.on_begin(self, event)
def on_end(self, event: RegionEvent):
"""
Handle Event when cyclic multi-pass sweep-line algorithm
encounters the ending of a Region.
Overrides:
RestrictedRegionSweep.on_end
RegionSweep.on_end
Args:
event: The Region ending Event.
"""
if self._should_process(event):
RegionCycleSweep.on_end(self, event)
| 2.984375 | 3 |
tests/test_input_checker.py | pzarabadip/PopOff | 4 | 12788599 |
#! /usr/bin/env python3
import pytest
import numpy as np
from mock import Mock
from popoff.fitting_code import FitModel
from popoff.lammps_data import LammpsData
from popoff.atom_types import AtomType
from popoff.input_checker import (check_coreshell, check_scaling_limits,
check_spring, check_buckingham, setup_error_checks)
@pytest.fixture
def mock_fit_data(mock_lammps_data):
fit_data = Mock(FitModel)
fit_data.lammps_data = [mock_lammps_data]
return fit_data
@pytest.mark.parametrize( 'label', [('dq_Li'),('dq_Ba')])
def test_typeerror_for_label_in_check_coreshell(label, bounds, mock_fit_data):
with pytest.raises(TypeError):
check_coreshell(label, bounds, mock_fit_data)
@pytest.mark.parametrize( 'bounds', [((-0.9,0.9)),((0,-0.9)),((0.9,1.1)),((1.1,7)),((0.9,0.8))])
def test_valueerror_for_bounds_in_check_coreshell(bounds, mock_fit_data):
with pytest.raises(ValueError):
check_coreshell("dq_O", bounds, mock_fit_data)
@pytest.mark.parametrize( 'bounds', [((-0.9,0.9)),((0,-0.9)),((0.9,1.1)),((1.1,1.1)),((0.9,0.8))])
def test_valueerror_for_bounds_in_check_scaling_limits(bounds):
with pytest.raises(ValueError):
check_scaling_limits(bounds)
@pytest.mark.parametrize( 'bounds', [((-0.9,0.9)),((0,-0.9)),((0.9,0.8))])
def test_valueerror_for_bounds_in_check_spring(bounds, params):
with pytest.raises(ValueError):
check_spring('O-O spring', bounds, params)
@pytest.mark.parametrize( 'label', [('Ni-Ni spring'),('Li-Li spring'),('Li-O spring')])
def test_typeerror_for_label_in_check_spring(label, bounds, params):
with pytest.raises(TypeError):
check_spring(label, bounds, params)
@pytest.mark.parametrize( 'bounds', [((-0.9,0.9)),((0,-0.9)),((0.9,0.8))])
def test_valueerror_for_bounds_in_check_buckingham(bounds, params):
with pytest.raises(ValueError):
check_buckingham('Li_O_a', bounds, params)
@pytest.mark.parametrize( 'label', [('Ni_O_a'),('Li_B_a'),('Li_O_aa')])
def test_typeerror_for_bounds_in_check_buckingham(label, bounds, params):
with pytest.raises(TypeError):
check_buckingham(label, bounds, params)
def test_indexerror_in_setup_error_checks(labels, bounds, params):
mock_fit_data = Mock(FitModel)
labels = labels[:-1]
with pytest.raises(IndexError):
setup_error_checks(labels, bounds, mock_fit_data, params)
def test_typeerror_in_setup_error_checks(labels, bounds, params):
mock_fit_data = Mock(FitModel)
labels[0] = 'test'
with pytest.raises(TypeError):
setup_error_checks(labels, bounds, mock_fit_data, params)
| 2.0625 | 2 |
app/helpers/helpers_search.py | geoadmin/service-search-wsgi | 0 | 12788600 |
import logging
import math
import unicodedata
from decimal import Decimal
from functools import reduce
import pyproj.exceptions
from pyproj import Proj
from pyproj import Transformer
from shapely.geometry.base import BaseGeometry
from shapely.ops import transform as shape_transform
from shapely.wkt import dumps as shape_dumps
from shapely.wkt import loads as shape_loads
from app import cache
from app.lib import sphinxapi
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
PROJECTIONS = {}
# Rounding to abount 0.1 meters
COORDINATES_DECIMALS_FOR_METRIC_PROJ = 1
COORDINATES_DECIMALS_FOR_DEGREE_PROJ = 6
# Number of element in an iterator
def ilen(iterable):
return reduce(lambda sum, element: sum + 1, iterable, 0)
def format_search_text(input_str):
return remove_accents(escape_sphinx_syntax(input_str))
def format_locations_search_text(input_str):
if input_str is None:
return input_str
# only remove trailing and leading dots
input_str = ' '.join([w.strip('.') for w in input_str.split()])
# remove double quotation marks
input_str = input_str.replace('"', '')
return format_search_text(input_str)
def remove_accents(input_str):
if input_str is None:
return input_str
input_str = input_str.replace('ü', 'ue')
input_str = input_str.replace('Ü', 'ue')
input_str = input_str.replace('ä', 'ae')
input_str = input_str.replace('Ä', 'ae')
input_str = input_str.replace('ö', 'oe')
input_str = input_str.replace('Ö', 'oe')
return ''.join(
c for c in unicodedata.normalize('NFD', input_str) if unicodedata.category(c) != 'Mn'
)
def escape_sphinx_syntax(input_str):
if input_str is None:
return input_str
return sphinxapi.SphinxClient.EscapeString(input_str)
def get_proj_from_srid(srid):
if srid in PROJECTIONS:
return PROJECTIONS[srid]
proj = Proj(f'EPSG:{srid}')
PROJECTIONS[srid] = proj
return proj
def get_precision_for_proj(srid):
precision = COORDINATES_DECIMALS_FOR_METRIC_PROJ
proj = get_proj_from_srid(srid)
if proj.crs.is_geographic:
precision = COORDINATES_DECIMALS_FOR_DEGREE_PROJ
return precision
def _round_bbox_coordinates(bbox, precision=None):
tpl = f'%.{precision}f'
if precision is not None:
return [float(Decimal(tpl % c)) for c in bbox]
return bbox
def _round_shape_coordinates(shape, precision=None):
if precision is None:
return shape
return shape_loads(shape_dumps(shape, rounding_precision=precision))
def round_geometry_coordinates(geom, precision=None):
if isinstance(geom, (
list,
tuple,
)):
return _round_bbox_coordinates(geom, precision=precision)
if isinstance(geom, BaseGeometry):
return _round_shape_coordinates(geom, precision=precision)
return geom
@cache.memoize(timeout=60)
def transform_round_geometry(geom, srid_from, srid_to, rounding=True):
if srid_from == srid_to:
if rounding:
precision = get_precision_for_proj(srid_to)
return round_geometry_coordinates(geom, precision=precision)
return geom
if isinstance(geom, (
list,
tuple,
)):
return _transform_coordinates(geom, srid_from, srid_to, rounding=rounding)
return _transform_shape(geom, srid_from, srid_to, rounding=rounding)
TRANSFORMER = {}
def get_transformer(srid_from, srid_to):
transformer_id = f'{srid_from}-to-{srid_to}'
if transformer_id in TRANSFORMER:
return TRANSFORMER[transformer_id]
TRANSFORMER[transformer_id] = Transformer.from_crs(srid_from, srid_to, always_xy=True)
return TRANSFORMER[transformer_id]
# used by transform_round_geometry used by search.py
# Reprojecting pairs of coordinates and rounding them if necessary
# Only a point or a line are considered
def _transform_coordinates(coordinates, srid_from, srid_to, rounding=True):
if len(coordinates) % 2 != 0:
logger.error("Invalid coordinates %s, must be two numbers", coordinates)
raise ValueError(f"Invalid coordinates {coordinates}, must be two numbers")
new_coords = []
transformer = get_transformer(srid_from, srid_to)
coords_iter = iter(coordinates)
try:
for pnt in zip(coords_iter, coords_iter):
new_pnt = transformer.transform(pnt[0], pnt[1])
new_coords += new_pnt
if rounding:
precision = get_precision_for_proj(srid_to)
new_coords = _round_bbox_coordinates(new_coords, precision=precision)
except (pyproj.exceptions.CRSError) as e:
logger.error(
"Cannot transform coordinates %s from %s to %s, %s", coordinates, srid_from, srid_to, e
)
raise ValueError(
f"Cannot transform coordinates {coordinates} from {srid_from} to {srid_to}"
) from e
return new_coords
# indirectly used by search.py
def _transform_shape(geom, srid_from, srid_to, rounding=True):
transformer = get_transformer(srid_from, srid_to)
new_geom = shape_transform(transformer.transform, geom)
if rounding:
precision = get_precision_for_proj(srid_to)
return _round_shape_coordinates(new_geom, precision=precision)
return new_geom
# float('NaN') does not raise an Exception. This function does.
# used by validation_search.py
def float_raise_nan(val):
ret = float(val)
if math.isnan(ret):
raise ValueError('nan is not considered valid float')
return ret
# used by search.py
def parse_box2d(stringBox2D):
extent = stringBox2D.replace('BOX(', '').replace(')', '').replace(',', ' ')
# Python2/3
box = map(float, extent.split(' '))
if not isinstance(box, list):
box = list(box)
return box
# used by center_from_box_2d used by search.py
def is_box2d(box2D):
# Python2/3
if not isinstance(box2D, list):
box2D = list(box2D)
# Bottom left to top right only
if len(box2D) != 4 or box2D[0] > box2D[2] or box2D[1] > box2D[3]:
raise ValueError(f'Invalid box2D {box2D}.')
return box2D
# used by search.py
def center_from_box2d(box2D):
box2D = is_box2d(box2D)
return [box2D[0] + ((box2D[2] - box2D[0]) / 2), box2D[1] + ((box2D[3] - box2D[1]) / 2)]
# used by validation_search.py and search.py
def shift_to(coords, srid):
cds = []
x_offset = 2e6
y_offset = 1e6
coords_copy = coords[:]
while len(coords_copy) > 0:
c = coords_copy.pop(0)
if not isinstance(c, (int, float)):
raise TypeError('Coordinates should be of type int or float')
if srid == 2056:
cds.append(c + x_offset if len(coords_copy) % 2 else c + y_offset)
elif srid == 21781:
cds.append(c - x_offset if len(coords_copy) % 2 else c - y_offset)
return cds
| 2.09375 | 2 |
deployer/__init__.py | cezio/deployer | 0 | 12788601 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import subprocess
from flask import Flask, Response, request
from deployer.runner import run_child
app = Flask('deployer')
@app.route('/incoming/<deployment_name>/', methods=["POST"])
def incoming(deployment_name):
r = request
config_path = os.environ.get('DEPLOYER_CONFIG')
if not config_path:
return Response(response='no deployment config path', status=500)
final_path = os.path.join(config_path, '{}.conf'.format(deployment_name))
if not os.path.exists(final_path):
return Response(response='no deployment config', status=404)
run_child(final_path)
response = 'ok'
status = 200
r = Response(response=response, status=status)
return r
if __name__ == '__main__':
app.run()
| 2.1875 | 2 |
build_dataset/check_track.py | hotelll/Music_Plagiarism_Detection | 2 | 12788602 | import pretty_midi
import numpy as np
'''
Note class: represent note, including:
1. the note pitch
2. the note duration
3. downbeat
4. intensity of note sound
'''
class Note:
def __init__(self):
self.pitch = 0
self.length = 0
self.downbeat = False
self.force = 0
'''
Midi2Numpy: tool to convert midi file to numpy list of Note
input_path: the path of the input midi file
track_index: the index of the melody track of midi
output_path: the path to save the numpy array
'''
def Midi2Numpy(input_path, output_path, track_index):
midi_data = pretty_midi.PrettyMIDI(input_path)
notes = midi_data.instruments[track_index].notes
downbeats = midi_data.get_downbeats()
dataset = []
for n in notes:
note = Note()
for i in downbeats:
'''
            If a downbeat falls within this note's duration,
            mark the note as a downbeat.
'''
if n.start <= i < n.end:
note.downbeat = True
note.pitch = n.pitch
note.length = n.end - n.start
note.force = n.velocity
dataset.append(note)
np.save(output_path, dataset)
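# Illustrative call, assuming a hypothetical output path:
#     Midi2Numpy('plag/23_ma este meg.mid', 'plag/23_notes.npy', track_index=0)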
path = 'plag/23_ma este meg.mid'
test = pretty_midi.PrettyMIDI()
midi_data = pretty_midi.PrettyMIDI(path)
# decide the track index
track_index = 0
notes = midi_data.instruments[track_index]
test.instruments.append(notes)
test.write('test.mid')
test.write("newdata" + path[4:])
| 3.125 | 3 |
Q/mindmaps/migrations/0001_initial.py | ES-DOC/esdoc-questionnaire | 0 | 12788603 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='MindMapDomain',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('domain', models.URLField()),
],
options={
'verbose_name': 'MindMap Domain',
'verbose_name_plural': 'MindMap Domains',
},
),
migrations.CreateModel(
name='MindMapSource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=64)),
('enabled', models.BooleanField(default=True)),
],
options={
'verbose_name': 'MindMap Source',
'verbose_name_plural': 'MindMap Sources',
},
),
migrations.AddField(
model_name='mindmapdomain',
name='source',
field=models.ForeignKey(related_name='domains', to='mindmaps.MindMapSource'),
),
]
| 1.703125 | 2 |
6 - Python/Strings/2 - String Split And Join.py | Terence-Guan/Python.HackerRank | 88 | 12788604 | l = input()
print('-'.join(input().split()))
# print(l.replace(" ", "-")) | 3.546875 | 4 |
dataspace/count/__init__.py | Sam-prog-sudo/dataspace | 3 | 12788605 |
from .count import _count_empty_, _count_null_, _count_unique_, _count_zero_
| 1.03125 | 1 |
allenact_plugins/manipulathor_plugin/manipulathor_viz.py | zcczhang/allenact | 2 | 12788606 | """Utility functions and classes for visualization and logging."""
import os
from datetime import datetime
import cv2
import imageio
import numpy as np
from allenact_plugins.manipulathor_plugin.manipulathor_utils import initialize_arm
from allenact_plugins.manipulathor_plugin.manipulathor_utils import (
reset_environment_and_additional_commands,
transport_wrapper,
)
class LoggerVisualizer:
def __init__(self, exp_name="", log_dir=""):
if log_dir == "":
log_dir = self.__class__.__name__
if exp_name == "":
exp_name = "NoNameExp"
now = datetime.now()
self.exp_name = exp_name
log_dir = os.path.join(
"experiment_output/visualizations",
exp_name,
log_dir + "_" + now.strftime("%m_%d_%Y_%H_%M_%S_%f"),
)
self.log_dir = log_dir
os.makedirs(self.log_dir, exist_ok=True)
self.log_queue = []
self.action_queue = []
self.logger_index = 0
def log(self, environment, action_str):
raise Exception("Not Implemented")
def is_empty(self):
return len(self.log_queue) == 0
def finish_episode_metrics(self, episode_info, task_info, metric_results):
pass
def finish_episode(self, environment, episode_info, task_info):
pass
class TestMetricLogger(LoggerVisualizer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.total_metric_dict = {}
log_file_name = os.path.join(
self.log_dir, "test_metric_{}.txt".format(self.exp_name)
)
self.metric_log_file = open(log_file_name, "w")
def average_dict(self):
result = {}
for (k, v) in self.total_metric_dict.items():
result[k] = sum(v) / len(v)
return result
def finish_episode_metrics(self, episode_info, task_info, metric_results=None):
if metric_results is None:
print("had to reset")
self.log_queue = []
self.action_queue = []
return
for k in metric_results.keys():
if "metric" in k or k in ["ep_length", "reward", "success"]:
self.total_metric_dict.setdefault(k, [])
self.total_metric_dict[k].append(metric_results[k])
print(
"total",
len(self.total_metric_dict["success"]),
"average test metric",
self.average_dict(),
)
# save the task info and all the action queue and results
log_dict = {
"task_info_metrics": metric_results,
"action_sequence": self.action_queue,
"logger_number": self.logger_index,
}
self.logger_index += 1
self.metric_log_file.write(str(log_dict))
self.metric_log_file.write("\n")
print("Logging to", self.metric_log_file.name)
self.log_queue = []
self.action_queue = []
def log(self, environment, action_str):
# We can add agent arm and state location if needed
self.action_queue.append(action_str)
self.log_queue.append(action_str)
class BringObjImageVisualizer(LoggerVisualizer):
def finish_episode(self, environment, episode_info, task_info):
now = datetime.now()
time_to_write = now.strftime("%m_%d_%Y_%H_%M_%S_%f")
time_to_write += "log_ind_{}".format(self.logger_index)
self.logger_index += 1
print("Loggigng", time_to_write, "len", len(self.log_queue))
source_object_id = task_info["source_object_id"]
goal_object_id = task_info["goal_object_id"]
pickup_success = episode_info.object_picked_up
episode_success = episode_info._success
# Put back if you want the images
# for i, img in enumerate(self.log_queue):
# image_dir = os.path.join(self.log_dir, time_to_write + '_seq{}.png'.format(str(i)))
# cv2.imwrite(image_dir, img[:,:,[2,1,0]])
episode_success_offset = "succ" if episode_success else "fail"
pickup_success_offset = "succ" if pickup_success else "fail"
gif_name = (
time_to_write
+ "_from_"
+ source_object_id.split("|")[0]
+ "_to_"
+ goal_object_id.split("|")[0]
+ "_pickup_"
+ pickup_success_offset
+ "_episode_"
+ episode_success_offset
+ ".gif"
)
concat_all_images = np.expand_dims(np.stack(self.log_queue, axis=0), axis=1)
save_image_list_to_gif(concat_all_images, gif_name, self.log_dir)
this_controller = environment.controller
scene = this_controller.last_event.metadata["sceneName"]
reset_environment_and_additional_commands(this_controller, scene)
self.log_start_goal(
environment,
task_info["visualization_source"],
tag="start",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_start_goal(
environment,
task_info["visualization_target"],
tag="goal",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_queue = []
self.action_queue = []
def log(self, environment, action_str):
image_tensor = environment.current_frame
self.action_queue.append(action_str)
self.log_queue.append(image_tensor)
def log_start_goal(self, env, task_info, tag, img_adr):
object_location = task_info["object_location"]
object_id = task_info["object_id"]
agent_state = task_info["agent_pose"]
this_controller = env.controller
# We should not reset here
# for start arm from high up as a cheating, this block is very important. never remove
event1, event2, event3 = initialize_arm(this_controller)
if not (
event1.metadata["lastActionSuccess"]
and event2.metadata["lastActionSuccess"]
and event3.metadata["lastActionSuccess"]
):
print("ERROR: ARM MOVEMENT FAILED in logging! SHOULD NEVER HAPPEN")
event = transport_wrapper(this_controller, object_id, object_location)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not transport in logging")
event = this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not teleport in logging")
image_tensor = this_controller.last_event.frame
image_dir = (
img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + ".png"
)
cv2.imwrite(image_dir, image_tensor[:, :, [2, 1, 0]])
# Saving the mask
target_object_id = task_info["object_id"]
all_visible_masks = this_controller.last_event.instance_masks
if target_object_id in all_visible_masks:
mask_frame = all_visible_masks[target_object_id]
else:
mask_frame = np.zeros(env.controller.last_event.frame[:, :, 0].shape)
mask_dir = (
img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + "_mask.png"
)
cv2.imwrite(mask_dir, mask_frame.astype(float) * 255.0)
class ImageVisualizer(LoggerVisualizer):
def finish_episode(self, environment, episode_info, task_info):
now = datetime.now()
time_to_write = now.strftime("%m_%d_%Y_%H_%M_%S_%f")
time_to_write += "log_ind_{}".format(self.logger_index)
self.logger_index += 1
print("Loggigng", time_to_write, "len", len(self.log_queue))
object_id = task_info["objectId"]
pickup_success = episode_info.object_picked_up
episode_success = episode_info._success
# Put back if you want the images
# for i, img in enumerate(self.log_queue):
# image_dir = os.path.join(self.log_dir, time_to_write + '_seq{}.png'.format(str(i)))
# cv2.imwrite(image_dir, img[:,:,[2,1,0]])
episode_success_offset = "succ" if episode_success else "fail"
pickup_success_offset = "succ" if pickup_success else "fail"
gif_name = (
time_to_write
+ "_obj_"
+ object_id.split("|")[0]
+ "_pickup_"
+ pickup_success_offset
+ "_episode_"
+ episode_success_offset
+ ".gif"
)
concat_all_images = np.expand_dims(np.stack(self.log_queue, axis=0), axis=1)
save_image_list_to_gif(concat_all_images, gif_name, self.log_dir)
self.log_start_goal(
environment,
task_info["visualization_source"],
tag="start",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_start_goal(
environment,
task_info["visualization_target"],
tag="goal",
img_adr=os.path.join(self.log_dir, time_to_write),
)
self.log_queue = []
self.action_queue = []
def log(self, environment, action_str):
image_tensor = environment.current_frame
self.action_queue.append(action_str)
self.log_queue.append(image_tensor)
def log_start_goal(self, env, task_info, tag, img_adr):
object_location = task_info["object_location"]
object_id = task_info["object_id"]
agent_state = task_info["agent_pose"]
this_controller = env.controller
scene = this_controller.last_event.metadata[
"sceneName"
        ]  # maybe we need to reset env here actually
reset_environment_and_additional_commands(this_controller, scene)
        # for starting the arm from high up (as a cheat), this block is very important. never remove
event1, event2, event3 = initialize_arm(this_controller)
if not (
event1.metadata["lastActionSuccess"]
and event2.metadata["lastActionSuccess"]
and event3.metadata["lastActionSuccess"]
):
print("ERROR: ARM MOVEMENT FAILED in logging! SHOULD NEVER HAPPEN")
event = transport_wrapper(this_controller, object_id, object_location)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not transport in logging")
event = this_controller.step(
dict(
action="TeleportFull",
standing=True,
x=agent_state["position"]["x"],
y=agent_state["position"]["y"],
z=agent_state["position"]["z"],
rotation=dict(
x=agent_state["rotation"]["x"],
y=agent_state["rotation"]["y"],
z=agent_state["rotation"]["z"],
),
horizon=agent_state["cameraHorizon"],
)
)
if event.metadata["lastActionSuccess"] == False:
print("ERROR: oh no could not teleport in logging")
image_tensor = this_controller.last_event.frame
image_dir = (
img_adr + "_obj_" + object_id.split("|")[0] + "_pickup_" + tag + ".png"
)
cv2.imwrite(image_dir, image_tensor[:, :, [2, 1, 0]])
def save_image_list_to_gif(image_list, gif_name, gif_dir):
gif_adr = os.path.join(gif_dir, gif_name)
seq_len, cols, w, h, c = image_list.shape
pallet = np.zeros((seq_len, w, h * cols, c))
for col_ind in range(cols):
pallet[:, :, col_ind * h : (col_ind + 1) * h, :] = image_list[:, col_ind]
if not os.path.exists(gif_dir):
os.makedirs(gif_dir)
imageio.mimsave(gif_adr, pallet.astype(np.uint8), format="GIF", duration=1 / 5)
print("Saved result in ", gif_adr)
| 2.46875 | 2 |
hijackingprevention/user.py | michardy/account-hijacking-prevention | 3 | 12788607 | import logging
from tornado import gen
import hijackingprevention.db_int as db_int
logger = logging.getLogger(__name__)
class User(db_int.Interface):
"""This class handles reading, writing, and manipulating user objects."""
def __init__(self, uid, site, db):
self.__id_type = "uid"
self.__id = uid
        self.__data_type = 'userData_site-'
self.data = {}
self.code = None #verification token
self.__site = str(site)
self.__db = db
super(User, self).__init__(db, self.__data_type, str(site),
self.__id_type, uid, self.__combine)
@gen.coroutine
def read_db(self):
"""Reads user object from DB."""
sud = self.__db[self.__data_type + self.__site]
user = yield sud.find_one({'uid':self.__id}) #Try to find user
if user is not None:
self.data = user['data']
try:
self.code = user['code']
except KeyError:
pass
def add_data(self, data):
"""Adds data to user."""
for k in data.keys():
if k in self.data.keys():
self.data[k].append(data[k])
else:
self.data[k] = [data[k]]
def __combine(self):
"""Returns user object as dictionary"""
return({self.__id_type:self.__id, 'data':self.data,
'code':self.code})
| 2.671875 | 3 |
utils/fit.py | sean-mackenzie/gdpyt-analysis | 0 | 12788608 | <gh_stars>0
# gdpyt-analysis: utils.fit
"""
Notes
"""
# imports
import math
import numpy as np
from scipy.optimize import curve_fit, minimize
from scipy.interpolate import SmoothBivariateSpline
import functools
from utils import functions
# scripts
def gauss_1d_function(x, a, x0, sigma):
return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))
def fit(x, y, fit_function=None, bounds=None):
# fit the function
if fit_function is None:
popt, pcov = curve_fit(functions.parabola, x, y)
fit_function = functions.parabola
else:
if bounds is not None:
popt, pcov = curve_fit(fit_function, x, y, bounds=bounds)
else:
popt, pcov = curve_fit(fit_function, x, y)
return popt, pcov, fit_function
def fit_3d(points, fit_function):
return fit_3d_plane(points)
def fit_3d_plane(points):
fun = functools.partial(functions.plane_error, points=points)
params0 = np.array([0, 0, 0])
res = minimize(fun, params0)
a = res.x[0]
b = res.x[1]
c = res.x[2]
point = np.array([0.0, 0.0, c])
normal = np.array(functions.cross([1, 0, a], [0, 1, b]))
d = -point.dot(normal)
popt = [a, b, c, d, normal]
minx = np.min(points[:, 0])
miny = np.min(points[:, 1])
maxx = np.max(points[:, 0])
maxy = np.max(points[:, 1])
xx, yy = np.meshgrid([minx, maxx], [miny, maxy])
z = (-normal[0] * xx - normal[1] * yy - d) * 1. / normal[2]
return xx, yy, z, popt
def fit_3d_spline(x, y, z, kx=1, ky=2):
w = np.ones_like(x)
bispl = SmoothBivariateSpline(x, y, z, w=w, kx=kx, ky=ky)
rmse = np.sqrt(bispl.get_residual() / len(x))
return bispl, rmse
def fit_3d_sphere(X, Y, Z):
"""
Fit a sphere to data points (X, Y, Z) and return the sphere radius and center of best fit.
Reference: <NAME> (2016); https://jekel.me/2015/Least-Squares-Sphere-Fit/
:param X:
:param Y:
:param Z:
:return:
"""
# Assemble the A matrix
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
A = np.zeros((len(X), 4))
A[:, 0] = X * 2
A[:, 1] = Y * 2
A[:, 2] = Z * 2
A[:, 3] = 1
# Assemble the f matrix
f = np.zeros((len(X), 1))
f[:, 0] = (X * X) + (Y * Y) + (Z * Z)
    C, residuals, rank, singval = np.linalg.lstsq(A, f)
xc, yc, zc = C[0], C[1], C[2]
# solve for the radius
t = (xc * xc) + (yc * yc) + (zc * zc) + C[3]
radius = math.sqrt(t)
return radius, xc, yc, zc
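# Illustrative usage sketch for the least-squares sphere fit above (not part of
# the original module; the synthetic sampling below is an assumption made only
# for the example): points drawn from a sphere of radius 2 centred at (1, -1, 3)
# should recover that radius and centre closely.
#   theta = np.random.uniform(0, np.pi, 500)
#   phi = np.random.uniform(0, 2 * np.pi, 500)
#   X = 1 + 2 * np.sin(theta) * np.cos(phi)
#   Y = -1 + 2 * np.sin(theta) * np.sin(phi)
#   Z = 3 + 2 * np.cos(theta)
#   radius, xc, yc, zc = fit_3d_sphere(X, Y, Z)   # expect radius ~= 2, centre ~= (1, -1, 3)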
def fit_3d_sphere_from_center(spX, spY, spZ, xc, yc):
"""
Fit a sphere to data points (spX, spY, spZ) given the sphere center in x-y coordinates.
:param spX:
:param spY:
:param spZ:
:param xc:
:param yc:
:return:
"""
# Assemble the A matrix
spX = np.array(spX)
spY = np.array(spY)
spZ = np.array(spZ)
A = np.zeros((len(spX), 2))
A[:, 0] = spZ * 2
A[:, 1] = 1
# Assemble the f matrix
f = np.zeros((len(spX), 1))
f[:, 0] = (spX * spX) + (spY * spY) + (spZ * spZ) - (2 * spX * xc) - (2 * spY * yc)
# least squares fit
    C, residuals, rank, singval = np.linalg.lstsq(A, f)
zc = C[0]
# solve for the radius
t = (xc ** 2) + (yc ** 2) + (zc ** 2) + C[1]
radius = math.sqrt(t)
return radius, C[0]
def fit_ellipsoid_from_center(X, Y, Z, xc, yc, zc, r):
"""
Fit a 3D ellipsoid given the x, y, z center coordinates, x-radius, and y-radius.
Somewhat helpful reference: https://jekel.me/2020/Least-Squares-Ellipsoid-Fit/
:param X:
:param Y:
:param Z:
:param xc:
:param yc:
:param zc:
:param r:
:return:
"""
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
f = np.zeros((len(X), 1))
f[:, 0] = -1 * ((Z * Z) - (2 * zc * Z) + (zc * zc))
A = np.zeros((len(X), 1))
A[:, 0] = ((X * X) - (2 * xc * X) + (xc * xc) + (Y * Y) - (2 * yc * Y) + (yc * yc)) / (r * r) - 1
# least squares fit
    C, residuals, rank, singval = np.linalg.lstsq(A, f)
# solve for radius in z-dir.
r_z = math.sqrt(C[0])
return r_z
def fit_ellipsoid_non_linear(X, Y, Z):
"""
Non-linear regression + optimization to find ellipsoid parameters.
Reference: https://jekel.me/2021/A-better-way-to-fit-Ellipsoids/
:param X:
:param Y:
:param Z:
:return:
"""
x, y, z = X, Y, Z
pass
def fit_smooth_surface(df, z_param='z'):
"""
Uses the 'smooth_surface' fit function on a dataframe.
:param df:
:return:
"""
# convert data into proper format
x_data = df.x.to_numpy()
y_data = df.y.to_numpy()
z_data = df[z_param].to_numpy()
data = [x_data, y_data]
# get fit parameters from scipy curve fit
fittedParameters, covariance = curve_fit(functions.smooth_surface, data, z_data)
# --- calculate prediction errors
rmse, r_squared = calculate_fit_error(fit_results=None,
data_fit_to=z_data,
fit_func=functions.smooth_surface,
fit_params=fittedParameters,
data_fit_on=data,
)
# deprecated prediction errors
"""modelPredictions = functions.smooth_surface(data, *fittedParameters)
absError = modelPredictions - z_data
SE = np.square(absError) # squared errors
MSE = np.mean(SE) # mean squared errors
RMSE = np.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (np.var(absError) / np.var(z_data))"""
return rmse, r_squared, fittedParameters
# ----------------------------------------------- HELPER FUNCTIONS -----------------------------------------------------
def calculate_fit_error(fit_results, data_fit_to, fit_func=None, fit_params=None, data_fit_on=None):
"""
Two options for calculating fit error:
1. fit_func + fit_params: the fit results are calculated.
2. fit_results: the fit results are known for each data point.
:param fit_func: the function used to calculate the fit.
:param fit_params: generally, popt.
:param fit_results: the outputs at each input data point ('data_fit_on')
:param data_fit_on: the input data that was inputted to fit_func to generate the fit.
:param data_fit_to: the output data that fit_func was fit to.
:return:
"""
# --- calculate prediction errors
if fit_results is None:
fit_results = fit_func(data_fit_on, *fit_params)
abs_error = fit_results - data_fit_to
se = np.square(abs_error) # squared errors
mse = np.mean(se) # mean squared errors
rmse = np.sqrt(mse) # Root Mean Squared Error, RMSE
r_squared = 1.0 - (np.var(abs_error) / np.var(data_fit_to))
return rmse, r_squared
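# Illustrative sketch of the two calling modes documented above (names such as
# z_data, data and popt are assumptions for the example, mirroring the call in
# fit_smooth_surface):
#   # mode 1: let the helper evaluate the fit from a function and its parameters
#   rmse, r2 = calculate_fit_error(None, z_data, fit_func=functions.smooth_surface,
#                                  fit_params=popt, data_fit_on=data)
#   # mode 2: pass precomputed fit results directly
#   rmse, r2 = calculate_fit_error(fit_results, z_data)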
# ----------------------------------------------- DEPRECATED FUNCTIONS -------------------------------------------------
def fit_dficts(dficts, fit_function=None, xparameter=None, yparameter='z'):
popts = []
pcovs = []
for name, df in dficts.items():
# throw out NaNs
df = df.dropna(axis=0, subset=[yparameter])
# get x- and y-arrays
if xparameter is None:
x = df.index
else:
x = df[xparameter]
y = df[yparameter]
# fit the function
if fit_function is None:
popt, pcov = curve_fit(functions.parabola, x, y)
fit_function = functions.parabola
else:
popt, pcov = curve_fit(fit_function, x, y)
popts.append(popt)
pcovs.append(pcov)
return popts, pcovs, fit_function | 2.375 | 2 |
examples/gt.py | bhatiadivij/kgtk | 0 | 12788609 | <gh_stars>0
import kgtk.gt.io_utils as gtio
import kgtk.gt.analysis_utils as gtanalysis
datadir = 'data/'  # uncommented so the f-string paths below resolve; adjust as needed
mowgli_nodes=f'{datadir}nodes_v002.csv'
mowgli_edges=f'{datadir}edges_v002.csv'
output_gml=f'{datadir}graph.graphml'
gtio.transform_to_graphtool_format(mowgli_nodes, mowgli_edges, output_gml, True)
g=gtio.load_gt_graph(output_gml.replace(".graphml", '.gt'))
print(g.num_edges())
| 2.234375 | 2 |
transforms/__init__.py | arpastrana/coronary-mesh-convolution | 26 | 12788610 | <reponame>arpastrana/coronary-mesh-convolution
from .feature_descriptors import FeatureDescriptors
from .flow_extensions import RemoveFlowExtensions
from .geodesics import InletGeodesics
from .heat_sampling import HeatSamplingCluster
from .rotation import RandomRotation
| 0.601563 | 1 |
tests/integrations/config/session.py | josephmancuso/masonite | 35 | 12788611 | import os
DRIVERS = {
"default": "cookie",
"cookie": {},
"redis": {
"host": "127.0.0.1",
"port": 6379,
"password": "",
"options": {"db": 1}, # redis module driver specific options
"timeout": 60*60,
"namespace": "masonite4",
},
}
| 1.78125 | 2 |
source/Screen.py | dibala21/Ergocycle | 0 | 12788612 | <filename>source/Screen.py<gh_stars>0
# Screen class
# Imports (libraries)
import sys
import math
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QPushButton
# Imports (classes)
from Menu import Menu
class Screen:
    # Constructor
def __init__(self, function_dictionary):
print("THIS IS A TEST")
self.width = 640
self.height = 480
self.speed = 100
self.application = QApplication([])
self.window = QWidget()
self.window.setWindowTitle('Test interface graphique')
self.window.setFixedWidth(self.width)
self.window.setFixedHeight(self.height)
layout = QHBoxLayout()
layout.addWidget(QLabel('Amplitude:'))
self.amplitude_edit = QLineEdit()
layout.addWidget(self.amplitude_edit)
self.send_button = QPushButton("Commander amplitude")
layout.addWidget(self.send_button)
self.test_button = QPushButton("Tester événements")
layout.addWidget(self.test_button)
# Connect before the show or the exec
print("Widgets:")
for i in range(0, layout.count()):
widget = layout.itemAt(i).widget()
#print(widget)
#print(type(widget))
if type(widget) is QPushButton:
# print the push buttons labels
#print(widget.text())
label = widget.text()
if label in function_dictionary:
widget.clicked.connect(function_dictionary[label])
print("CONNECTED BUTTON TO ERGOCYCLE")
self.window.setLayout(layout)
self.window.show()
def start_application(self):
#sys.exit(self.app.exec_())
self.application.exec_()
def get_amplitude(self):
return self.amplitude_edit.text()
| 2.796875 | 3 |
carball/json_parser/actor/team.py | unitedroguegg/carball | 119 | 12788613 | <reponame>unitedroguegg/carball
from .ball import *
class TeamHandler(BaseActorHandler):
@classmethod
def can_handle(cls, actor: dict) -> bool:
return actor['ClassName'] == 'TAGame.Team_Soccar_TA'
def update(self, actor: dict, frame_number: int, time: float, delta: float) -> None:
self.parser.team_dicts[actor['Id']] = actor
self.parser.team_dicts[actor['Id']]['colour'] = 'blue' if actor["TypeName"] == "Archetypes.Teams.Team0" else \
'orange'
| 2.359375 | 2 |
zongheng/zongheng_lib/machine.py | meetbill/zongheng | 0 | 12788614 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import g
def show_machines():
ml = load_machine_list()
for m in ml:
print m
def load_machine_list(skip_ban=True):
client = g.mongo_client()
db = client.cap
cursor = db.machines.find()
results = []
ban_list = load_banned_machine_list()
for s in cursor:
if skip_ban:
if s["host"] not in ban_list:
results.append(s)
else:
results.append(s)
return results
def save_machine_list(machines):
client = g.mongo_client()
db = client.cap
db.machines.drop()
db.machines.insert(machines)
def update_machine_memory(host, mem_used):
client = g.mongo_client()
db = client.cap
db.machines.update({"host": host}, {"$inc": {"mem_used": mem_used}})
def load_banned_machine_list():
client = g.mongo_client()
db = client.cap
cursor = db.banned_machines.find()
results = []
for s in cursor:
results.append(s["host"])
return results
def save_banned_machine_list(hosts):
client = g.mongo_client()
db = client.cap
for host in hosts:
db.banned_machines.insert({"host": host})
def remove_banned_machine_list(hosts):
client = g.mongo_client()
db = client.cap
for host in hosts:
db.banned_machines.remove({"host": host})
def add_banned_machine_list(hosts):
save_banned_machine_list(hosts)
def update_machine_list(raw_file):
"""
    The format may change; the current format is <host> <mem_total> <mem_used>
"""
machines = []
for line in open(raw_file, "r"):
xs = line.strip().split(" ")
host = g.ip2hostname(xs[0])
host = host[:-10]
machine_info = {
"host": host,
"mem_total": int(xs[1]), # in MB
"mem_used": int(xs[2]), # in MB
"plan": g.pick_std_plan(xs[1]),
"idc": g.idc(host),
"logic_machine_room": g.logic_machine_room(host),
"machine_room": g.machine_room(host),
}
machines.append(machine_info)
save_machine_list(machines)
return machines
| 2.921875 | 3 |
app/main.py | hesusruiz/Canispy | 0 | 12788615 | # Standard python library
import logging
# The Fastapi web server
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
# Import uvicorn for debugging
import uvicorn
# The settings for the system
from settings import settings
# Access to the blockchain
from blockchain import trustframework as tf
# Create logger
logging.basicConfig(
format='%(levelname)s - %(asctime)s - %(message)s', level=logging.INFO)
log = logging.getLogger(__name__)
# Create the FastAPi server
app = FastAPI(
title="FIWARE Canis Major with EBSI/Alastria APIs",
description="FIWARE blockchain integration with SSI and Verifiable Credentials with interoperability EBSI-Alastria Red T",
version="0.2.0",
openapi_url="/api/v1/openapi.json",
)
# The routes for the Canis Major NGSI API functionality
from routers import ngsi_api
app.include_router(ngsi_api.router)
# The routes for Resolver APIs
from routers import resolver_api
app.include_router(resolver_api.router, include_in_schema=True)
# The routes for Issuer APIs
from routers import issuer_api
app.include_router(issuer_api.router, include_in_schema=False)
# The route for Verifying a credential
from routers import verify_credential_api
app.include_router(verify_credential_api.router, include_in_schema=False)
# Support for API keys to secure invocations of APIs
from fastapi_simple_security import api_key_router
app.include_router(api_key_router, prefix="/auth", tags=["API-key Authorization"])
# APIs to check for server health
from routers import server_health
app.include_router(server_health.router, include_in_schema=True)
# APIs to implement a simple, fast and secure messaging server
from routers import secure_messaging_router
app.include_router(secure_messaging_router.router, include_in_schema=False)
# For serving static assets.
# Should be the last route added because it is serving the root ("/")
#app.mount("/", StaticFiles(directory="static", html=True), name="static")
# Template directory for dynamic HTML pages
templates = Jinja2Templates(directory="templates")
# Perform startup processing
@app.on_event("startup")
async def startup_event():
"""Connect to blockchain when starting the server"""
log.info("######### Configuration values #########")
if settings.PRODUCTION:
log.info(f"Running in PRODUCTION")
else:
log.info(f"Running in DEVELOPMENT")
log.info(f"Current directory: {settings.INITIAL_DIR}")
log.info(f"SmartContract source dir: {settings.CONTRACTS_DIR}")
log.info(f"SmartContract binary dir: {settings.CONTRACTS_OUTPUT_DIR}")
log.info(f"Blockchain IP: {settings.BLOCKCHAIN_NODE_IP}")
log.info(f"Database Dir: {settings.DATABASE_DIR}")
tf.connect_blockchain(settings.BLOCKCHAIN_NODE_IP)
log.info(f"Connected to the blockchain provider")
log.info("########################################")
# This is for running the server in test mode
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
| 2.015625 | 2 |
contest_questions/Nth_no_prime.py | mukul20-21/python_datastructure | 0 | 12788616 | import math
num = int(input("Enter the number:"))
try:
result = math.factorial(num)
print(result)
except ValueError:
    print("factorial is not defined for negative numbers") | 4.125 | 4 |
sra2variant/VCF2CSV.py | wuaipinglab/sra2variant | 0 | 12788617 | <filename>sra2variant/VCF2CSV.py
import sys
import pathlib
from sra2variant.pipeline.cmd_wrapper import ErrorTolerance
from sra2variant.artifacts.base_file import _FileArtifacts
from sra2variant.artifacts.vcf_file import init_vcf_file
from sra2variant.vcfparser.vcf2csv import vcf2csv
from sra2variant.cmdutils.parser_maker import base_parser
from sra2variant.cmdutils.parser_checker import check_base_parser
def main(sysargs=sys.argv[1:]):
parser = base_parser("Convert VCF to CSV")
params, _ = check_base_parser(sysargs, parser)
vcf_dir = params["input_dir"]
csv_dir = params["csv_dir"]
error_dir = params["error_dir"]
max_errors = params["max_errors"]
ErrorTolerance.set_max_errors(max_errors)
for vcf_file in pathlib.Path(vcf_dir).glob(f"**/*.vcf"):
vcf_file: _FileArtifacts = init_vcf_file(
vcf_file=vcf_file,
csv_dir=csv_dir
)
task_log_file = vcf_file.log_file()
try:
vcf2csv(vcf_file)
except Exception as e:
error_tolerance = ErrorTolerance(error_dir, task_log_file)
error_tolerance.handle(e)
| 2.546875 | 3 |
dfme/dfme/approximate_gradients.py | cleverhans-lab/model-extraction-iclr | 0 | 12788618 | <reponame>cleverhans-lab/model-extraction-iclr<filename>dfme/dfme/approximate_gradients.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.linalg
import matplotlib.pyplot as plt
import network
from tqdm import tqdm
import torchvision.models as models
import torchvision.transforms as transforms
from time import time
import socket
from time import sleep
import pickle
HOST = '127.0.0.1' # The server's hostname or IP address
PORT = 65432 # The port used by the server
server_address = (HOST, PORT)
def estimate_gradient_objective(args, victim_model, clone_model, x, epsilon = 1e-7, m = 5, verb=False, num_classes=10, device = "cpu", pre_x=False):
# Sampling from unit sphere is the method 3 from this website:
# http://extremelearning.com.au/how-to-generate-uniformly-random-points-on-n-spheres-and-n-balls/
#x = torch.Tensor(np.arange(2*1*7*7).reshape(-1, 1, 7, 7))
if pre_x and args.G_activation is None:
raise ValueError(args.G_activation)
clone_model.eval()
victim_model.eval()
timequery = 0
quers = None
if args.usenetwork == "yes":
usenetwork = True
else:
usenetwork = False
with torch.no_grad():
# Sample unit noise vector
N = x.size(0)
C = x.size(1)
S = x.size(2)
dim = S**2 * C
u = np.random.randn(N * m * dim).reshape(-1, m, dim) # generate random points from normal distribution
d = np.sqrt(np.sum(u ** 2, axis = 2)).reshape(-1, m, 1) # map to a uniform distribution on a unit sphere
u = torch.Tensor(u / d).view(-1, m, C, S, S)
u = torch.cat((u, torch.zeros(N, 1, C, S, S)), dim = 1) # Shape N, m + 1, S^2
u = u.view(-1, m + 1, C, S, S)
evaluation_points = (x.view(-1, 1, C, S, S).cpu() + epsilon * u).view(-1, C, S, S)
if pre_x:
evaluation_points = args.G_activation(evaluation_points) # Apply args.G_activation function
# Compute the approximation sequentially to allow large values of m
pred_victim = []
pred_clone = []
max_number_points = 32*156 # Hardcoded value to split the large evaluation_points tensor to fit in GPU
for i in (range(N * m // max_number_points + 1)):
pts = evaluation_points[i * max_number_points: (i+1) * max_number_points]
if args.dataset == "mnist" or args.dataset == "fashion-mnist":
x = []
for blah in range(pts.size(0)):
temp = pts[blah]
temp = transforms.Compose([
transforms.ToPILImage(),
transforms.Grayscale(num_output_channels=1),transforms.ToTensor(),])(temp)
x.append(temp.reshape(1,1,32,32))
pts2 = torch.cat(x)
pts = pts.to(device)
pts2 = pts2.to(device)
starttime = time()
if usenetwork:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(server_address)
try:
datastr = pickle.dumps(pts2)
sock.sendall(datastr)
sleep(0.1)
str = "done"
sock.sendall(str.encode())
data = []
while True:
packet = sock.recv(4096)
if packet == b'donesend':
break
if not packet or packet == b'done': break
data.append(packet)
# output = sock.recv(4096)
# output = pickle.loads(output)
pred_victim_pts = pickle.loads(b"".join(data))
sleep(0.1)
str = "doneiter"
sock.sendall(str.encode())
finally:
sock.close()
else:
pred_victim_pts = victim_model(pts2).detach() # pts converted to grayscale as pts2 since victim_model is for mnist
endtime = time()
                if quers is None:
quers = pts2
else:
quers = torch.cat((quers, pts2), dim=0)
timequery += endtime - starttime
else:
# TODO: Add server client here as well
pts = pts.to(device)
starttime = time()
pred_victim_pts = victim_model(pts).detach()
                if quers is None:
quers = pts
else:
quers = torch.cat((quers, pts), dim=0)
endtime = time()
timequery += endtime - starttime
pred_clone_pts = clone_model(pts)
pred_victim.append(pred_victim_pts)
pred_clone.append(pred_clone_pts)
pred_victim = torch.cat(pred_victim, dim=0).to(device)
pred_victim2 = pred_victim
pred_clone = torch.cat(pred_clone, dim=0).to(device)
u = u.to(device)
if args.loss == "l1":
loss_fn = F.l1_loss
if args.no_logits:
pred_victim = F.log_softmax(pred_victim, dim=1).detach()
if args.logit_correction == 'min':
pred_victim -= pred_victim.min(dim=1).values.view(-1, 1).detach()
elif args.logit_correction == 'mean':
pred_victim -= pred_victim.mean(dim=1).view(-1, 1).detach()
elif args.loss == "kl":
loss_fn = F.kl_div
pred_clone = F.log_softmax(pred_clone, dim=1)
pred_victim = F.softmax(pred_victim.detach(), dim=1)
else:
raise ValueError(args.loss)
# Compute loss
if args.loss == "kl":
loss_values = - loss_fn(pred_clone, pred_victim, reduction='none').sum(dim = 1).view(-1, m + 1)
else:
loss_values = - loss_fn(pred_clone, pred_victim, reduction='none').mean(dim = 1).view(-1, m + 1)
# Compute difference following each direction
differences = loss_values[:, :-1] - loss_values[:, -1].view(-1, 1)
differences = differences.view(-1, m, 1, 1, 1)
# Formula for Forward Finite Differences
gradient_estimates = 1 / epsilon * differences * u[:, :-1]
if args.forward_differences:
gradient_estimates *= dim
if args.loss == "kl":
gradient_estimates = gradient_estimates.mean(dim = 1).view(-1, C, S, S)
else:
gradient_estimates = gradient_estimates.mean(dim = 1).view(-1, C, S, S) / (num_classes * N)
clone_model.train()
loss_G = loss_values[:, -1].mean()
return gradient_estimates.detach(), loss_G, pred_victim2, quers, timequery
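# Minimal sketch of the forward finite-difference estimator used above, on a
# scalar toy function (illustrative only; the names below are assumptions and a
# single random direction is shown, whereas the code averages over m directions):
#   f = lambda w: np.sum(w ** 2)            # toy loss, true gradient is 2 * w
#   w0 = np.ones(3)
#   u = np.random.randn(3); u /= np.linalg.norm(u)
#   grad_est = 3 * (f(w0 + 1e-7 * u) - f(w0)) / 1e-7 * u   # dim * (df / eps) * u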
def compute_gradient(args, victim_model, clone_model, x, pre_x=False, device="cpu"):
if pre_x and args.G_activation is None:
raise ValueError(args.G_activation)
clone_model.eval()
N = x.size(0)
x_copy = x.clone().detach().requires_grad_(True)
x_ = x_copy.to(device)
if pre_x:
x_ = args.G_activation(x_)
if args.dataset == 'mnist' or args.dataset == "fashion-mnist":
cur = []
for blah in range(N):
temp = x_[blah]
temp = transforms.Compose([
transforms.ToPILImage(),
transforms.Grayscale(num_output_channels=1), transforms.ToTensor(), ])(temp)
cur.append(temp.reshape(1, 1, 32, 32))
x_2 = torch.cat(cur)
# print(pts.size())
x_2 = x_2.to(device)
pred_victim = victim_model(x_2)
pred_clone = clone_model(x_)
else:
pred_victim = victim_model(x_)
pred_clone = clone_model(x_)
if args.loss == "l1":
loss_fn = F.l1_loss
if args.no_logits:
pred_victim_no_logits = F.log_softmax(pred_victim, dim=1)
if args.logit_correction == 'min':
pred_victim = pred_victim_no_logits - pred_victim_no_logits.min(dim=1).values.view(-1, 1)
elif args.logit_correction == 'mean':
pred_victim = pred_victim_no_logits - pred_victim_no_logits.mean(dim=1).view(-1, 1)
else:
pred_victim = pred_victim_no_logits
elif args.loss == "kl":
loss_fn = F.kl_div
pred_clone = F.log_softmax(pred_clone, dim=1)
pred_victim = F.softmax(pred_victim, dim=1)
else:
raise ValueError(args.loss)
loss_values = -loss_fn(pred_clone, pred_victim, reduction='mean')
# print("True mean loss", loss_values)
loss_values.backward()
clone_model.train()
return x_copy.grad, loss_values
class Args(dict):
def __init__(self, **args):
for k,v in args.items():
self[k] = v
def get_classifier(classifier, pretrained=True, resnet34_8x_file=None, num_classes=10):
if classifier == "none":
return NullTeacher(num_classes=num_classes)
else:
raise ValueError("Only Null Teacher should be used")
if classifier == 'vgg11_bn':
return vgg11_bn(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'vgg13_bn':
return vgg13_bn(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'vgg16_bn':
return vgg16_bn(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'vgg19_bn':
return vgg19_bn(pretrained=pretrained, num_classes=num_classes)
if classifier == 'vgg11':
return models.vgg11(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'vgg13':
return models.vgg13(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'vgg16':
return models.vgg16(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'vgg19':
return models.vgg19(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'resnet18':
return resnet18(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'resnet34':
return resnet34(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'resnet50':
return resnet50(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'densenet121':
return densenet121(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'densenet161':
return densenet161(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'densenet169':
return densenet169(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'mobilenet_v2':
return mobilenet_v2(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'googlenet':
return googlenet(pretrained=pretrained, num_classes=num_classes)
elif classifier == 'inception_v3':
return inception_v3(pretrained=pretrained, num_classes=num_classes)
elif classifier == "resnet34_8x":
net = network.resnet_8x.ResNet34_8x(num_classes=num_classes)
if pretrained:
if resnet34_8x_file is not None:
net.load_state_dict( torch.load( resnet34_8x_file) )
else:
raise ValueError("Cannot load pretrained resnet34_8x from here")
return net
else:
raise NameError(f'Please enter a valid classifier {classifier}')
classifiers = [
"resnet34_8x", # Default DFAD
# "vgg11",
# "vgg13",
# "vgg16",
# "vgg19",
"vgg11_bn",
"vgg13_bn",
"vgg16_bn",
"vgg19_bn",
"resnet18",
"resnet34",
"resnet50",
"densenet121",
"densenet161",
"densenet169",
"mobilenet_v2",
"googlenet",
"lenet5"
"inception_v3",
] | 2.484375 | 2 |
Image_classification_sorter.py | OpenVessel/RedTinSaintBernard-for-BraTS2021-challenge | 0 | 12788619 | # https://www.tensorflow.org/tutorials/images/classification
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
#Load PNGs using image_dataset_from_directory - untility
#PNG to tf.data.Dataset our folder system
## to png
## then call DataSet
# main_directory/
# ...class_a/
# ......a_image_1.jpg
# ......a_image_2.jpg
# ...class_b/
# ......b_image_1.jpg
# ......b_image_2.jpg
# https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image_dataset_from_directory
# https://www.tensorflow.org/tutorials/load_data/images
## CREATE datasett
batch_size = 32
img_height = 180
img_width = 180 | 3.125 | 3 |
keras/mlflow/model/pipeline_train.py | PipelineAI/models | 44 | 12788620 | <reponame>PipelineAI/models<filename>keras/mlflow/model/pipeline_train.py
"""
Example of image classification with MLflow using Keras to classify flowers from photos. The data is
taken from ``http://download.tensorflow.org/example_images/flower_photos.tgz`` and may be
downloaded during running this project if it is missing.
"""
import math
import os
import time
import click
import keras
from keras.utils import np_utils
from keras.models import Model
from keras.callbacks import Callback
from keras.applications import vgg16
from keras.layers import Input, Dense, Flatten, Lambda
import numpy as np
from sklearn.model_selection import train_test_split
import tensorflow as tf
import mlflow
from image_pyfunc import decode_and_resize_image, log_model, KerasImageClassifierPyfunc
from datetime import datetime
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
#from utils import plot_history
#import matplotlib.pyplot as plt
#
# See https://nbviewer.jupyter.org/github/WillKoehrsen/Data-Analysis/blob/master/slack_interaction/Interacting%20with%20Slack.ipynb for more details.
#
class SlackUpdate(Callback):
"""Custom Keras callback that posts to Slack while training a neural network"""
def __init__(self,
channel,
slack_webhook_url):
self.channel = channel
self.slack_webhook_url = slack_webhook_url
def file_upload(self,
path,
title):
pass
def report_stats(self, text):
"""Report training stats"""
import subprocess
try:
cmd = 'curl -X POST --data-urlencode "payload={\\"unfurl_links\\": true, \\"channel\\": \\"%s\\", \\"username\\": \\"pipelineai_bot\\", \\"text\\": \\"%s\\"}" %s' % (self.channel, text, self.slack_webhook_url)
response = subprocess.check_output(cmd, shell=True).decode('utf-8')
return True
except:
return False
def on_train_begin(self, logs={}):
from timeit import default_timer as timer
self.report_stats(text=f'Training started at {datetime.now()}')
self.start_time = timer()
self.train_acc = []
self.valid_acc = []
self.train_loss = []
self.valid_loss = []
self.n_epochs = 0
def on_epoch_end(self, batch, logs={}):
self.train_acc.append(logs.get('acc'))
self.valid_acc.append(logs.get('val_acc'))
self.train_loss.append(logs.get('loss'))
self.valid_loss.append(logs.get('val_loss'))
self.n_epochs += 1
message = f'Epoch: {self.n_epochs} Training Loss: {self.train_loss[-1]:.4f} Validation Loss: {self.valid_loss[-1]:.4f}'
self.report_stats(message)
def on_train_end(self, logs={}):
best_epoch = np.argmin(self.valid_loss)
valid_loss = self.valid_loss[best_epoch]
train_loss = self.train_loss[best_epoch]
train_acc = self.train_acc[best_epoch]
valid_acc = self.valid_acc[best_epoch]
message = f'Trained for {self.n_epochs} epochs. Best epoch was {best_epoch + 1}.'
self.report_stats(message)
message = f'Best validation loss = {valid_loss:.4f} Training Loss = {train_loss:.2f} Validation accuracy = {100*valid_acc:.2f}%'
self.report_stats(message)
def on_train_batch_begin(self, batch, logs={}):
pass
def on_train_batch_end(self, batch, logs={}):
pass
def download_input():
import requests
url = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
print("downloading '{}' into '{}'".format(url, os.path.abspath("flower_photos.tgz")))
r = requests.get(url)
with open('flower_photos.tgz', 'wb') as f:
f.write(r.content)
import tarfile
print("decompressing flower_photos.tgz to '{}'".format(os.path.abspath("flower_photos")))
with tarfile.open("flower_photos.tgz") as tar:
tar.extractall(path="./")
@click.command(help="Trains an Keras model on flower_photos dataset."
"The input is expected as a directory tree with pictures for each category in a"
" folder named by the category."
"The model and its metrics are logged with mlflow.")
@click.option("--epochs", type=click.INT, default=1, help="Maximum number of epochs to evaluate.")
@click.option("--batch-size", type=click.INT, default=16,
help="Batch size passed to the learning algo.")
@click.option("--image-width", type=click.INT, default=224, help="Input image width in pixels.")
@click.option("--image-height", type=click.INT, default=224, help="Input image height in pixels.")
@click.option("--seed", type=click.INT, default=97531, help="Seed for the random generator.")
@click.option("--training-data", type=click.STRING, default='./flower_photos')
@click.option("--test-ratio", type=click.FLOAT, default=0.2)
def run(training_data, test_ratio, epochs, batch_size, image_width, image_height, seed):
image_files = []
labels = []
domain = {}
print("Training model with the following parameters:")
for param, value in locals().items():
print(" ", param, "=", value)
if training_data == "./flower_photos" and not os.path.exists(training_data):
print("Input data not found, attempting to download the data from the web.")
download_input()
for (dirname, _, files) in os.walk(training_data):
for filename in files:
if filename.endswith("jpg"):
image_files.append(os.path.join(dirname, filename))
clazz = os.path.basename(dirname)
if clazz not in domain:
domain[clazz] = len(domain)
labels.append(domain[clazz])
train(image_files, labels, domain,
epochs=epochs,
test_ratio=test_ratio,
batch_size=batch_size,
image_width=image_width,
image_height=image_height,
seed=seed)
class MLflowLogger(Callback):
"""
Keras callback for logging metrics and final model with MLflow.
Metrics are logged after every epoch. The logger keeps track of the best model based on the
validation metric. At the end of the training, the best model is logged with MLflow.
"""
def __init__(self, model, x_train, y_train, x_valid, y_valid,
**kwargs):
self._model = model
self._best_val_loss = math.inf
self._train = (x_train, y_train)
self._valid = (x_valid, y_valid)
self._pyfunc_params = kwargs
self._best_weights = None
def on_epoch_end(self, epoch, logs=None):
"""
Log Keras metrics with MLflow. Update the best model if the model improved on the validation
data.
"""
if not logs:
return
for name, value in logs.items():
if name.startswith("val_"):
name = "valid_" + name[4:]
else:
name = "train_" + name
mlflow.log_metric(name, value)
val_loss = logs["val_loss"]
if val_loss < self._best_val_loss:
# Save the "best" weights
self._best_val_loss = val_loss
self._best_weights = [x.copy() for x in self._model.get_weights()]
def on_train_end(self, *args, **kwargs):
"""
Log the best model with MLflow and evaluate it on the train and validation data so that the
metrics stored with MLflow reflect the logged model.
"""
self._model.set_weights(self._best_weights)
x, y = self._train
train_res = self._model.evaluate(x=x, y=y)
for name, value in zip(self._model.metrics_names, train_res):
mlflow.log_metric("train_{}".format(name), value)
x, y = self._valid
valid_res = self._model.evaluate(x=x, y=y)
for name, value in zip(self._model.metrics_names, valid_res):
mlflow.log_metric("valid_{}".format(name), value)
log_model(keras_model=self._model, **self._pyfunc_params)
def on_train_batch_begin(self, batch, logs={}):
pass
def on_train_batch_end(self, batch, logs={}):
pass
def _imagenet_preprocess_tf(x):
return (x / 127.5) - 1
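# For reference: this maps raw pixel values from [0, 255] to [-1, 1],
# e.g. 0 -> -1.0, 127.5 -> 0.0 and 255 -> 1.0.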
def _create_model(input_shape, classes):
image = Input(input_shape)
lambda_layer = Lambda(_imagenet_preprocess_tf)
preprocessed_image = lambda_layer(image)
model = vgg16.VGG16(classes=classes,
input_tensor=preprocessed_image,
weights=None,
include_top=False)
x = Flatten(name='flatten')(model.output)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
return Model(inputs=model.input, outputs=x)
def train(image_files,
labels,
domain,
image_width=224,
image_height=224,
epochs=1,
batch_size=16,
test_ratio=0.2,
seed=None):
"""
Train VGG16 model on provided image files. This will create a new MLflow run and log all
parameters, metrics and the resulting model with MLflow. The resulting model is an instance
of KerasImageClassifierPyfunc - a custom python function model that embeds all necessary
preprocessing together with the VGG16 Keras model. The resulting model can be applied
directly to image base64 encoded image data.
:param image_height: Height of the input image in pixels.
:param image_width: Width of the input image in pixels.
:param image_files: List of image files to be used for training.
:param labels: List of labels for the image files.
:param domain: Dictionary representing the domain of the reponse.
Provides mapping label-name -> label-id.
:param epochs: Number of epochs to train the model for.
:param batch_size: Batch size used during training.
:param test_ratio: Fraction of dataset to be used for validation. This data will not be used
during training.
:param seed: Random seed. Used e.g. when splitting the dataset into train / validation.
"""
assert len(set(labels)) == len(domain)
input_shape = (image_width, image_height, 3)
#mlflow.set_tracking_uri('http://mlflow-tracking-host:port')
# This will create and set the experiment
mlflow.set_experiment(str(int(time.time()))[2:] + 'flower-v1')
with mlflow.start_run() as run:
mlflow.log_param("epochs", str(epochs))
mlflow.log_param("batch_size", str(batch_size))
mlflow.log_param("validation_ratio", str(test_ratio))
if seed:
mlflow.log_param("seed", str(seed))
def _read_image(filename):
with open(filename, "rb") as f:
return f.read()
with tf.Graph().as_default() as g:
with tf.Session(graph=g).as_default():
dims = input_shape[:2]
x = np.array([decode_and_resize_image(_read_image(x), dims)
for x in image_files])
y = np_utils.to_categorical(np.array(labels), num_classes=len(domain))
train_size = 1 - test_ratio
x_train, x_valid, y_train, y_valid = train_test_split(x, y, random_state=seed,
train_size=train_size)
model = _create_model(input_shape=input_shape, classes=len(domain))
model.compile(
optimizer=keras.optimizers.SGD(decay=1e-5, nesterov=True, momentum=.9),
loss=keras.losses.categorical_crossentropy,
metrics=["accuracy"])
sorted_domain = sorted(domain.keys(), key=lambda x: domain[x])
slack_update = SlackUpdate(channel='#slack-after-dark',
slack_webhook_url='https://hooks.slack.com/services/T/B/G')
history = model.fit(
x=x_train,
y=y_train,
validation_data=(x_valid, y_valid),
epochs=epochs,
batch_size=batch_size,
callbacks=[MLflowLogger(model=model,
x_train=x_train,
y_train=y_train,
x_valid=x_valid,
y_valid=y_valid,
artifact_path="model",
domain=sorted_domain,
image_dims=input_shape),
slack_update])
# From the following: https://keras.io/visualization/
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
plt.savefig('training_accuracy.png')
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# plot_history(history.history)
plt.savefig('training_loss.png')
#slack_update.file_upload(path='training_charts.png',
# title='Charts')
if __name__ == '__main__':
run()
| 2.984375 | 3 |
bindings/python/scripts/__init__.py | matthieuvigne/pinocchio | 1 | 12788621 | #
# Copyright (c) 2015-2016,2018 CNRS
#
import numpy as np
from pinocchio.robot_wrapper import RobotWrapper
from . import libpinocchio_pywrap as pin
from . import utils
from .explog import exp, log
from .libpinocchio_pywrap import *
from .deprecated import *
from .shortcuts import *
pin.AngleAxis.__repr__ = lambda s: 'AngleAxis(%s)' % s.vector()
| 1.734375 | 2 |
calculate_single_lstm_cell_error.py | placibo/LSTM | 0 | 12788622 | <reponame>placibo/LSTM
#calculate error for single lstm cell
import numpy as np
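# NOTE (assumption): tanh_activation and tanh_derivative are defined elsewhere in
# this repository; the minimal sketch below shows their assumed behaviour so this
# file is self-contained (the derivative is applied to an already tanh-activated
# value, matching how it is called below).
def tanh_activation(x):
    return np.tanh(x)
def tanh_derivative(tanh_x):
    return 1 - tanh_x ** 2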
def calculate_single_lstm_cell_error(activation_output_error,next_activation_error,next_cell_error,parameters,lstm_activation,cell_activation,prev_cell_activation):
#activation error = error coming from output cell and error coming from the next lstm cell
activation_error = activation_output_error + next_activation_error
#output gate error
oa = lstm_activation['oa']
eo = np.multiply(activation_error,tanh_activation(cell_activation))
eo = np.multiply(np.multiply(eo,oa),1-oa)
#cell activation error
cell_error = np.multiply(activation_error,oa)
cell_error = np.multiply(cell_error,tanh_derivative(tanh_activation(cell_activation)))
#error also coming from next lstm cell
cell_error += next_cell_error
#input gate error
ia = lstm_activation['ia']
ga = lstm_activation['ga']
ei = np.multiply(cell_error,ga)
ei = np.multiply(np.multiply(ei,ia),1-ia)
#gate gate error
eg = np.multiply(cell_error,ia)
eg = np.multiply(eg,tanh_derivative(ga))
#forget gate error
fa = lstm_activation['fa']
ef = np.multiply(cell_error,prev_cell_activation)
ef = np.multiply(np.multiply(ef,fa),1-fa)
#prev cell error
prev_cell_error = np.multiply(cell_error,fa)
#get parameters
fgw = parameters['fgw']
igw = parameters['igw']
ggw = parameters['ggw']
ogw = parameters['ogw']
#embedding + hidden activation error
embed_activation_error = np.matmul(ef,fgw.T)
embed_activation_error += np.matmul(ei,igw.T)
embed_activation_error += np.matmul(eo,ogw.T)
embed_activation_error += np.matmul(eg,ggw.T)
input_hidden_units = fgw.shape[0]
hidden_units = fgw.shape[1]
input_units = input_hidden_units - hidden_units
#prev activation error
prev_activation_error = embed_activation_error[:,input_units:]
#input error (embedding error)
embed_error = embed_activation_error[:,:input_units]
#store lstm error
lstm_error = dict()
lstm_error['ef'] = ef
lstm_error['ei'] = ei
lstm_error['eo'] = eo
lstm_error['eg'] = eg
return prev_activation_error,prev_cell_error,embed_error,lstm_error | 2.703125 | 3 |
chapter 3/sampleCode6.py | DTAIEB/Thoughtful-Data-Science | 15 | 12788623 | <gh_stars>10-100
[[GitHubTracking]]
@route(query="*")
@templateArgs
def do_search(self, query):
self.first_url = "https://api.github.com/search/repositories?q={}".format(query)
self.prev_url = None
self.next_url = None
self.last_url = None
response = requests.get(self.first_url)
if not response.ok:
return "<div>An Error occurred: {{response.text}}</div>"
total_count = response.json()['total_count']
self.next_url = response.links.get('next', {}).get('url', None)
self.last_url = response.links.get('last', {}).get('url', None)
return """
<h1><center>{{total_count}} repositories were found</center></h1>
<ul class="pagination">
<li><a href="#" pd_options="page=first_url" pd_target="body{{prefix}}">First</a></li>
<li><a href="#" pd_options="page=prev_url" pd_target="body{{prefix}}">Prev</a></li>
<li><a href="#" pd_options="page=next_url" pd_target="body{{prefix}}">Next</a></li>
<li><a href="#" pd_options="page=last_url" pd_target="body{{prefix}}">Last</a></li>
</ul>
<table class="table">
<thead>
<tr>
<th>Repo Name</th>
<th>Lastname</th>
<th>URL</th>
<th>Stars</th>
</tr>
</thead>
<tbody id="body{{prefix}}">
{{this.invoke_route(this.do_retrieve_page, page='first_url')}}
</tbody>
</table>
"""
| 2.75 | 3 |
tests/auth_tests/test_apps.py | carlospalol/django | 1 | 12788624 | <filename>tests/auth_tests/test_apps.py
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from django.db import ConnectionHandler
SETTINGS = """
SECRET_KEY = 'django_auth_tests_secret_key'
INSTALLED_APPS = [
'django.contrib.auth.apps.BaseAuthConfig',
'django.contrib.contenttypes',
]
MIGRATION_MODULES = {'auth': None}
DATABASES = %(databases)r
"""
class AppConfigTests(unittest.TestCase):
def test_no_migrations(self):
project_path = tempfile.mkdtemp()
try:
databases = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(project_path, 'db.sqlite3'),
}
}
with open(os.path.join(project_path, 'no_migrations.py'), 'w') as fp:
fp.write(SETTINGS % {'databases': databases})
with open(os.devnull, 'wb') as devnull:
cmd = [
sys.executable,
'-m', 'django',
'migrate',
'--settings', 'no_migrations',
'--pythonpath', project_path,
]
returncode = subprocess.call(cmd, stdout=devnull, stderr=devnull)
# Migrate command ran without errors.
self.assertEqual(returncode, 0)
# Auth tables weren't created.
conns = ConnectionHandler(databases)
try:
self.assertEqual(
set(conns['default'].introspection.table_names()),
{'django_content_type', 'django_migrations'},
)
finally:
conns.close_all()
finally:
shutil.rmtree(project_path)
| 2.328125 | 2 |
Assignments/CH485---Artificial-Intelligence-and-Chemistry-master/Practice 06/gcn_logP.py | SeungsuKim/CH485--AI-and-Chemistry | 2 | 12788625 | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from utils import read_ZINC_smiles, smiles_to_onehot, convert_to_graph
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
import sys
import time
# execution) python gcn_logP.py 3 64 256 0.001 gsc
# Default option
num_layer = 3
hidden_dim1 = 64
hidden_dim2 = 256
init_lr = 0.001
using_sc = 'gsc' # 'sc', 'gsc', 'no'
if( len(sys.argv) == 6 ):
# Note that sys.argv[0] is gcn_logP.py
num_layer = int(sys.argv[1])
hidden_dim1 = int(sys.argv[2])
hidden_dim2 = int(sys.argv[3])
init_lr = float(sys.argv[4])
    using_sc = sys.argv[5] # 'sc', 'gsc', 'no'
model_name = 'gcn_logP_' + str(num_layer) + '_' + str(hidden_dim1) + '_' + str(hidden_dim2) + '_' + str(init_lr) + '_' + using_sc
#1. Prepare data - X : fingerprint, Y : logP
# and split to (training:validation:test) set
smi_total, logP_total, tpsa_total = read_ZINC_smiles(50000)
num_train = 30000
num_validation = 10000
num_test = 10000
smi_train = smi_total[0:num_train]
logP_train = logP_total[0:num_train]
smi_validation = smi_total[num_train:(num_train+num_validation)]
logP_validation = logP_total[num_train:(num_train+num_validation)]
smi_test = smi_total[(num_train+num_validation):]
logP_test = logP_total[(num_train+num_validation):]
#2. Construct a neural network
def skip_connection(input_X, new_X, act):
# Skip-connection, H^(l+1)_sc = H^(l) + H^(l+1)
inp_dim = int(input_X.get_shape()[2])
out_dim = int(new_X.get_shape()[2])
if(inp_dim != out_dim):
output_X = act(new_X + tf.layers.dense(input_X, units=out_dim, use_bias=False))
else:
output_X = act(new_X + input_X)
return output_X
def gated_skip_connection(input_X, new_X, act):
# Skip-connection, H^(l+1)_gsc = z*H^(l) + (1-z)*H^(l+1)
inp_dim = int(input_X.get_shape()[2])
out_dim = int(new_X.get_shape()[2])
def get_gate_coefficient(input_X, new_X, out_dim):
X1 = tf.layers.dense(input_X, units=out_dim, use_bias=True)
X2 = tf.layers.dense(new_X, units=out_dim, use_bias=True)
gate_coefficient = tf.nn.sigmoid(X1 + X2)
return gate_coefficient
if(inp_dim != out_dim):
input_X = tf.layers.dense(input_X, units=out_dim, use_bias=False)
gate_coefficient = get_gate_coefficient(input_X, new_X, out_dim)
output_X = tf.multiply(new_X, gate_coefficient) + tf.multiply(input_X, 1.0-gate_coefficient)
return output_X
def graph_convolution(input_X, input_A, hidden_dim, act, using_sc):
    # Graph Convolution, H^(l+1) = A(H^(l)W^(l) + b^(l))
output_X = tf.layers.dense(input_X,
units=hidden_dim,
use_bias=True,
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer())
output_X = tf.matmul(input_A, output_X)
if( using_sc == 'sc' ):
output_X = skip_connection(input_X, output_X, act)
elif( using_sc == 'gsc' ):
output_X = gated_skip_connection(input_X, output_X, act)
elif( using_sc == 'no' ):
output_X = act(output_X)
else:
output_X = gated_skip_connection(input_X, output_X)
return output_X
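# Illustrative sketch of one graph-convolution step on a toy 3-node graph
# (comment only; these arrays are assumptions, not part of this script):
#   A_toy = np.array([[1., 1., 0.], [1., 1., 1.], [0., 1., 1.]])   # adjacency with self-loops
#   H_toy = np.random.randn(3, 58)                                 # node features (num_features=58)
#   W_toy = np.random.randn(58, 64)                                # layer weights (hidden_dim1=64)
#   H_next = np.maximum(A_toy @ (H_toy @ W_toy), 0)                # relu(A H W), cf. H^(l+1) above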
# Readout
def readout(input_X, hidden_dim, act):
# Readout, Z = sum_{v in G} NN(H^(L)_v)
output_Z = tf.layers.dense(input_X,
units=hidden_dim,
use_bias=True,
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer())
output_Z = tf.reduce_sum(output_Z, axis=1)
    output_Z = act(output_Z)  # apply the readout activation before returning
    return output_Z
num_atoms=50
num_features=58
X = tf.placeholder(tf.float64, shape=[None, num_atoms, num_features])
A = tf.placeholder(tf.float64, shape=[None, num_atoms, num_atoms])
Y = tf.placeholder(tf.float64, shape=[None, ])
is_training = tf.placeholder(tf.bool, shape=())
h = X
# Graph convolution layers
for i in range(num_layer):
h = graph_convolution(h,
A,
hidden_dim1,
tf.nn.relu,
using_sc)
# Readout layer
h = readout(h, hidden_dim2, tf.nn.sigmoid)
# Predictor composed of MLPs(multi-layer perceptron)
h = tf.layers.dense(h,
units=hidden_dim2,
use_bias=True,
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer())
h = tf.layers.dense(h,
units=hidden_dim2,
use_bias=True,
activation=tf.nn.tanh,
kernel_initializer=tf.contrib.layers.xavier_initializer())
Y_pred = tf.layers.dense(h,
units=1,
use_bias=True,
kernel_initializer=tf.contrib.layers.xavier_initializer())
#3. Set a loss function, in this case we will use a MSE-loss (l2-norm)
Y_pred = tf.reshape(Y_pred, shape=[-1,])
Y_pred = tf.cast(Y_pred, tf.float64)
Y = tf.cast(Y, tf.float64)
loss = tf.reduce_mean( (Y_pred - Y)**2 )
#4. Set an optimizer
lr = tf.Variable(0.0, trainable = False) # learning rate
opt = tf.train.AdamOptimizer(lr).minimize(loss) # Note that we use the Adam optimizer in this practice.
#opt = tf.train.GradientDescentOptimizer(lr).minimize(loss)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
#5. Training & validation
batch_size = 100
epoch_size = 100
decay_rate = 0.95
batch_train = int(num_train/batch_size)
batch_validation = int(num_validation/batch_size)
batch_test = int(num_test/batch_size)
total_iter = 0
total_time = 0.0
for t in range(epoch_size):
pred_train = []
sess.run(tf.assign( lr, init_lr*( decay_rate**t ) ))
st = time.time()
for i in range(batch_train):
total_iter += 1
smi_batch = smi_train[i*batch_size:(i+1)*batch_size]
X_batch, A_batch = convert_to_graph(smi_batch)
Y_batch = logP_train[i*batch_size:(i+1)*batch_size]
_opt, _Y, _loss = sess.run([opt, Y_pred, loss], feed_dict = {X : X_batch, A : A_batch, Y : Y_batch, is_training : True})
pred_train.append(_Y.flatten())
#print("Epoch :", t, "\t batch:", i, "Loss :", _loss, "\t Training")
pred_train = np.concatenate(pred_train, axis=0)
error = (logP_train-pred_train)
mae = np.mean(np.abs(error))
rmse = np.sqrt(np.mean(error**2))
stdv = np.std(error)
print ("MAE :", mae, "RMSE :", rmse, "Std :", stdv, "\t Training, \t Epoch :", t)
pred_validation = []
for i in range(batch_validation):
smi_batch = smi_validation[i*batch_size:(i+1)*batch_size]
X_batch, A_batch = convert_to_graph(smi_batch)
Y_batch = logP_validation[i*batch_size:(i+1)*batch_size]
_Y, _loss = sess.run([Y_pred, loss], feed_dict = {X : X_batch, A : A_batch, Y : Y_batch, is_training : False})
#print("Epoch :", t, "\t batch:", i, "Loss :", _loss, "\t validation")
pred_validation.append(_Y.flatten())
pred_validation = np.concatenate(pred_validation, axis=0)
error = (logP_validation-pred_validation)
mae = np.mean(np.abs(error))
rmse = np.sqrt(np.mean(error**2))
stdv = np.std(error)
et = time.time()
print ("MAE :", mae, "RMSE :", rmse, "Std :", stdv, "\t Validation, \t Epoch :", t, "\t Time per epoch", (et-st))
total_time += (et-st)
### save model
ckpt_path = 'save/'+model_name+'.ckpt'
saver.save(sess, ckpt_path, global_step=total_iter)
#6. Test
pred_test = []
for i in range(batch_test):
smi_batch = smi_test[i*batch_size:(i+1)*batch_size]
X_batch, A_batch = convert_to_graph(smi_batch)
Y_batch = logP_test[i*batch_size:(i+1)*batch_size]
_Y, _loss = sess.run([Y_pred, loss], feed_dict = {X : X_batch, A : A_batch, Y : Y_batch, is_training : False})
pred_test.append(_Y.flatten())
pred_test = np.concatenate(pred_test, axis=0)
error = (logP_test-pred_test)
mae = np.mean(np.abs(error))
rmse = np.sqrt(np.mean(error**2))
stdv = np.std(error)
print ("MSE :", mae, "RMSE :", rmse, "Std :", stdv, "\t Test", "\t Total time :", total_time)
plt.figure()
plt.scatter(logP_test, pred_test, s=3)
plt.xlabel('logP - Truth', fontsize=15)
plt.ylabel('logP - Prediction', fontsize=15)
x = np.arange(-4,6)
plt.plot(x,x,c='black')
plt.tight_layout()
plt.savefig('./figures/'+model_name+'_results.png')
| 2.171875 | 2 |
Evaluation/pairwise_distance.py | DigitalPhonetics/SpeechRepresentationFinetuning | 1 | 12788626 | # Average Pariwsie Distance:
import argparse
import pickle
import pandas as pd
from scipy.spatial import distance
parser = argparse.ArgumentParser(
description="Average Pariwsie Distance Evaluation (Quality Analysis)"
)
parser.add_argument("embed_type", type=str, metavar="N", help="")
parser.add_argument("label_name", type=str, metavar="N", help="")
arg = parser.parse_args()
# TODO: update label classes automatically based on the label name
noise_lable_transfer = {
"Clean": "Clean",
"Babble": "Noisy",
"Telephone": "Noisy",
"Music": "Noisy",
}
age_lable_transfer = {
60: 50,
70: 50,
}
def filter(data_dict):
filter_dict = data_dict.copy()
for key in data_dict:
emo_list = data_dict[key]["emotion"]
if len(emo_list) != 0:
filter_dict[key]["emotion"] = emo_list[0]
else:
del filter_dict[key]
return filter_dict
class PairwiseDistance:
"""
1. extract all classes we have
    2. calculate the centroid (mean of all points belonging to the class) of each class
    3. calculate the distance between this centroid and all other centroids, and average
    4. print the average pairwise distance for each class
    5. average all values to get the final average
Args:
embed_type: 'contrastive', 'barlowtwins', or 'combined'
label_name: 'valence_arousal', 'age', 'gender', 'noise'
label_classes: based on label name
train_result_path: path to trained embedding result
test_result_path: path to test embedding result (we evaluate this frist)
"""
def __init__(self, embed_type, label_name, train_result_path, test_result_path):
self.embed_type = embed_type
self.label_name = label_name
self.train_result_path = train_result_path
self.test_result_path = test_result_path
def extract_embedding_label(self, embed_type, label_name, file_path):
"""
return embeddings and corresponding labels
"""
with open(file_path, "rb") as fp:
data = pickle.load(fp)
if self.label_name == "emotion":
            data = filter(data)
df_data = pd.DataFrame.from_dict(data, orient="index")
df_data = df_data[[embed_type, label_name]]
df_data = df_data.dropna(subset=[embed_type])
return df_data
def _get_avg_pairwise_dist(self, centroids) -> dict:
"""
Given dictionary of label-centroid point, return label-average pairwise distance
"""
result_dict = {}
for label, centroid in centroids.items():
centroids_cp = centroids.copy()
pairwise_dist = 0.0
del centroids_cp[label]
for other_label, other_centroid in centroids_cp.items():
dist = distance.euclidean(centroid, other_centroid)
pairwise_dist += dist
result_dict[label] = pairwise_dist # / len(centroids)
print(result_dict)
return result_dict
def avg_distances_per_class(self):
"""
Calculate the centroid given embeddings(a list of 1D vector)
Euclidan distance between each data point in a cluster to
its respective cluster centroid, put in pandas df
"""
train_pair_distances = {}
test_pair_distances = {}
# data type of embedding: numpy.ndarray here
train_df_data = self.extract_embedding_label(
embed_type=self.embed_type,
label_name=self.label_name,
file_path=self.train_result_path,
)
test_df_data = self.extract_embedding_label(
embed_type=self.embed_type,
label_name=self.label_name,
file_path=self.test_result_path,
)
if self.label_name == "noise_type":
train_df_data["noise_type"] = train_df_data["noise_type"].replace(
                noise_label_transfer
)
test_df_data["noise_type"] = test_df_data["noise_type"].replace(
                noise_label_transfer
)
if self.label_name == "age":
train_df_data["age"] = train_df_data["age"].replace(age_lable_transfer)
test_df_data["age"] = test_df_data["age"].replace(age_lable_transfer)
label_classes = train_df_data[self.label_name].unique().tolist()
        # put all in a pandas data structure, so we can extract embeddings based on label
for label in label_classes:
# get centroid/mean of vectors from each class, and calculate the average euclidean distance between it and all datapoint
# training set
train_one_class = train_df_data[train_df_data[self.label_name] == label]
# all vectors belong to this class
one_class_vectors = train_one_class[self.embed_type].to_numpy()
# get centroid of those vectors
            one_class_centroid = one_class_vectors.mean(axis=0)
            train_pair_distances[label] = one_class_centroid
# test set
test_one_class = test_df_data[test_df_data[self.label_name] == label]
test_one_class_vectors = test_one_class[self.embed_type].to_numpy()
            test_one_class_centroid = test_one_class_vectors.mean(axis=0)
            test_pair_distances[label] = test_one_class_centroid
# get average pairwise distance
train_avg_distances = self._get_avg_pairwise_dist(train_pair_distances)
test_avg_distances = self._get_avg_pairwise_dist(test_pair_distances)
# return result in the pandas format
train_avg_pair_distances = pd.DataFrame.from_dict(
train_avg_distances,
orient="index",
columns=["avg_pairwise_dist"],
)
print(
"--- Average Pariwsie Distance of {}/{} (train)--- \n{}".format(
self.label_name, self.embed_type, train_avg_pair_distances
)
)
test_avg_pair_distances = pd.DataFrame.from_dict(
test_avg_distances,
orient="index",
columns=["avg_pairwise_dist"],
)
print(
"--- Average Pariwsie Distance of {}/{} (test)--- \n{}".format(
self.label_name, self.embed_type, test_avg_pair_distances
)
)
# TODO: write into csv file? (better copy-paste)
return train_avg_pair_distances, test_avg_pair_distances
if __name__ == "__main__":
embed_type = arg.embed_type
label_name = arg.label_name
# TODO: auto search file in cache
avg_pair_distance = PairwiseDistance(
embed_type=embed_type,
label_name=label_name,
# train_result_path=iemocap_barlowtwins_train,
# test_result_path=iemocap_barlowtwins_test,
)
avg_pair_distance.avg_distances_per_class()
| 3.015625 | 3 |
views/__init__.py | nakpisang/dicoding | 0 | 12788627 | <filename>views/__init__.py
from PIL import Image
from flask import Flask, Blueprint, render_template, request, jsonify, redirect, url_for, g, session
from torch_mtcnn import detect_faces
from flask_bootstrap import Bootstrap
from util import is_same, ModelLoaded
base = Blueprint('base', __name__, template_folder='templates')
THRESHOLD = 1.5
# bootstrap = Bootstrap(base)
@base.before_request
def before_request():
g.user = None
if 'user_id' in session:
user = [x for x in users if x.id == session['user_id']][0]
g.user = user
class User:
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
def __repr__(self):
return f'<User: {self.username}>'
users = []
users.append(User(id=1, username='nakpisang', password='password'))
users.append(User(id=2, username='zul', password='password'))
users.append(User(id=3, username='rizal', password='password'))
users.append(User(id=4, username='admin', password='password'))
@base.route('/index')
def index():
return render_template('index.html')
@base.route('/home')
def home():
return render_template('indexx.html')
@base.route('/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('user_id', None)
username = request.form['username']
password = request.form['password']
        # use next() with a default so an unknown username does not raise IndexError
        user = next((x for x in users if x.username == username), None)
if user and user.password == password:
session['user_id'] = user.id
return redirect(url_for('base.home'))
return redirect(url_for('base.login'))
return render_template('loginn.html')
@base.route('/logout')
def logout():
session.clear()
return redirect(url_for('base.login'))
@base.route('/predict', methods=['post'])
def predict():
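    # Face-verification endpoint: detect a face in each uploaded image with MTCNN,
    # crop to the first detection (if any), then compare the two crops via is_same()
    # and report whether the distance falls below THRESHOLD.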
files = request.files
img_left = Image.open(files.get('imgLeft')).convert('RGB')
img_right = Image.open(files.get('imgRight')).convert('RGB')
bbox_left, _ = detect_faces(img_left)
bbox_right, _ = detect_faces(img_right)
if bbox_left.shape[0] > 0:
a, b, c, d, _ = bbox_left[0]
img_left = img_left.crop((a, b, c, d))
if bbox_right.shape[0] > 0:
a, b, c, d, _ = bbox_right[0]
img_right = img_right.crop((a, b, c, d))
distance, similar = is_same(img_left, img_right, THRESHOLD)
model_acc = ModelLoaded.acc
return jsonify(same=('BERBEDA', 'SAMA')[similar.item()],
score=distance.item(),
model_acc=model_acc,
threshold=THRESHOLD)
| 2.375 | 2 |
product_search_python/tests/run_unittests.py | sp4350/Test | 34 | 12788628 | #!/usr/bin/env python2.7
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import optparse
import os
import sys
import unittest
import logging
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps.
SDK_PATH Path to the SDK installation
TEST_PATH Path to package containing test modules"""
def main(sdk_path, test_path):
sys.path.insert(0, sdk_path)
import dev_appserver
dev_appserver.fix_sys_path()
project_dir = os.path.dirname(os.path.dirname(__file__))
suite = unittest.loader.TestLoader().discover(test_path,
top_level_dir=project_dir)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
parser = optparse.OptionParser(USAGE)
options, args = parser.parse_args()
if len(args) != 2:
print 'Error: Exactly 2 arguments required.'
parser.print_help()
sys.exit(1)
sdk_path = args[0]
test_path = args[1]
logging.getLogger().setLevel(logging.ERROR)
main(sdk_path, test_path)
| 2.4375 | 2 |
ROI_Arrival_Viewer.py | alangburl/NWIC_Spectrum_Viewer | 0 | 12788629 | import numpy as np
from ROI_Arrival import ROI_Arrival,ROI_Location
# predefined imports
import sys,time,winsound
from PyQt5.QtWidgets import (QApplication, QPushButton,QWidget,QGridLayout,
QSizePolicy,QLineEdit,
QMainWindow,QAction,QVBoxLayout
,QDockWidget,QListView,
QAbstractItemView,QLabel,QFileDialog,QTextEdit,
QInputDialog,QSlider,QMdiArea,QMdiSubWindow,
QMessageBox)
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt
#import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
class ROI_Viewer(QMainWindow):
done=False
def __init__(self,list_time,list_channel,sync_time,calibration):
super().__init__()
self.num_sync=sync_time.size
self.num_pulses=list_time.size
self.list_time,self.list_channel=list_time,list_channel
self.sync_time,self.calibration=sync_time,calibration
self.sync_delta=sync_time[2]-sync_time[1]
self.lower,self.upper=9.5,10.9
self.font1=QFont()
self.font1.setPointSize(12)
self.size_policy=QSizePolicy.Expanding
self.menu()
self.showMaximized()
self.setWindowTitle('ROI Timing Arrival')
self.geometry()
# self.process()
self.show()
def menu(self):
self.menuFile=self.menuBar().addMenu('&File')
self.save_file=QAction('&Save Spectrum')
self.save_file.triggered.connect(self.save_spectrum)
self.save_file.setShortcut('CTRL+S')
self.save_file.setEnabled(False)
# self.save_roi=QAction('&Save ROI')
# self.save_roi.triggered.connect(self.save_roi_csv)
# self.save_roi.setEnabled(True)
self.menuFile.addActions([self.save_file])
def geometry(self):
r1_label=QLabel(r'Region 1-2 divider: [us]')
r1_label.setFont(self.font1)
r2_label=QLabel(r'Region 2-3 divider: [us]')
r2_label.setFont(self.font1)
self.r_1_slider=QSlider(Qt.Horizontal)
self.r_1_slider.setSizePolicy(self.size_policy,self.size_policy)
self.r_1_slider.setMinimum(0)
self.r_1_slider.setMaximum(self.sync_delta-1)
self.r_1_slider.setSingleStep(1)
self.r_1_slider.setTickInterval(50)
self.r_1_slider.setValue(100)
self.r_1_slider.setTickPosition(QSlider.TicksBelow)
self.r_1_slider.valueChanged.connect(self.update_r_1)
self.r_1_slider.setFont(self.font1)
self.r_2_slider=QSlider(Qt.Horizontal)
self.r_2_slider.setSizePolicy(self.size_policy,self.size_policy)
self.r_2_slider.setMinimum(101)
self.r_2_slider.setMaximum(self.sync_delta)
self.r_2_slider.setSingleStep(1)
self.r_2_slider.setTickInterval(50)
self.r_2_slider.setValue(101)
self.r_2_slider.setTickPosition(QSlider.TicksBelow)
self.r_2_slider.valueChanged.connect(self.update_r_2)
self.r_2_slider.setFont(self.font1)
self.r_1_label=QLabel(self)
self.r_1_label.setSizePolicy(self.size_policy,self.size_policy)
self.r_1_label.setText(str(self.r_1_slider.value()))
self.r_1_label.setFont(self.font1)
self.r_2_label=QLabel(self)
self.r_2_label.setSizePolicy(self.size_policy,self.size_policy)
self.r_2_label.setText(str(self.r_2_slider.value()))
self.r_2_label.setFont(self.font1)
self.processer=QPushButton('Process',self)
self.processer.clicked.connect(self.process)
self.processer.setFont(self.font1)
lower_label=QLabel('Lower ROI: [MeV]',self)
lower_label.setFont(self.font1)
upper_label=QLabel('Upper ROI: [MeV]',self)
upper_label.setFont(self.font1)
self.lower_text=QLineEdit(self)
self.lower_text.setFont(self.font1)
self.lower_text.setText(str(self.lower))
self.upper_text=QLineEdit(self)
self.upper_text.setFont(self.font1)
self.upper_text.setText(str(self.upper))
self.time_plot=QWidget()
self.time_figure=Figure()
self.time_canvas=FigureCanvas(self.time_figure)
self.time_toolbar=NavigationToolbar(self.time_canvas,self)
layout=QVBoxLayout()
layout.addWidget(self.time_toolbar)
layout.addWidget(self.time_canvas)
self.time_plot.setLayout(layout)
self.time_ax=self.time_canvas.figure.subplots()
self.time_ax.set_title('Time')
main_=QWidget()
layout=QGridLayout(self)
layout.addWidget(r1_label,0,0)
layout.addWidget(self.r_1_slider,0,1)
layout.addWidget(self.r_1_label,0,2)
layout.addWidget(lower_label,0,3)
layout.addWidget(self.lower_text,0,4)
layout.addWidget(upper_label,1,3)
layout.addWidget(self.upper_text,1,4)
layout.addWidget(r2_label,1,0)
layout.addWidget(self.r_2_slider,1,1)
layout.addWidget(self.r_2_label,1,2)
layout.addWidget(self.processer,2,0)
layout.addWidget(self.time_plot,3,0,1,5)
main_.setLayout(layout)
self.setCentralWidget(main_)
def update_r_1(self):
self.r_2_slider.setMinimum(self.r_1_slider.value()+1)
self.r_1_label.setText(str(self.r_1_slider.value()))
def update_r_2(self):
self.r_2_label.setText(str(self.r_2_slider.value()))
def process(self):
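        # Recompute ROI arrival times from the list-mode data, histogram them over one
        # sync period, and split the counts into three regions at the slider positions.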
self.save_file.setEnabled(True)
# self.save_roi.setEnabled(True)
s1=time.time()
delt=(self.sync_time[2]-self.sync_time[1])
self.lower=float(self.lower_text.text())
self.upper=float(self.upper_text.text())
self.arrival,self.height,self.raw=ROI_Arrival(self.sync_time,self.list_time,
self.num_sync,self.list_channel,
self.num_pulses,self.lower,
self.upper,self.calibration)
num_bins=int(delt/4)
bins=np.linspace(0,delt,num_bins)
self.bins=bins
s=len(self.arrival)
self.output=ROI_Location(self.arrival,bins,num_bins,s)
r1,r2,r3=0,0,0
print('Process ROI Arrivals in {:.3f}s'.format(time.time()-s1))
for i in range(num_bins):
if bins[i]<=self.r_1_slider.value():
r1+=self.output[i]
elif bins[i]>self.r_1_slider.value() and bins[i]<=self.r_2_slider.value():
r2+=self.output[i]
else:
r3+=self.output[i]
self.time_ax.clear()
self.time_ax.plot(bins,self.output,'r*')
self.time_ax.axvline(self.r_1_slider.value(),label='Region 1-2 divider at {:.2f}'.format(self.r_1_slider.value()))
self.time_ax.axvline(self.r_2_slider.value(),label='Region 2-3 divider at {:.2f}'.format(self.r_2_slider.value()))
# self.time_ax.set_yscale('log')
self.time_ax.set_ylabel('Counts',fontsize=18)
self.time_ax.set_xlabel(r'Arrival Time [$\mu s$]',fontsize=18)
self.time_canvas.draw()
self.done=True
self.percentages=[r1/(r1+r2+r3)*100,
r2/(r1+r2+r3)*100,
r3/(r1+r2+r3)*100]
QMessageBox.information(self,
                                'ROI Percentages','''Region 1:{:.2f}%\nRegion 2:{:.2f}%\nRegion 3:{:.2f}%'''.format(
r1/(r1+r2+r3)*100,
r2/(r1+r2+r3)*100,r3/(r1+r2+r3)*100),
QMessageBox.Ok)
# print('Region 1 total ROI percentage: {:.2f}%'.format(r1/(r1+r2+r3)*100))
# print('Region 2 total ROI percentage: {:.2f}%'.format(r2/(r1+r2+r3)*100))
# print('Region 3 total ROI percentage: {:.2f}%'.format(r3/(r1+r2+r3)*100))
def save_spectrum(self):
name=QFileDialog.getSaveFileName(self,'File Name','',
                                         'Text File (*.txt);;Comma Separated File (*.csv)')
if name[0]!=' ':
f=open(name[0],'w')
f.write('%{:.2f},{:.2f},{:.2f}\n'.format(*self.percentages))
for i in range(len(self.bins)):
f.write('{:.6f},{}\n'.format(self.bins[i],self.output[i]))
f.close()
# def save_roi_csv(self):
# name,ok=QFileDialog.getSaveFileName(self,'Safe File Name','',
# 'Comma Seperated File (*.csv)')
# if ok:
# f=open(name,'w')
# f.write('Pulse_Height(MeV),Time(s)\n')
# print(len(self.height))
# for i in range(len(self.height)):
# f.write('{:.3f},{:.3f}\n'.format(self.height[i],self.raw[i]*1e-6))
# f.close()
# print('All finished') | 2.109375 | 2 |
scripts/aggregate_tool_info.py | dhrithideshpande/review-technology-dictates-algorithms | 3 | 12788630 | <reponame>dhrithideshpande/review-technology-dictates-algorithms
import numpy as np
import pandas as pd
if __name__ == '__main__':
table_df = pd.read_csv('../summary_data/table1_final.csv')
table_df.loc[:,'seed_type'] = table_df.loc[:,'fix_length_seed'] + table_df.loc[:,'spaced_seed'] + table_df.loc[:,'seed_chaining']
tool_df = table_df.groupby(['indexing', 'seed_type', 'pairwise_alignment']).size().to_frame(name = 'count').reset_index().to_csv(index=False, path_or_buf="../raw_data/aggregate_tool_info.csv")
| 2.578125 | 3 |
src/gausskernel/dbmind/sqldiag/test/test_demo.py | wotchin/openGauss-server | 1 | 12788631 | """
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
from main import train, predict
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
trans2sec = 10 ** 12
def main():
train_file = 'data/train.csv'
test_file = 'data/test.csv'
label_file = 'data/label.csv'
# create dataset
test_label = list()
with open(label_file) as f:
for line in f.readlines():
line = line.strip()
test_label += [float(line)]
cluster_number, top_sql = train('data/train.csv', 'data/', 2000, 40)
print('Best cluster number is: ' + str(cluster_number))
print('Typical SQL template is: ')
print(top_sql)
result = predict('data/test.csv', 'data/', 0.1)
# plot
x = range(len(result))
scores = r2_score(test_label, result, multioutput='variance_weighted')
plt.scatter(x, test_label, marker='o', label='actual value')
plt.scatter(x, result, marker='*', label='predicted value')
plt.title("acc: " + str(scores * 100))
plt.legend()
plt.show()
if __name__ == '__main__':
main()
| 2.390625 | 2 |
deep_gw_pe_followup/plotting/corner.py | avivajpeyi/gw_pe_judge | 0 | 12788632 | from corner import corner
import numpy as np
CORNER_KWARGS = dict(
smooth=0.9,
label_kwargs=dict(fontsize=30),
title_kwargs=dict(fontsize=16),
color="tab:blue",
truth_color="tab:orange",
quantiles=[0.16, 0.84],
levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9.0 / 2.0)),
plot_density=False,
plot_datapoints=False,
fill_contours=True,
max_n_ticks=3,
verbose=False,
use_math_text=True,
)
LABELS = dict(
q=r"$q$",
xeff=r"$\chi_{\rm eff}$",
a_1=r"$a_1$",
a_2=r"$a_2$",
cos_tilt_1=r"$\cos \theta_1$",
cos_tilt_2=r"$\cos \theta_2$",
)
def plot_corner(df, fname="corner.png"):
labels = [LABELS.get(i, i.replace("_", "")) for i in df.columns.values]
fig = corner(df, labels=labels, **CORNER_KWARGS)
fig.savefig(fname) | 2.609375 | 3 |
223_test_time_augmentation_for_semantic_segmentation.py | Data-Laboratory/WorkExamples | 1 | 12788633 | #Ref: <NAME>
"""
# TTA - Should be called prediction time augmentation
#We can augment each input image, predict augmented images and average all predictions
"""
import os
import cv2
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
import random
model = tf.keras.models.load_model("mitochondria_load_from_disk_focal_dice_50epochs.hdf5", compile=False)
image_directory = 'data2/test_images/test/'
mask_directory = 'data2/test_masks/test/'
SIZE = 256
image_dataset = []
mask_dataset = []
images = os.listdir(image_directory)
for i, image_name in enumerate(images): #Remember enumerate method adds a counter and returns the enumerate object
if (image_name.split('.')[1] == 'tif'):
#print(image_directory+image_name)
image = cv2.imread(image_directory+image_name)
image = Image.fromarray(image)
image = image.resize((SIZE, SIZE))
image_dataset.append(np.array(image))
#Iterate through all mask images, resize to SIZE x SIZE
#Then save into the mask_dataset numpy array
masks = os.listdir(mask_directory)
for i, image_name in enumerate(masks):
if (image_name.split('.')[1] == 'tif'):
image = cv2.imread(mask_directory+image_name, 0)
image = Image.fromarray(image)
image = image.resize((SIZE, SIZE))
mask_dataset.append(np.array(image))
#
image_dataset = np.array(image_dataset) / 255.
#Do not normalize masks, just rescale to 0 to 1.
mask_dataset = (np.array(mask_dataset)) /255.
#Demonstrate TTP on single image
n = random.randint(0, mask_dataset.shape[0]-1)  #randint is inclusive, so stay within bounds
temp_test_img = image_dataset[n,:,:,:]
temp_mask = mask_dataset[n,:,:]
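# Test-time augmentation: predict on the original image and on left-right / up-down
# flipped copies, flip each prediction back, then average the four predictions.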
p0 = model.predict(np.expand_dims(temp_test_img, axis=0))[0][:, :, 0]
p1 = model.predict(np.expand_dims(np.fliplr(temp_test_img), axis=0))[0][:, :, 0]
p1 = np.fliplr(p1)
p2 = model.predict(np.expand_dims(np.flipud(temp_test_img), axis=0))[0][:, :, 0]
p2 = np.flipud(p2)
p3 = model.predict(np.expand_dims(np.fliplr(np.flipud(temp_test_img)), axis=0))[0][:, :, 0]
p3 = np.fliplr(np.flipud(p3))
thresh = 0.3
p = (((p0 + p1 + p2 + p3) / 4) > thresh).astype(np.uint8)
plt.figure(figsize=(12, 12))
plt.subplot(231)
plt.title('Original mask')
plt.imshow(temp_mask, cmap='gray')
plt.subplot(232)
plt.title('Prediction No Aug')
plt.imshow(p0>thresh, cmap='gray')
plt.subplot(233)
plt.title('Prediction LR')
plt.imshow(p1>thresh, cmap='gray')
plt.subplot(234)
plt.title('Prediction UD')
plt.imshow(p2>thresh, cmap='gray')
plt.subplot(235)
plt.title('Prediction LR and UD')
plt.imshow(p3>thresh, cmap='gray')
plt.subplot(236)
plt.title('Average Prediction')
plt.imshow(p>thresh, cmap='gray')
plt.show()
#Now that we know the transformations are working, let us extend to all predictions
predictions = []
for image in image_dataset:
pred_original = model.predict(np.expand_dims(image, axis=0))[0][:, :, 0]
pred_lr = model.predict(np.expand_dims(np.fliplr(image), axis=0))[0][:, :, 0]
pred_lr = np.fliplr(pred_lr)
pred_ud = model.predict(np.expand_dims(np.flipud(image), axis=0))[0][:, :, 0]
pred_ud = np.flipud(pred_ud)
pred_lr_ud = model.predict(np.expand_dims(np.fliplr(np.flipud(image)), axis=0))[0][:, :, 0]
pred_lr_ud = np.fliplr(np.flipud(pred_lr_ud))
preds = (pred_original + pred_lr + pred_ud + pred_lr_ud) / 4
predictions.append(preds)
predictions = np.array(predictions)
threshold = 0.5
predictions_th = predictions > threshold
test_img_number = random.randint(0, mask_dataset.shape[0]-1)
test_img = image_dataset[test_img_number]
ground_truth=mask_dataset[test_img_number]
#test_img_norm=test_img[:,:,0][:,:,None]
test_img_input=np.expand_dims(test_img, 0)
prediction = predictions_th[test_img_number]
plt.figure(figsize=(16, 8))
plt.subplot(231)
plt.title('Testing Image')
plt.imshow(test_img, cmap='gray')
plt.subplot(232)
plt.title('Testing Label')
plt.imshow(ground_truth, cmap='gray')
plt.subplot(233)
plt.title('Prediction on test image')
plt.imshow(prediction, cmap='gray')
plt.show() | 2.8125 | 3 |
constants.py | Sinica-SLAM/COSPRO-mix | 5 | 12788634 | <reponame>Sinica-SLAM/COSPRO-mix
NUM_BANDS = 4
SNR_THRESH = -6.
PRE_NOISE_SECONDS = 2.0
SAMPLERATE = 16000
MAX_SAMPLE_AMP = 0.95
MIN_SNR_DB = -3.
MAX_SNR_DB = 6.
PRE_NOISE_SAMPLES = PRE_NOISE_SECONDS * SAMPLERATE | 1.3125 | 1 |
ArraysAndStrings/bfs.py | ashaik4/CodeVault | 0 | 12788635 | <reponame>ashaik4/CodeVault
from collections import deque
"""
Find the minimum distance required to move the robot (9) from the field.
Idea: BFS
"""
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
class QueueNode:
def __init__(self, pt, distance):
self.point = pt
self.dist = distance
def isValid(grid, row, col):
ROW = len(grid)
COL = len(grid[0])
return (row >= 0 and row < ROW) and col >= 0 and col < COL
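# Offsets of the four 4-connected neighbours: up, left, right, down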
rowNum = [-1, 0, 0, 1]
colNum = [0, -1, 1, 0]
def find_robot(grid):
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 9:
return Point(i, j)
def bfs(matrix, src, dest):
    ROW = len(matrix)
    COL = len(matrix[0])
if matrix[src.x][src.y] == 0 or matrix[dest.x][dest.y] == 0:
return -1
visited = [[False for i in range(ROW)] for j in range(COL)]
visited[src.x][src.y] = True
q = deque()
s = QueueNode(src, 0)
q.append(s)
while q:
current = q[0]
pt = current.point
if pt.x == dest.x and pt.y == dest.y:
return current.dist
q.popleft()
for i in range(4):
row = pt.x + rowNum[i]
col = pt.y + colNum[i]
            if isValid(matrix, row, col) and matrix[row][col] \
and not visited[row][col]:
visited[row][col] = True
adjCell = QueueNode(Point(row, col), current.dist + 1)
q.append(adjCell)
return -1
"""
grid = [[1,1,1,9,0],
[1,0,0,1,0],
[1,1,1,1,0],
[1,1,1,1,0],
[0,0,0,0,0]]
"""
grid = [[9]]
destination = find_robot(grid)
source = Point(0, 0)
print(bfs(grid, source, destination))
| 3.875 | 4 |
analyze/lookup.py | patarapolw/HanziSRS | 10 | 12788636 | <gh_stars>1-10
import re
from HanziSRS.dir import database_path
class Cedict:
def __init__(self):
super().__init__()
self.dictionary = dict()
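        # CC-CEDICT entries look like, e.g.: 中國 中国 [Zhong1 guo2] /China/
        # (traditional, simplified, pinyin, definitions); the regex below captures these fields.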
with open(database_path('cedict_ts.u8'), encoding='utf8') as f:
for row in f.readlines():
result = re.fullmatch(r'(\w+) (\w+) \[(.+)\] /(.+)/\n', row)
if result is not None:
trad, simp, pinyin, eng = result.groups()
self.dictionary.setdefault(simp, [])
self.dictionary.setdefault(trad, [])
self.dictionary[simp].append({
'traditional': trad,
'simplified': simp,
'reading': pinyin,
'english': eng
})
if trad != simp:
self.dictionary[trad].append(self.dictionary[simp][-1])
def get(self, vocab):
return self.dictionary.get(vocab, [dict()])[0]
class SpoonFed:
def __init__(self):
self.entries = []
with open(database_path('SpoonFed.tsv')) as f:
next(f)
for row in f:
contents = row.split('\t')
self.entries.append({
'Chinese': contents[2],
'English': contents[0]
})
def formatted_lookup(self, vocab):
result = ''
for item in self.iter_lookup(vocab):
result += item['Chinese'] + '<br />'
result += item['English'] + '<br />'
return result
def iter_lookup(self, vocab):
if vocab:
for entry in self.entries:
if re.search(vocab.replace('…', '.*'), entry['Chinese']):
yield entry
class HanziVariant:
def __init__(self):
self.entries = dict()
with open(database_path('hanzi_variant.tsv')) as f:
next(f)
for row in f:
contents = row.split('\t')
self.entries[contents[1]] = contents[-3]
def get(self, hanzi):
return self.entries.get(hanzi, '')
if __name__ == '__main__':
print(HanziVariant().entries)
| 2.875 | 3 |
flask-backend/migrations/versions/ca6c6171cdbe_added_task_model.py | harshagrawal523/OpenMF | 92 | 12788637 | """Added task model
Revision ID: ca6c6171cdbe
Revises: 989dbc01a9b0
Create Date: 2021-06-11 08:31:03.584401
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ca6c6171cdbe'
down_revision = '989dbc01a9b0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('task',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=255), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('assinged_on', sa.DateTime(), nullable=False),
sa.Column('due_on', sa.DateTime(), nullable=True),
sa.Column('admin_id', sa.Integer(), nullable=False),
sa.Column('is_completed', sa.Boolean(), nullable=True),
sa.Column('extractor_id', sa.Integer(), nullable=True),
sa.Column('management_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['admin_id'], ['admin.id'], ),
sa.ForeignKeyConstraint(['extractor_id'], ['extractor.id'], ),
sa.ForeignKeyConstraint(['management_id'], ['management.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('task')
# ### end Alembic commands ###
| 1.71875 | 2 |
test/test_crosscorr.py | thunder-project/thunder-register | 16 | 12788638 | <gh_stars>10-100
import pytest
from numpy import arange, allclose, asarray, expand_dims
from scipy.ndimage.interpolation import shift
from registration import CrossCorr
pytestmark = pytest.mark.usefixtures("eng")
def test_fit(eng):
reference = arange(25).reshape(5, 5)
algorithm = CrossCorr()
deltas = [[1, 2], [-2, 1]]
shifted = [shift(reference, delta, mode='wrap', order=0) for delta in deltas]
model = algorithm.fit(shifted, reference=reference)
assert allclose(model.toarray(), deltas)
def test_fit_3d(eng):
reference = arange(125).reshape(5, 5, 5)
algorithm = CrossCorr()
deltas = [[1, 0, 2], [0, 1, 2]]
shifted = [shift(reference, delta, mode='wrap', order=0) for delta in deltas]
model = algorithm.fit(shifted, reference=reference)
assert allclose(model.toarray(), deltas)
def test_fit_axis(eng):
reference = arange(60).reshape(2, 5, 6)
algorithm = CrossCorr(axis=0)
a = shift(reference[0], [1, 2], mode='wrap', order=0)
b = shift(reference[1], [-2, 1], mode='wrap', order=0)
c = shift(reference[0], [2, 1], mode='wrap', order=0)
d = shift(reference[1], [1, -2], mode='wrap', order=0)
shifted = [asarray([a, b]), asarray([c, d]),]
model = algorithm.fit(shifted, reference=reference)
assert allclose(model.toarray(), [[[1, 2], [-2, 1]], [[2, 1], [1, -2]]])
| 1.890625 | 2 |
src/limecc/first.py | avakar/limecc | 2 | 12788639 | """
This module defines some basic operations on words and sets of words.
A word is any iterable of symbols (strings), i.e. ('list', 'item') is a word.
The iterable must support slices, joining with operator + and must return
their length through the 'len' function.
"""
from .rule import Rule
from .grammar import Grammar
def first(word, k=1):
"""Returns FIRST_k(word).
The implied grammar is taken to be empty and all symbols are treated as terminals.
See <http://www.jambe.co.nz/UNI/FirstAndFollowSets.html> for more information.
>>> first('hello, world', k=7)
'hello, '
>>> first(('list', 'item', 'item', 'item', 'item'), k=2)
('list', 'item')
"""
return word[:k]
def oplus(left, right, k=1):
"""Returns the set { FIRST_k(vw) | v in left, w in right } and the length of its shortest member.
The 'left' and 'right' are iterables of words.
The function return type is a pair (s, l), where 's' is the first-set
and 'l' is the length of its shortest member. If 's' is empty, 'l' is set equal to 'k'.
The type of 's' is unspecified, but is guaranteed to be an iterable of words
and to support operators 'in' and 'not in'.
>>> s, l = oplus(['ab', 'ac', ''], ['zz', 'y', ''], k=3)
>>> sorted(list(s))
['', 'ab', 'aby', 'abz', 'ac', 'acy', 'acz', 'y', 'zz']
>>> l
0
>>> s, l = oplus(['ab', 'ac'], ['zz', 'y'], k=3)
>>> sorted(list(s))
['aby', 'abz', 'acy', 'acz']
>>> l
3
"""
res = set()
min_len = k
for lword in left:
for rword in right:
w = first(lword + rword, k)
if len(w) < min_len:
min_len = len(w)
res.add(w)
return res, min_len
class First:
"""Represents the first-set for a given grammar.
The grammar and 'k' parameter are passed during construction.
>>> g = Grammar(Rule('list', ()), Rule('list', ('list', 'item')))
>>> f = First(g, k=2)
>>> f.grammar
Grammar(Rule('list', ()), Rule('list', ('list', 'item')))
The objects are callable and, for a given word 'w', return the set
{ FIRST_k(u) | w =>* u, u terminal }.
>>> sorted(list(f(('item', 'item', 'item'))))
[('item', 'item')]
>>> sorted(list(f(())))
[()]
>>> sorted(list(f(('list',))))
[(), ('item',), ('item', 'item')]
If the constructor parameter `nonterms` is set to True, the first sets
are extended to sentential forms, i.e. `{ FIRST_k(u) | w =>* u, u is a sentential form }`.
>>> f = First(g, k=2, nonterms=True)
>>> sorted(list(f(('list',))))
[(), ('item',), ('item', 'item'), ('list',), ('list', 'item')]
"""
def __init__(self, grammar, k=1, nonterms=False):
"""
Given a grammar and a 'k', constructs the first-set table for all non-terminals.
The table is then used by the '__call__' method.
For the construction algorithm, see the Dragon book.
"""
self.grammar = grammar
self.k = k
if nonterms:
self.table = dict((nonterm, set([(nonterm,)])) for nonterm in grammar.nonterms())
else:
self.table = dict((nonterm, set()) for nonterm in grammar.nonterms())
# The sets in the table start empty and are iteratively filled.
# The termination is guaranteed by the existence of the least fixed point.
done = False
while not done:
done = True
for rule in grammar:
for word in self(rule.right):
if word not in self.table[rule.left]:
self.table[rule.left].add(word)
done = False
def __call__(self, word):
"""Returns FIRST_k(word) with respect to the associated grammar."""
res = set([()])
for symbol in word:
if symbol not in self.table:
rset = set([(symbol,)])
else:
rset = self.table[symbol]
res, c = oplus(res, rset, self.k)
if c == self.k:
break
return res
| 3.9375 | 4 |
tests.py | ctcampbell/veracode-python | 13 | 12788640 | <filename>tests.py
if __name__ == '__main__':
import doctest
from veracode import application, sandbox, build
from veracode.exceptions import *
try:
doctest.testmod(application, raise_on_error=True)
doctest.testmod(sandbox, raise_on_error=True)
doctest.testmod(build, raise_on_error=True)
except:
pass
finally:
app = application.Application('TEST_APPLICATION')
app.delete()
# pycco veracode/*.py veracode/SDK veracode/API -ips
| 1.679688 | 2 |
src/medius/mediuspackets/policy.py | Metroynome/robo | 8 | 12788641 | <gh_stars>1-10
from enums.enums import MediusEnum, CallbackStatus
from utils import utils
from medius.mediuspackets.policyresponse import PolicyResponseSerializer
class PolicySerializer:
data_dict = [
{'name': 'mediusid', 'n_bytes': 2, 'cast': None},
{'name': 'message_id', 'n_bytes': MediusEnum.MESSAGEID_MAXLEN, 'cast': None},
{'name': 'session_key', 'n_bytes': MediusEnum.SESSIONKEY_MAXLEN, 'cast': None}
]
class PolicyHandler:
def process(self, serialized, monolith, con):
def chunks(s, n):
"""Produce `n`-character chunks from `s`."""
res = []
for start in range(0, len(s), n):
res.append(s[start:start+n])
return res
policy = monolith.get_policy()
packets = []
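        # Split the policy text into response-sized chunks (one byte is reserved,
        # presumably for a terminator) and send one PolicyResponse per chunk,
        # flagging the final chunk with end_of_list.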
policy_split = chunks(policy, MediusEnum.POLICY_MAXLEN-1)
for i, policy_substr in enumerate(policy_split):
end_of_list = int((i == (len(policy_split)-1)))
packets.append(PolicyResponseSerializer.build(
serialized['message_id'],
CallbackStatus.SUCCESS,
policy_substr,
end_of_list
))
return packets
| 2.53125 | 3 |
Users/forms.py | tifat58/lsv-c4-django-webexperiment | 1 | 12788642 | <gh_stars>1-10
from django import forms
from django.utils.translation import activate
from Common import constants
from Users.enums import *
from Users.models import *
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import PasswordChangeForm
class UserInfoForm(forms.Form):
"""
User info form
"""
# languageCode= 'en'
def __init__(self, *args, **kwargs):
# self.webLanguageId = kwargs.pop(CookieFields.WebsiteLanguageId)
self.languageCode = kwargs.pop('languageCode','en')
        # exclude regional English, Portuguese and Spanish variants, and order by the website language
qs_language = Language.objects.exclude(language_code__in=['en-au','en-gb','pt-br','es-ar','es-mx','es-ni','es-ve']).order_by('language_name_'+self.languageCode)
qs_country = Country.objects.order_by('country_name_'+self.languageCode)
super(UserInfoForm, self).__init__(*args, **kwargs)
self.fields['area_country'].queryset = qs_country
self.fields['education_country'].queryset = qs_country
self.fields['area_language'].queryset = qs_language
self.fields['living_area_language'].queryset = qs_language
self.fields['native_language'].queryset = qs_language
self.fields['home_language'].queryset = qs_language
self.fields['learned_language'].queryset = qs_language
age = forms.CharField(widget=forms.TextInput(attrs={'class': 'onlyInt number-input form-control'}), required=True)
gender = forms.ModelChoiceField(queryset=Gender.objects.all(), to_field_name="name",
widget=forms.Select(attrs={'class': 'form-control'}), required=True)
area_country = forms.ModelChoiceField(queryset=Country.objects.order_by('country_name'),
widget=forms.Select(attrs={'class': 'form-control'}), required=True)
area_language = forms.ModelChoiceField(queryset=Language.objects.order_by('language_name'),
widget=forms.Select(attrs={'class': 'form-control'}), required=False)
living_period = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control onlyInt lessThanEqualAge number-input'}), required=True)
is_lived_in_other_area = forms.BooleanField(widget=forms.RadioSelect(
choices=((False, constants.CommonConstants.NO_TEXT), (True, constants.CommonConstants.YES_TEXT))),
initial=False, required=False)
#living_area_language = forms.TypedChoiceField(choices=LanguageList, widget=forms.Select(attrs={'class': 'form-control'}), required=False)
living_area_language = forms.ModelChoiceField(queryset=Language.objects.order_by('language_name'),
widget=forms.Select(attrs={'class': 'form-control'}), required=False)
living_area_period = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control onlyInt lessThanAge number-input'}), required=False)
is_multilingual = forms.BooleanField(widget=forms.RadioSelect(
choices=[(False, constants.CommonConstants.NO_TEXT), (True, constants.CommonConstants.YES_TEXT)]),
initial=False, required=False)
native_language = forms.ModelChoiceField(queryset=Language.objects.all(),
widget=forms.Select(attrs={'class': 'form-control'}), required=False)
education_country = forms.ModelChoiceField(queryset=Country.objects.order_by('country_name'),
widget=forms.Select(attrs={'class': 'form-control'}), required=False)
education_time = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control onlyInt lessThanAge number-input'}), required=False)
home_language = forms.ModelChoiceField(queryset=Language.objects.order_by('language_name'),
widget=forms.Select(attrs={'class': 'form-control'}), required=False)
learned_language = forms.ModelChoiceField(queryset=Language.objects.order_by('language_name'),
widget=forms.Select(attrs={'class': 'form-control'}), required=False)
learned_language_time = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control onlyInt lessThanAge number-input'}), required=False)
# EducationDegree
highest_education_degree = forms.ModelChoiceField(queryset=EducationDegree.objects.all(),
widget=forms.Select(attrs={'class': 'form-control'}),
required=False)
have_linguistics_degree = forms.BooleanField(widget=forms.RadioSelect(
choices=[(False, constants.CommonConstants.NO_TEXT), (True, constants.CommonConstants.YES_TEXT)]),
initial=False, required=False)
prolific_id = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control'}), required=False)
class PasswordChangeCustomForm(PasswordChangeForm):
old_password = forms.CharField(required=True,
widget=forms.PasswordInput(attrs={
'placeholder': _('OLD_PASSWORD_TEXT')}))
new_password1 = forms.CharField(required=True,widget=forms.PasswordInput(attrs={
'placeholder': _('NEW_PASSWORD_TEXT')}))
new_password2 = forms.CharField(required=True,
widget=forms.PasswordInput(attrs={
'placeholder': _("NEW_PASSWORD_CONFIRMATION_TEXT")}))
class PasswordResetConfirmationForm(forms.Form):
new_password1 = forms.CharField(required=True,widget=forms.PasswordInput(attrs={
'placeholder': _('NEW_PASSWORD_TEXT')}))
new_password2 = forms.CharField(required=True,
widget=forms.PasswordInput(attrs={
'placeholder': _("NEW_PASSWORD_CONFIRMATION_TEXT")}))
| 2.171875 | 2 |
datasets/regdb_dataset.py | JDAI-CV/CM-NAS | 31 | 12788643 | <reponame>JDAI-CV/CM-NAS<gh_stars>10-100
import os
import random
import numpy as np
from PIL import Image
import torch.utils.data as data
class RegDBData(data.Dataset):
def __init__(self, data_root, trial, transform=None, visibleIndex=None, thermalIndex=None, img_size=(128,256)):
# Load training images (path) and labels
train_visible_list = os.path.join(data_root, 'idx/train_visible_{}.txt'.format(trial))
train_thermal_list = os.path.join(data_root, 'idx/train_thermal_{}.txt'.format(trial))
visible_img_file, train_visible_label = load_data(train_visible_list)
thermal_img_file, train_thermal_label = load_data(train_thermal_list)
train_visible_image = []
for i in range(len(visible_img_file)):
img = Image.open(os.path.join(data_root, visible_img_file[i]))
img = img.resize(img_size, Image.ANTIALIAS)
pix_array = np.array(img)
train_visible_image.append(pix_array)
train_visible_image = np.array(train_visible_image)
train_thermal_image = []
for i in range(len(thermal_img_file)):
img = Image.open(os.path.join(data_root, thermal_img_file[i]))
img = img.resize(img_size, Image.ANTIALIAS)
pix_array = np.array(img)
train_thermal_image.append(pix_array)
train_thermal_image = np.array(train_thermal_image)
        # already RGB (images were loaded with PIL)
        self.train_visible_image = train_visible_image
        self.train_visible_label = train_visible_label
        # already RGB (images were loaded with PIL)
        self.train_thermal_image = train_thermal_image
        self.train_thermal_label = train_thermal_label
self.transform = transform
self.vIndex = visibleIndex
self.tIndex = thermalIndex
def __getitem__(self, index):
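        # Return one visible image and one thermal image (with their labels),
        # selected through the externally supplied index lists.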
img_v, target_v = self.train_visible_image[self.vIndex[index]], self.train_visible_label[self.vIndex[index]]
img_t, target_t = self.train_thermal_image[self.tIndex[index]], self.train_thermal_label[self.tIndex[index]]
img_v = self.transform(img_v)
img_t = self.transform(img_t)
return img_v, img_t, target_v, target_t
def __len__(self):
# return len(self.train_visible_label)
return len(self.vIndex)
def process_test_regdb(data_root, trial=1, modality='visible'):
if modality=='visible':
data_path = os.path.join(data_root, 'idx/test_visible_{}.txt'.format(trial))
elif modality=='thermal':
data_path = os.path.join(data_root, 'idx/test_thermal_{}.txt'.format(trial))
file_image, file_label = load_data(data_path)
file_image = [os.path.join(data_root, f) for f in file_image]
return file_image, np.array(file_label)
def load_data(data_path):
with open(data_path, 'r') as f:
data_file_list = f.read().splitlines()
# Get full list of image and labels
file_image = [s.split(' ')[0] for s in data_file_list]
file_label = [int(s.split(' ')[1]) for s in data_file_list]
return file_image, file_label | 2.296875 | 2 |
wqxlib-python/wqxlib/wqx_v3_0/ComparableAnalyticalMethod.py | FlippingBinary/wqxlib | 0 | 12788644 | <reponame>FlippingBinary/wqxlib
from ..common import WQXException
from .SimpleContent import (
MethodIdentifier,
MethodIdentifierContext,
MethodModificationText
)
from yattag import Doc
class ComparableAnalyticalMethod:
"""Identifies the procedures, processes, and references required to determine the analytical methods used to obtain a result."""
__methodIdentifier: MethodIdentifier
__methodIdentifierContext: MethodIdentifierContext
__methodModificationText: MethodModificationText
def __init__(self, o=None, *,
methodIdentifier:MethodIdentifier = None,
methodIdentifierContext:MethodIdentifierContext = None,
methodModificationText:MethodModificationText = None
):
if isinstance(o, ComparableAnalyticalMethod):
# Assign attributes from object without typechecking
self.__methodIdentifier = o.methodIdentifier
self.__methodIdentifierContext = o.methodIdentifierContext
self.__methodModificationText = o.methodModificationText
elif isinstance(o, dict):
# Assign attributes from dictionary with typechecking
      # dict.get takes the default positionally, not as a keyword argument
      self.methodIdentifier = o.get('methodIdentifier', None)
      self.methodIdentifierContext = o.get('methodIdentifierContext', None)
      self.methodModificationText = o.get('methodModificationText', None)
else:
# Assign attributes from named keywords with typechecking
self.methodIdentifier = methodIdentifier
self.methodIdentifierContext = methodIdentifierContext
self.methodModificationText = methodModificationText
@property
def methodIdentifier(self) -> MethodIdentifier:
return self.__methodIdentifier
@methodIdentifier.setter
def methodIdentifier(self, val:MethodIdentifier) -> None:
self.__methodIdentifier = MethodIdentifier(val)
@property
def methodIdentifierContext(self) -> MethodIdentifierContext:
return self.__methodIdentifierContext
@methodIdentifierContext.setter
def methodIdentifierContext(self, val:MethodIdentifierContext) -> None:
self.__methodIdentifierContext = MethodIdentifierContext(val)
@property
def methodModificationText(self) -> MethodModificationText:
return self.__methodModificationText
@methodModificationText.setter
def methodModificationText(self, val:MethodModificationText) -> None:
self.__methodModificationText = None if val is None else MethodModificationText(val)
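  # generateXML() below produces, roughly (element values are illustrative, not real data):
  #   <ComparableAnalyticalMethod>
  #     <MethodIdentifier>...</MethodIdentifier>
  #     <MethodIdentifierContext>...</MethodIdentifierContext>
  #     <MethodModificationText>...</MethodModificationText>   (only when provided)
  #   </ComparableAnalyticalMethod>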
def generateXML(self, name:str = 'ComparableAnalyticalMethod') -> str:
doc, tag, text, line = Doc().ttl()
with tag(name):
if self.__methodIdentifier is None:
raise WQXException("Attribute 'methodIdentifier' is required.")
line('MethodIdentifier', self.__methodIdentifier)
if self.__methodIdentifierContext is None:
raise WQXException("Attribute 'methodIdentifierContext' is required.")
line('MethodIdentifierContext', self.__methodIdentifierContext)
if self.__methodModificationText is not None:
line('MethodModificationText', self.__methodModificationText)
return doc.getvalue()
| 2.265625 | 2 |
lightctr/layer.py | yanyachen/LightCTR | 0 | 12788645 | import tensorflow as tf
class ResidualDense(tf.keras.layers.Layer):
def __init__(
self,
units,
activation=None,
dropout=None,
kernel_initializer=None,
kernel_regularizer=None,
output_activation=None
):
super(ResidualDense, self).__init__()
self.units = units
self.activation = activation
self.dropout = dropout
self.kernel_initializer = kernel_initializer
self.kernel_regularizer = kernel_regularizer
        if output_activation is None:
            self.output_activation = self.activation
        else:
            self.output_activation = output_activation
def build(self, input_shape):
last_dim_units = input_shape[-1].value
self.layer0 = tf.keras.layers.Dense(
units=self.units,
activation=self.activation,
kernel_initializer=self.kernel_initializer,
kernel_regularizer=self.kernel_regularizer
)
if self.dropout is not None and self.dropout > 0:
self.dropout_layer = tf.keras.layers.Dropout(
rate=float(self.dropout)
)
self.layer1 = tf.keras.layers.Dense(
units=last_dim_units,
activation=tf.keras.activations.linear,
kernel_initializer=self.kernel_initializer,
kernel_regularizer=self.kernel_regularizer
)
def call(self, inputs, training):
net = self.layer0(inputs)
if self.dropout is not None and self.dropout > 0:
net = self.dropout_layer(net, training=training)
net = self.layer1(net)
        # residual connection followed by the output activation
        outputs = self.output_activation(inputs + net)
return outputs
def compute_output_shape(self, input_shape):
return input_shape
class LayerNormalization(tf.keras.layers.Layer):
def __init__(self):
super(LayerNormalization, self).__init__()
def build(self, input_shape):
last_dim = input_shape[-1].value
self.scale = tf.Variable(
initial_value=tf.ones([last_dim]),
trainable=True,
name='layer_norm_scale',
dtype=tf.float32,
)
self.bias = tf.Variable(
initial_value=tf.zeros([last_dim]),
trainable=True,
name='layer_norm_bias',
dtype=tf.float32,
)
def call(self, inputs, epsilon=1e-6):
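        # Standard layer normalisation: normalise over the last axis, then apply the
        # learned scale and bias.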
mean = tf.reduce_mean(
input_tensor=inputs, axis=[-1], keepdims=True
)
variance = tf.reduce_mean(
input_tensor=tf.square(inputs - mean), axis=[-1], keepdims=True
)
norm_inputs = (inputs - mean) * tf.math.rsqrt(variance + epsilon)
return norm_inputs * self.scale + self.bias
def compute_output_shape(self, input_shape):
return input_shape
class AdversarialNoise(tf.keras.layers.Layer):
def __init__(self, eps):
super(AdversarialNoise, self).__init__()
self.eps = eps
def _scale_l2(self, x):
ndim = tf.keras.backend.ndim(x)
feature_dims = [i for i in range(1, ndim)]
alpha = tf.reduce_max(
input_tensor=tf.abs(x),
axis=feature_dims,
keepdims=True
) + 1e-12
l2_norm = alpha * tf.sqrt(
tf.reduce_sum(
input_tensor=tf.pow(x / alpha, 2),
axis=feature_dims,
keepdims=True
) + 1e-6
)
x_unit = x / l2_norm
return x_unit
def _truncated_normal_eps(self, x):
ndim = tf.keras.backend.ndim(x)
sample_eps = tf.keras.backend.truncated_normal(
shape=tf.keras.backend.shape(x)[:1],
mean=tf.cast(self.eps, dtype=tf.float32) / 2.0,
stddev=tf.square(tf.cast(self.eps, dtype=tf.float32) / 4.0)
)
sample_eps = tf.tile(
input=tf.reshape(
sample_eps, [-1] + [1 for i in range(ndim-1)]
),
multiples=[1] + list(tf.keras.backend.int_shape(x)[1:])
)
return sample_eps
def call(self, inputs, loss, training):
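        # During training, add an adversarial perturbation along the gradient of the
        # loss w.r.t. the inputs (FGSM-style direction), rescaled to unit L2 norm per
        # example and multiplied by a randomly sampled epsilon.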
if training:
inputs_grad = tf.gradients(
ys=loss,
xs=inputs,
aggregation_method=(
tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
)
)
inputs_grad_dense = tf.squeeze(
tf.stop_gradient(inputs_grad), axis=0
)
noise_unit = self._scale_l2(inputs_grad_dense)
sample_eps = self._truncated_normal_eps(noise_unit)
noise = noise_unit * sample_eps
return inputs + noise
else:
return inputs
def compute_output_shape(self, input_shape):
return input_shape
class TargetedDense(tf.keras.layers.Dense):
def __init__(
self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
targeted_dropout_type=None,
target_rate=0.50,
dropout_rate=0.50,
**kwargs
):
super(TargetedDense, self).__init__(
units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs
)
self.targeted_dropout_type = targeted_dropout_type
self.target_rate = target_rate
self.dropout_rate = dropout_rate
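    # Targeted dropout: only the weights/units with the smallest norms (the likely
    # pruning candidates) are eligible for dropout during training; at inference the
    # below-threshold weights are zeroed outright.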
def targeted_weight_dropout(
self, w, target_rate, dropout_rate, is_training
):
w_shape = w.shape
w = tf.reshape(w, [-1, w_shape[-1]])
norm = tf.abs(w)
idx = tf.cast(
target_rate * tf.cast(tf.shape(input=w)[0], dtype=tf.float32),
dtype=tf.int32
)
threshold = tf.sort(norm, axis=0)[idx]
mask = norm < threshold[None, :]
if not is_training:
w = (1.0 - tf.cast(mask, dtype=tf.float32)) * w
w = tf.reshape(w, w_shape)
return w
mask = tf.cast(
tf.logical_and(
tf.random.uniform(tf.shape(input=w)) < dropout_rate,
mask
), dtype=tf.float32
)
w = (1.0 - mask) * w
w = tf.reshape(w, w_shape)
return w
def targeted_unit_dropout(
self, w, target_rate, dropout_rate, is_training
):
w_shape = w.shape
w = tf.reshape(w, [-1, w_shape[-1]])
norm = tf.norm(tensor=w, axis=0)
idx = int(target_rate * int(w.shape[1]))
sorted_norms = tf.sort(norm)
threshold = sorted_norms[idx]
mask = (norm < threshold)[None, :]
mask = tf.tile(mask, [w.shape[0], 1])
mask = tf.compat.v1.where(
tf.logical_and(
(1.0 - dropout_rate) < tf.random.uniform(tf.shape(input=w)),
mask
),
tf.ones_like(w, dtype=tf.float32),
tf.zeros_like(w, dtype=tf.float32)
)
w = (1.0 - mask) * w
w = tf.reshape(w, w_shape)
return w
def call(self, inputs, training):
inputs = tf.convert_to_tensor(value=inputs, dtype=self.dtype)
rank = inputs._rank()
if (self.targeted_dropout_type == 'weight'):
self.kernel.assign(
self.targeted_weight_dropout(
self.kernel,
self.target_rate,
self.dropout_rate,
training
)
)
elif (self.targeted_dropout_type == 'unit'):
self.kernel.assign(
self.targeted_unit_dropout(
self.kernel,
self.target_rate,
self.dropout_rate,
training
)
)
if rank > 2:
# Broadcasting is required for the inputs.
outputs = tf.tensordot(inputs, self.kernel, [[rank - 1], [0]])
# Reshape the output back to the original ndim of the input.
if not tf.executing_eagerly():
shape = inputs.get_shape().as_list()
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
outputs = tf.linalg.matmul(inputs, self.kernel)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
class VectorDense(tf.keras.layers.Layer):
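    # Dense projection along the second axis (e.g. the feature-field axis): the input
    # (batch, fields, dim) is permuted to (batch, dim, fields), the field dimension is
    # projected to `units`, then the result is permuted back to (batch, units, dim).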
def __init__(
self,
units,
activation=None,
use_bias=True,
kernel_initializer=None,
kernel_regularizer=None,
dropout=None
):
super(VectorDense, self).__init__()
self.units = units
self.dropout = dropout
self.permute_layer = tf.keras.layers.Permute(
dims=(2, 1)
)
if self.dropout is not None and self.dropout > 0:
self.dropout_layer = tf.keras.layers.Dropout(
rate=float(self.dropout)
)
self.dense_layer = tf.keras.layers.Dense(
units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer
)
def call(self, inputs, training):
net = self.permute_layer(inputs)
if self.dropout is not None and self.dropout > 0:
net = self.dropout_layer(net, training=training)
net = self.dense_layer(net)
outputs = self.permute_layer(net)
return outputs
def compute_output_shape(self, input_shape):
output_shape = tf.TensorShape(input_shape).as_list()
output_shape[1] = self.units
return tf.TensorShape(output_shape)
| 2.65625 | 3 |
mutatest/transformers.py | EvanKepner/m | 49 | 12788646 | """
Transformers
------------
Transformers defines the mutations that can be applied. The ``CATEGORIES`` dictionary lists all
valid category codes that are valid filters. The primary classes are:
1. ``LocIndex``
2. ``MutateAST``
The ``LocIndex`` is a location index within a given Abstract Syntax Tree (AST) that can be mutated.
The ``MutateAST`` class walks the AST of a given source file to identify all of the locations,
and optionally create the mutation at that node. These are implemented in the ``Genome`` object.
``MutateAST`` is constructed from ``MutateBase`` and the appropriate mixin class - either
``ConstantMixin`` for Python 3.8, or ``NameConstantMixin`` for Python 3.7.
"""
import ast
import logging
import sys
from pathlib import Path
####################################################################################################
# AST TRANSFORMERS
####################################################################################################
from typing import Any, Dict, List, NamedTuple, Optional, Set, Type, Union
try:
# Python 3.8
from typing import Protocol
except ImportError:
# Python 3.7
from typing_extensions import Protocol # type: ignore
LOGGER = logging.getLogger(__name__)
CATEGORIES = {
"AugAssign": "aa",
"BinOp": "bn",
"BinOpBC": "bc",
"BinOpBS": "bs",
"BoolOp": "bl",
"Compare": "cp",
"CompareIn": "cn",
"CompareIs": "cs",
"If": "if",
"Index": "ix",
"NameConstant": "nc",
"SliceUS": "su",
}
####################################################################################################
# CORE TYPES
####################################################################################################
class LocIndex(NamedTuple):
"""Location index within AST to mark mutation targets.
The ``end_lineno`` and ``end_col_offset`` properties are set to ``None`` by default as they
are only used distinctly in Python 3.8.
"""
ast_class: str
lineno: int
col_offset: int
op_type: Any # varies based on the visit_Node definition in MutateAST
# New in Python 3.8 AST: https://docs.python.org/3/whatsnew/3.8.html#improved-modules
# These values are always set to None if running Python 3.7.
# The NodeSpan class is used to manage setting the values
end_lineno: Optional[int] = None
end_col_offset: Optional[int] = None
class MutationOpSet(NamedTuple):
"""Container for compatible mutation operations. Also used in the CLI display."""
name: str
desc: str
operations: Set[Any]
category: str
class LocIndexNode(Protocol):
"""Type protocol for AST Nodes that include lineno and col_offset properties."""
lineno: int
col_offset: int
class NodeSpan(NamedTuple):
"""Node span to support Py3.7 and 3.8 compatibility for locations.
This is used to generate the set the values in the LocIndex as a general class.
"""
node: LocIndexNode
@property
def lineno(self) -> int:
"""Line number for the node."""
return self.node.lineno
@property
def col_offset(self) -> int:
"""Col offset for the node."""
return self.node.col_offset
@property
def end_lineno(self) -> Optional[int]:
"""End line no: Python 3.8 will have this defined, in Python 3.7 it will be None."""
eline: Optional[int] = getattr(self.node, "end_lineno", None)
return eline
@property
def end_col_offset(self) -> Optional[int]:
"""End col offset: Python 3.8 will have this defined, in Python 3.7 it will be None."""
ecol: Optional[int] = getattr(self.node, "end_col_offset", None)
return ecol
####################################################################################################
# MUTATE AST Definitions
# Includes MutateBase and Mixins for 3.7 and 3.8 AST support
# MutateAST is constructed from Base + Mixins depending on sys.version_info
####################################################################################################
class MutateBase(ast.NodeTransformer):
"""AST NodeTransformer to replace nodes with mutations by visits."""
def __init__(
self,
target_idx: Optional[LocIndex] = None,
mutation: Optional[Any] = None,
readonly: bool = False,
src_file: Optional[Union[Path, str]] = None,
) -> None:
"""Create the AST node transformer for mutations.
If readonly is set to True then no transformations are applied;
however, the locs attribute is updated with the locations of nodes that could
be transformed. This allows the class to function both as an inspection method
and as a mutation transformer.
Note that different nodes handle the ``LocIndex`` differently based on the context. For
example, ``visit_BinOp`` uses direct AST types, while ``visit_NameConstant`` uses values,
and ``visit_AugAssign`` uses custom strings in a dictionary mapping.
All ``visit_`` methods take the ``node`` as an argument and rely on the class properties.
This MutateBase class is designed to be implemented with the appropriate Mixin Class
for supporting either Python 3.7 or Python 3.8 ASTs. If the base class is used
        directly, certain operations - like ``visit_If`` and ``visit_NameConstant`` - will not
        work as intended.
Args:
            target_idx: Location index for the mutation in the AST
            mutation: the mutation to apply, may be a type or a value
readonly: flag for read-only operations, used to visit nodes instead of transform
src_file: Source file name, used for logging purposes
"""
self.locs: Set[LocIndex] = set()
# managed via @property
self._target_idx = target_idx
self._mutation = mutation
self._readonly = readonly
self._src_file = src_file
@property
def target_idx(self) -> Optional[LocIndex]:
"""Location index for the mutation in the AST"""
return self._target_idx
@property
def mutation(self) -> Optional[Any]:
"""The mutation to apply, may be a type or a value"""
return self._mutation
@property
def readonly(self) -> bool:
"""A flag for read-only operations, used to visit nodes instead of transform"""
return self._readonly
@property
def src_file(self) -> Optional[Union[Path, str]]:
"""Source file name, used for logging purposes"""
return self._src_file
@property
def constant_type(self) -> Union[Type[ast.NameConstant], Type[ast.Constant]]:
"""Overridden using the MixinClasses for NameConstant(3.7) vs. Constant(3.8)."""
raise NotImplementedError
def visit_AugAssign(self, node: ast.AugAssign) -> ast.AST:
"""AugAssign is ``-=, +=, /=, *=`` for augmented assignment."""
self.generic_visit(node)
log_header = f"visit_AugAssign: {self.src_file}:"
# custom mapping of string keys to ast operations that can be used
# in the nodes since these overlap with BinOp types
aug_mappings = {
"AugAssign_Add": ast.Add,
"AugAssign_Sub": ast.Sub,
"AugAssign_Mult": ast.Mult,
"AugAssign_Div": ast.Div,
}
rev_mappings = {v: k for k, v in aug_mappings.items()}
idx_op = rev_mappings.get(type(node.op), None)
# edge case protection in case the mapping isn't known for substitution
# in that instance, return the node and take no action
if not idx_op:
LOGGER.debug(
"%s (%s, %s): unknown aug_assignment: %s",
log_header,
node.lineno,
node.col_offset,
type(node.op),
)
return node
node_span = NodeSpan(node)
idx = LocIndex(
ast_class="AugAssign",
lineno=node_span.lineno,
col_offset=node_span.col_offset,
op_type=idx_op,
end_lineno=node_span.end_lineno,
end_col_offset=node_span.end_col_offset,
)
self.locs.add(idx)
if idx == self.target_idx and self.mutation in aug_mappings and not self.readonly:
LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
return ast.copy_location(
ast.AugAssign(
target=node.target,
op=aug_mappings[self.mutation](), # awkward syntax to call type from mapping
value=node.value,
),
node,
)
LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
return node
def visit_BinOp(self, node: ast.BinOp) -> ast.AST:
"""BinOp nodes are bit-shifts and general operators like add, divide, etc."""
self.generic_visit(node)
log_header = f"visit_BinOp: {self.src_file}:"
# default case for this node, can be BinOpBC or BinOpBS
ast_class = "BinOp"
op_type = type(node.op)
# binop_bit_cmp_types: Set[type] = {ast.BitAnd, ast.BitOr, ast.BitXor}
if op_type in {ast.BitAnd, ast.BitOr, ast.BitXor}:
ast_class = "BinOpBC"
# binop_bit_shift_types: Set[type] = {ast.LShift, ast.RShift}
if op_type in {ast.LShift, ast.RShift}:
ast_class = "BinOpBS"
node_span = NodeSpan(node)
idx = LocIndex(
ast_class=ast_class,
lineno=node_span.lineno,
col_offset=node_span.col_offset,
op_type=op_type,
end_lineno=node_span.end_lineno,
end_col_offset=node_span.end_col_offset,
)
self.locs.add(idx)
if idx == self.target_idx and self.mutation and not self.readonly:
LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
return ast.copy_location(
ast.BinOp(left=node.left, op=self.mutation(), right=node.right), node
)
LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
return node
def visit_BoolOp(self, node: ast.BoolOp) -> ast.AST:
"""Boolean operations, AND/OR."""
self.generic_visit(node)
log_header = f"visit_BoolOp: {self.src_file}:"
node_span = NodeSpan(node)
idx = LocIndex(
ast_class="BoolOp",
lineno=node_span.lineno,
col_offset=node_span.col_offset,
op_type=type(node.op),
end_lineno=node_span.end_lineno,
end_col_offset=node_span.end_col_offset,
)
self.locs.add(idx)
if idx == self.target_idx and self.mutation and not self.readonly:
LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
return ast.copy_location(ast.BoolOp(op=self.mutation(), values=node.values), node)
LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
return node
def visit_Compare(self, node: ast.Compare) -> ast.AST:
"""Compare nodes are ``==, >=, is, in`` etc. There are multiple Compare categories."""
self.generic_visit(node)
log_header = f"visit_Compare: {self.src_file}:"
# taking only the first operation in the compare node
# in basic testing, things like (a==b)==1 still end up with lists of 1,
# but since the AST docs specify a list of operations this seems safer.
# idx = LocIndex("CompareIs", node.lineno, node.col_offset, type(node.ops[0]))
cmpop_is_types: Set[type] = {ast.Is, ast.IsNot}
cmpop_in_types: Set[type] = {ast.In, ast.NotIn}
op_type = type(node.ops[0])
node_span = NodeSpan(node)
locidx_kwargs = {
"lineno": node_span.lineno,
"col_offset": node_span.col_offset,
"op_type": op_type,
"end_lineno": node_span.end_lineno,
"end_col_offset": node_span.end_col_offset,
}
if op_type in cmpop_is_types:
idx = LocIndex(ast_class="CompareIs", **locidx_kwargs) # type: ignore
elif op_type in cmpop_in_types:
idx = LocIndex(ast_class="CompareIn", **locidx_kwargs) # type: ignore
else:
idx = LocIndex(ast_class="Compare", **locidx_kwargs) # type: ignore
self.locs.add(idx)
if idx == self.target_idx and self.mutation and not self.readonly:
LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
# TODO: Determine when/how this case would actually be called
if len(node.ops) > 1:
# unlikely test case where the comparison has multiple values
LOGGER.debug("%s multiple compare ops in node, len: %s", log_header, len(node.ops))
existing_ops = [i for i in node.ops]
mutation_ops = [self.mutation()] + existing_ops[1:]
return ast.copy_location(
ast.Compare(left=node.left, ops=mutation_ops, comparators=node.comparators),
node,
)
else:
# typical comparison case, will also catch (a==b)==1 as an example.
LOGGER.debug("%s single comparison node operation", log_header)
return ast.copy_location(
ast.Compare(
left=node.left, ops=[self.mutation()], comparators=node.comparators
),
node,
)
LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
return node
def visit_If(self, node: ast.If) -> ast.AST:
"""If statements e.g. If ``x == y`` is transformed to ``if True`` and ``if False``.
This visit method only works when the appropriate Mixin is used.
"""
self.generic_visit(node)
log_header = f"visit_If: {self.src_file}:"
# default for a comparison is "If_Statement" which will be changed to True/False
# If_Statement is not set as a mutation target, controlled in get_mutations function
if_type = "If_Statement"
# Py 3.7 vs 3.8 - 3.7 uses NameConstant, 3.8 uses Constant
if_mutations = {
"If_True": self.constant_type(value=True),
"If_False": self.constant_type(value=False),
}
if type(node.test) == self.constant_type:
if_type: str = f"If_{bool(node.test.value)}" # type: ignore
node_span = NodeSpan(node)
idx = LocIndex(
ast_class="If",
lineno=node_span.lineno,
col_offset=node_span.col_offset,
op_type=if_type,
end_lineno=node_span.end_lineno,
end_col_offset=node_span.end_col_offset,
)
self.locs.add(idx)
if idx == self.target_idx and self.mutation and not self.readonly:
LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
return ast.fix_missing_locations(
ast.copy_location(
ast.If(test=if_mutations[self.mutation], body=node.body, orelse=node.orelse),
node,
)
)
LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
return node
def visit_Index(self, node: ast.Index) -> ast.AST:
"""Index visit e.g. ``i[0], i[0][1]``."""
self.generic_visit(node)
log_header = f"visit_Index: {self.src_file}:"
# Index Node has a value attribute that can be either Num node or UnaryOp node
# depending on whether the value is positive or negative.
n_value = node.value
idx = None
index_mutations = {
"Index_NumZero": ast.Num(n=0),
"Index_NumPos": ast.Num(n=1),
"Index_NumNeg": ast.UnaryOp(op=ast.USub(), operand=ast.Num(n=1)),
}
node_span = NodeSpan(n_value)
locidx_kwargs = {
"ast_class": "Index",
"lineno": node_span.lineno,
"col_offset": node_span.col_offset,
"end_lineno": node_span.end_lineno,
"end_col_offset": node_span.end_col_offset,
}
# index is a non-negative number e.g. i[0], i[1]
if isinstance(n_value, ast.Num):
# positive integer case
if n_value.n != 0:
idx = LocIndex(op_type="Index_NumPos", **locidx_kwargs) # type: ignore
self.locs.add(idx)
# zero value case
else:
idx = LocIndex(op_type="Index_NumZero", **locidx_kwargs) # type: ignore
self.locs.add(idx)
# index is a negative number e.g. i[-1]
if isinstance(n_value, ast.UnaryOp):
idx = LocIndex(op_type="Index_NumNeg", **locidx_kwargs) # type: ignore
self.locs.add(idx)
if idx == self.target_idx and self.mutation and not self.readonly:
LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
mutation = index_mutations[self.mutation]
# uses AST.fix_missing_locations since the values of ast.Num and ast.UnaryOp also need
# lineno and col-offset values. This is a recursive fix.
return ast.fix_missing_locations(ast.copy_location(ast.Index(value=mutation), node))
LOGGER.debug(
"%s (%s, %s): no mutations applied.", log_header, n_value.lineno, n_value.col_offset
)
return node
def mixin_NameConstant(self, node: Union[ast.NameConstant, ast.Constant]) -> ast.AST:
"""Constants: ``True, False, None``.
This method is called by using the Mixin classes for handling the difference of
        ast.NameConstant (Py 3.7) and ast.Constant (Py 3.8).
"""
self.generic_visit(node)
log_header = f"visit_NameConstant: {self.src_file}:"
node_span = NodeSpan(node)
idx = LocIndex(
ast_class="NameConstant",
lineno=node_span.lineno,
col_offset=node_span.col_offset,
op_type=node.value,
end_lineno=node_span.end_lineno,
end_col_offset=node_span.end_col_offset,
)
self.locs.add(idx)
if idx == self.target_idx and not self.readonly:
LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
return ast.copy_location(self.constant_type(value=self.mutation), node)
LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
return node
def visit_Subscript(self, node: ast.Subscript) -> ast.AST:
"""Subscript slice operations e.g., ``x[1:]`` or ``y[::2]``."""
self.generic_visit(node)
log_header = f"visit_Subscript: {self.src_file}:"
idx = None
# Subscripts have slice properties with col/lineno, slice itself does not have line/col
# Index is also a valid Subscript slice property
slice = node.slice
if not isinstance(slice, ast.Slice):
LOGGER.debug("%s (%s, %s): not a slice node.", log_header, node.lineno, node.col_offset)
return node
# Built "on the fly" based on the various conditions for operation types
# The RangeChange options are added in the later if/else cases
slice_mutations: Dict[str, ast.Slice] = {
"Slice_UnboundUpper": ast.Slice(lower=slice.upper, upper=None, step=slice.step),
"Slice_UnboundLower": ast.Slice(lower=None, upper=slice.lower, step=slice.step),
"Slice_Unbounded": ast.Slice(lower=None, upper=None, step=slice.step),
}
node_span = NodeSpan(node)
locidx_kwargs = {
"lineno": node_span.lineno,
"col_offset": node_span.col_offset,
"end_lineno": node_span.end_lineno,
"end_col_offset": node_span.end_col_offset,
}
# Unbounded Swap Operation
# upper slice range e.g. x[:2] will become x[2:]
if slice.lower is None and slice.upper is not None:
idx = LocIndex(
ast_class="SliceUS", op_type="Slice_UnboundLower", **locidx_kwargs # type: ignore
)
self.locs.add(idx)
# lower slice range e.g. x[1:] will become x[:1]
if slice.upper is None and slice.lower is not None:
idx = LocIndex(
ast_class="SliceUS", op_type="Slice_UnboundUpper", **locidx_kwargs # type: ignore
)
self.locs.add(idx)
# Apply Mutation
if idx == self.target_idx and not self.readonly:
LOGGER.debug("%s mutating idx: %s with %s", log_header, self.target_idx, self.mutation)
mutation = slice_mutations[str(self.mutation)]
            # uses ast.fix_missing_locations since the new ast.Slice components also need
            # lineno and col-offset values. This is a recursive fix.
return ast.fix_missing_locations(
ast.copy_location(
ast.Subscript(value=node.value, slice=mutation, ctx=node.ctx), node
)
)
LOGGER.debug("%s (%s, %s): no mutations applied.", log_header, node.lineno, node.col_offset)
return node
class NameConstantMixin:
"""Mixin for Python 3.7 AST applied to MutateBase."""
@property
def constant_type(self) -> Type[ast.NameConstant]:
return ast.NameConstant
def visit_NameConstant(self, node: ast.NameConstant) -> ast.AST:
"""NameConstants: ``True, False, None``."""
return self.mixin_NameConstant(node) # type: ignore
class ConstantMixin:
"""Mixin for Python 3.8 AST applied to MutateBase."""
@property
def constant_type(self) -> Type[ast.Constant]:
return ast.Constant
def visit_Constant(self, node: ast.Constant) -> ast.AST:
"""Constants: https://bugs.python.org/issue32892
NameConstant: ``True, False, None``.
Num: isinstance(int, float)
Str: isinstance(str)
"""
# NameConstant behavior consistent with Python 3.7
if isinstance(node.value, bool) or node.value is None:
return self.mixin_NameConstant(node) # type: ignore
return node
# PYTHON 3.7
if sys.version_info < (3, 8):
class MutateAST(NameConstantMixin, MutateBase):
"""Implementation of the MutateAST class based on running environment."""
pass
# PYTHON 3.8
else:
class MutateAST(ConstantMixin, MutateBase):
"""Implementation of the MutateAST class based on running environment."""
pass
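# Illustrative sketch (not part of the original module): MutateAST is typically used
# in two passes - a readonly pass that fills ``locs`` with candidate LocIndex targets,
# then a targeted pass that rewrites a single node.
#
#     tree = ast.parse("x = a + b")
#     inspector = MutateAST(readonly=True)
#     inspector.visit(tree)
#     target = next(iter(inspector.locs))      # the LocIndex for the BinOp
#     mutated = MutateAST(target_idx=target, mutation=ast.Sub).visit(ast.parse("x = a + b"))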
####################################################################################################
# TRANSFORMER FUNCTIONS
####################################################################################################
def get_compatible_operation_sets() -> List[MutationOpSet]:
"""Utility function to return a list of compatible AST mutations with names.
All of the mutation transformation sets that are supported by mutatest are defined here.
See: https://docs.python.org/3/library/ast.html#abstract-grammar
This is used to create the search space in finding mutations for a target, and
    also to list the supported operations in the CLI help function.
Returns:
List of ``MutationOpSets`` that have substitutable operations
"""
# AST operations that are sensible mutations for each other
binop_types: Set[type] = {ast.Add, ast.Sub, ast.Div, ast.Mult, ast.Pow, ast.Mod, ast.FloorDiv}
binop_bit_cmp_types: Set[type] = {ast.BitAnd, ast.BitOr, ast.BitXor}
binop_bit_shift_types: Set[type] = {ast.LShift, ast.RShift}
cmpop_types: Set[type] = {ast.Eq, ast.NotEq, ast.Lt, ast.LtE, ast.Gt, ast.GtE}
cmpop_is_types: Set[type] = {ast.Is, ast.IsNot}
cmpop_in_types: Set[type] = {ast.In, ast.NotIn}
boolop_types: Set[type] = {ast.And, ast.Or}
# Python built-in constants (singletons) that can be used with NameConstant AST node
named_const_singletons: Set[Union[bool, None]] = {True, False, None}
# Custom augmentation ops to differentiate from bin_op types
# these are defined for substitution within the visit_AugAssign node and need to match
aug_assigns: Set[str] = {"AugAssign_Add", "AugAssign_Sub", "AugAssign_Mult", "AugAssign_Div"}
# Custom references for substitutions of zero, positive, and negative iterable indicies
index_types: Set[str] = {"Index_NumPos", "Index_NumNeg", "Index_NumZero"}
# Custom references for If statement substitutions
# only If_True and If_False will be applied as mutations
if_types: Set[str] = {"If_True", "If_False", "If_Statement"}
# Custom references for subscript substitutions for slice mutations
slice_bounded_types: Set[str] = {"Slice_UnboundUpper", "Slice_UnboundLower", "Slice_Unbounded"}
return [
MutationOpSet(
name="AugAssign",
desc="Augmented assignment e.g. += -= /= *=",
operations=aug_assigns,
category=CATEGORIES["AugAssign"],
),
MutationOpSet(
name="BinOp",
desc="Binary operations e.g. + - * / %",
operations=binop_types,
category=CATEGORIES["BinOp"],
),
MutationOpSet(
name="BinOp Bit Comparison",
desc="Bitwise comparison operations e.g. x & y, x | y, x ^ y",
operations=binop_bit_cmp_types,
category=CATEGORIES["BinOpBC"],
),
MutationOpSet(
name="BinOp Bit Shifts",
desc="Bitwise shift operations e.g. << >>",
operations=binop_bit_shift_types,
category=CATEGORIES["BinOpBS"],
),
MutationOpSet(
name="BoolOp",
desc="Boolean operations e.g. and or",
operations=boolop_types,
category=CATEGORIES["BoolOp"],
),
MutationOpSet(
name="Compare",
desc="Comparison operations e.g. == >= <= > <",
operations=cmpop_types,
category=CATEGORIES["Compare"],
),
MutationOpSet(
name="Compare In",
desc="Compare membership e.g. in, not in",
operations=cmpop_in_types,
category=CATEGORIES["CompareIn"],
),
MutationOpSet(
name="Compare Is",
desc="Comapre identity e.g. is, is not",
operations=cmpop_is_types,
category=CATEGORIES["CompareIs"],
),
MutationOpSet(
name="If",
desc="If statement tests e.g. original statement, True, False",
operations=if_types,
category=CATEGORIES["If"],
),
MutationOpSet(
name="Index",
desc="Index values for iterables e.g. i[-1], i[0], i[0][1]",
operations=index_types,
category=CATEGORIES["Index"],
),
MutationOpSet(
name="NameConstant",
desc="Named constant mutations e.g. True, False, None",
operations=named_const_singletons,
category=CATEGORIES["NameConstant"],
),
MutationOpSet(
name="Slice Unbounded Swap",
desc=(
"Slice mutations to swap lower/upper values, x[2:] (unbound upper) to x[:2],"
" (unbound lower). Steps are not changed."
),
operations=slice_bounded_types,
category=CATEGORIES["SliceUS"],
),
]
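# Illustrative sketch (not part of the original module): the op-sets returned above
# can be rendered for a CLI help display, e.g.
#
#     for op_set in get_compatible_operation_sets():
#         print(f"[{op_set.category}] {op_set.name}: {op_set.desc}")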
def get_mutations_for_target(target: LocIndex) -> Set[Any]:
"""Given a target, find all the mutations that could apply from the AST definitions.
Args:
target: the location index target
Returns:
        Set of types that can be mutated into the target location.
"""
search_space: List[Set[Any]] = [m.operations for m in get_compatible_operation_sets()]
mutation_ops: Set[Any] = set()
for potential_ops in search_space:
if target.op_type in potential_ops:
LOGGER.debug("Potential mutatest operations found for target: %s", target.op_type)
mutation_ops = potential_ops.copy()
mutation_ops.remove(target.op_type)
# Special case for If_Statement since that is a default to transform to True or False
            # but not a valid mutation target by itself
if "If_Statement" in mutation_ops:
mutation_ops.remove("If_Statement")
break
return mutation_ops
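# Illustrative usage sketch, not part of the original module. It relies only on the
# imports already used in this file (``ast``) and the public names defined above:
# a readonly pass collects candidate targets, then one mutation drawn from
# get_mutations_for_target is applied to a fresh copy of the tree.
if __name__ == "__main__":
    source = "def add(a, b):\n    return a + b\n"
    inspector = MutateAST(readonly=True, src_file="<demo>")
    inspector.visit(ast.parse(source))
    for target in sorted(inspector.locs, key=lambda loc: (loc.lineno, loc.col_offset)):
        ops = get_mutations_for_target(target)
        if not ops:
            continue
        mutated = MutateAST(target_idx=target, mutation=ops.pop()).visit(ast.parse(source))
        print(ast.dump(ast.fix_missing_locations(mutated)))
        break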
| 2.453125 | 2 |
examples/connect.py | adrienverge/aiocouch | 0 | 12788647 | import asyncio
from aiocouch import CouchDB
async def main_with():
async with CouchDB(
"http://localhost:5984", user="admin", password="<PASSWORD>"
) as couchdb:
database = await couchdb["config"]
async for doc in database.docs(["db-hta"]):
print(doc)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main_with())
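# Note (not part of the original example): on Python 3.7+ the explicit event-loop
# handling above could be replaced with a single call:
#
#     asyncio.run(main_with())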
| 2.484375 | 2 |
tests/test_hotfix_class.py | wo1fsea/PyHotfixer | 0 | 12788648 | <filename>tests/test_hotfix_class.py
# -*- coding: utf-8 -*-
"""----------------------------------------------------------------------------
Author:
<NAME>
<EMAIL>
Date:
2019/8/19
Description:
test_hotfix_class.py
----------------------------------------------------------------------------"""
import unittest
import os
import sys
import shutil
from pyhotfixer import hotfix
FILE_NAME = "class_test.py"
FILE_NAME_V1 = "class_test_v1.py"
FILE_NAME_V2 = "class_test_v2.py"
class HotfixClassTestCase(unittest.TestCase):
def setUp(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
self.module_file = os.path.join(cur_dir, FILE_NAME)
self.module_file_v1 = os.path.join(cur_dir, FILE_NAME_V1)
self.module_file_v2 = os.path.join(cur_dir, FILE_NAME_V2)
if os.path.exists(self.module_file):
os.remove(self.module_file)
def tearDown(self):
if os.path.exists(self.module_file):
os.remove(self.module_file)
def test_hotfix_class(self):
shutil.copy(self.module_file_v1, self.module_file)
sys.modules.pop("class_test", None)
import class_test
hotfix_class_obj = class_test.HotfixClass()
self.assertEqual(hotfix_class_obj.no_hotfix_data, 1)
self.assertEqual(hotfix_class_obj.hotfix_data, 1)
self.assertEqual(hotfix_class_obj.no_hotfix_method(), 1)
self.assertEqual(hotfix_class_obj.hotfix_method(), 1)
self.assertEqual(hotfix_class_obj.no_hotfix_classmethod(), 1)
self.assertEqual(hotfix_class_obj.hotfix_classmethod(), 1)
self.assertEqual(hotfix_class_obj.hotfix_property, 1)
self.assertEqual(hotfix_class_obj.no_hotfix_property, 1)
self.assertEqual(hotfix_class_obj.replace_data_with_func, 1)
self.assertEqual(hotfix_class_obj.replace_data_with_static_method, 1)
self.assertEqual(hotfix_class_obj.replace_data_with_class_method, 1)
self.assertEqual(class_test.HotfixClass.InnerClass1.func(), 1)
self.assertEqual(class_test.HotfixClass.InnerClass2.func(), 1)
self.assertEqual(class_test.HotfixClass.InnerClass, class_test.HotfixClass.InnerClass1)
no_hotfix_class_obj = class_test.NoHotfixClass()
self.assertEqual(no_hotfix_class_obj.no_hotfix_data, 1)
self.assertEqual(no_hotfix_class_obj.no_hotfix_method(), 1)
another_no_hotfix_class_obj = class_test.AnotherNoHotfixClass()
self.assertEqual(another_no_hotfix_class_obj.no_hotfix_data, 1)
self.assertEqual(another_no_hotfix_class_obj.no_hotfix_method(), 1)
shutil.copy(self.module_file_v2, self.module_file)
hotfix(["class_test"])
self.assertEqual(hotfix_class_obj.no_hotfix_data, 1)
self.assertEqual(hotfix_class_obj.hotfix_data, 2)
self.assertEqual(hotfix_class_obj.no_hotfix_method(), 1)
self.assertEqual(hotfix_class_obj.hotfix_method(), 2)
self.assertEqual(hotfix_class_obj.no_hotfix_classmethod(), 1)
self.assertEqual(hotfix_class_obj.hotfix_classmethod(), 2)
self.assertEqual(hotfix_class_obj.no_hotfix_property, 1)
self.assertEqual(hotfix_class_obj.hotfix_property, 2)
self.assertEqual(hotfix_class_obj.replace_data_with_func(), 2)
self.assertEqual(hotfix_class_obj.replace_data_with_static_method(), 2)
self.assertEqual(hotfix_class_obj.replace_data_with_class_method(), 2)
self.assertEqual(class_test.HotfixClass.InnerClass1.func(), 2)
self.assertEqual(class_test.HotfixClass.InnerClass2.func(), 2)
self.assertEqual(class_test.HotfixClass.InnerClass, class_test.HotfixClass.InnerClass2)
no_hotfix_class_obj = class_test.NoHotfixClass()
self.assertEqual(no_hotfix_class_obj.no_hotfix_data, 1)
self.assertEqual(no_hotfix_class_obj.no_hotfix_method(), 1)
another_no_hotfix_class_obj = class_test.AnotherNoHotfixClass()
self.assertEqual(another_no_hotfix_class_obj.no_hotfix_data, 1)
self.assertEqual(another_no_hotfix_class_obj.no_hotfix_method(), 1) | 2.53125 | 3 |
scaffoldgraph/core/fragment.py | shenwanxiang/ScaffoldGraph | 0 | 12788649 | <reponame>shenwanxiang/ScaffoldGraph<gh_stars>0
"""
scaffoldgraph.core.fragment
"""
from abc import ABC, abstractmethod
from rdkit import RDLogger
from rdkit.Chem import (
RWMol,
MolToSmiles,
rdmolops,
SanitizeMol,
GetMolFrags,
BondType,
CHI_UNSPECIFIED,
SANITIZE_ALL,
SANITIZE_CLEANUP,
SANITIZE_CLEANUPCHIRALITY,
SANITIZE_FINDRADICALS,
)
from rdkit.Chem.Scaffolds import MurckoScaffold
from scaffoldgraph.core.scaffold import Scaffold
rdlogger = RDLogger.logger()
class Fragmenter(ABC):
"""Abstract base class for scaffold fragmentation methods.
Fragmenters should be designed to be used for generating
    scaffold graphs. Subclasses may use attributes to store an
internal state or property used during fragmentation.
Subclasses should define the fragment method which takes a
scaffold (sg.core.Scaffold) as an argument and returns the next
set of scaffolds i.e. the next hierarchical level.
"""
def __call__(self, scaffold):
return self.fragment(scaffold)
@abstractmethod
def fragment(self, scaffold):
"""Subclasses should implement this method.
Parameters
----------
scaffold (sg.core.Scaffold): a scaffoldgraph scaffold object
Returns
-------
This method should return the next set of scaffolds. i.e.
the next hierarchical level.
"""
raise NotImplementedError()
class MurckoRingFragmenter(Fragmenter):
"""A Fragmenter class for the removal of peripheral rings from a
Murcko scaffold. Designed to be used for the generation of a scaffold
graph.
    The fragmenter's ``fragment`` method takes a scaffold (sg.core.Scaffold) and
returns the next set of murcko fragments. i.e. scaffolds with 1 less ring
than the child scaffold.
"""
def __init__(self, use_scheme_4=False):
"""Initialize the MurckoRingFragmenter
Parameters
----------
use_scheme_4: if True use scheme 4 from the paper:
The Scaffold Tree − Visualization of the Scaffold Universe
by Hierarchical Scaffold Classification. This scheme should
be used when generating scaffold trees with the original
prioritization rules.
Notes
-----
Scheme 4 (description taken from paper):
The fusion bond connecting a three-membered ring with other
rings is converted into a double bond. This rule is intended
to deal with epoxides and aziridines. This rule treats such
systems as functional groups which are removed beforehand,
rather than as rings. This reflects the situation that epoxides
are usually generated by the oxidation of a double bond, and
also many natural products exist often in forms with and
without epoxidized double bonds.
"""
super(MurckoRingFragmenter, self).__init__()
self.use_scheme_4 = use_scheme_4
def fragment(self, scaffold):
"""Fragment a scaffold into its next set of murcko fragments.
Parameters
----------
scaffold (sg.core.Scaffold): scaffold to be fragmented.
Returns
-------
parents (list): a list of the next scaffold parents.
"""
parents = [] # container for parent scaffolds
rings = scaffold.rings # ring information
for rix, ring in enumerate(rings): # Loop through all rings and remove
edit = RWMol(scaffold.mol) # Editable molecule
# Collect all removable atoms in the molecule
remove_atoms = set()
for index, atom in zip(ring.aix, ring.atoms):
if rings.info.NumAtomRings(index) == 1:
                    if atom.GetDegree() > 2:  # Invoke linker collection
collect_linker_atoms(edit.GetAtomWithIdx(index), remove_atoms)
else: # Add ring atom to removable set
remove_atoms.add(index)
else: # Atom is shared between multiple rings
correct_atom_props(edit.GetAtomWithIdx(index))
# Collect removable bonds (this needs to be done to prevent the case where when deleting
# a ring two atoms belonging to the same bond are also part of separate other rings.
# This bond must be broken to prevent an incorrect output)
remove_bonds = set()
for bix in {x for x in ring.bix if rings.info.NumBondRings(x) == 1}:
bond = edit.GetBondWithIdx(bix)
b_x, b_y = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
if b_x not in remove_atoms and b_y not in remove_atoms:
remove_bonds.add((b_x, b_y))
correct_atom_props(edit.GetAtomWithIdx(b_x))
correct_atom_props(edit.GetAtomWithIdx(b_y))
# Scheme 4 (scaffold tree rule)
if self.use_scheme_4 is not False and len(ring) == 3:
atomic_nums = [a.GetAtomicNum() for a in ring.atoms]
if len([a for a in atomic_nums if a != 1 and a != 6]) == 1:
shared = {x for x in ring.bix if rings.info.NumBondRings(x) > 1}
if len(shared) == 1:
bond = edit.GetBondWithIdx(shared.pop())
bond.SetBondType(BondType.DOUBLE)
# Remove collected atoms and bonds
for bix in remove_bonds:
edit.RemoveBond(*bix)
for aix in sorted(remove_atoms, reverse=True):
edit.RemoveAtom(aix)
# Add new parent scaffolds to parent list
for parent in get_scaffold_frags(edit):
if parent.rings.count == len(rings) - 1:
parent.removed_ring_idx = rix
parents.append(parent)
return parents
class MurckoRingSystemFragmenter(Fragmenter):
"""A Fragmenter class for the removal of peripheral ring systems from a
Murcko scaffold. Designed to be used for the generation of a scaffold
graph.
Unlike the MurckoRingFragmenter this fragmenter will not dissect fused
ring systems. This fragmenter is thus used for HierS network generation
    The fragmenter's ``fragment`` method takes a scaffold (sg.core.Scaffold) and
returns the next set of murcko fragments. i.e. scaffolds with 1 less ring
than the child scaffold.
"""
def __init__(self):
super(MurckoRingSystemFragmenter, self).__init__()
def fragment(self, scaffold):
"""Fragment a scaffold into its next set of murcko fragments.
This fragmenter will not dissect fused ring systems.
Parameters
----------
scaffold (sg.core.Scaffold): scaffold to be fragmented.
Returns
-------
parents (list): a list of the next scaffold parents.
"""
parents = []
rings = scaffold.ring_systems # ring system information
info = scaffold.rings.info
if rings.count == 1:
return []
for rix, ring in enumerate(rings):
edit = RWMol(scaffold.mol)
remove_atoms = set()
for index, atom in zip(ring.aix, ring.atoms):
if info.NumAtomRings(index) == 1:
                    if atom.GetDegree() > 2:  # Invoke linker collection
collect_linker_atoms(edit.GetAtomWithIdx(index), remove_atoms)
else:
remove_atoms.add(index)
else:
remove_atoms.add(index)
for aix in sorted(remove_atoms, reverse=True):
edit.RemoveAtom(aix)
for parent in get_scaffold_frags(edit):
if parent.ring_systems.count == len(rings) - 1:
parent.removed_ring_idx = rix
parents.append(parent)
return parents
def collect_linker_atoms(origin, remove_atoms):
"""Used during fragmentation to collect atoms that are part of a linker"""
visited = set() # Visited bond indexes
def collect(origin_atom):
for bond in origin_atom.GetBonds():
bond_id = bond.GetIdx()
if bond_id in visited or bond.IsInRing():
continue
other_atom = bond.GetOtherAtom(origin_atom)
other_degree = other_atom.GetDegree()
if other_degree == 1: # Terminal side-chain
remove_atoms.add(origin_atom.GetIdx())
remove_atoms.add(other_atom.GetIdx())
correct_atom_props(origin_atom)
visited.add(bond_id)
elif other_degree == 2: # Two neighboring atoms (remove)
remove_atoms.add(origin_atom.GetIdx())
visited.add(bond_id)
collect(other_atom)
elif other_degree > 2: # Branching point
# Determine number of non-terminal branches
non_terminal_branches = 0
for neighbor in other_atom.GetNeighbors():
if neighbor.GetDegree() != 1:
non_terminal_branches += 1
if non_terminal_branches < 3: # Continue with deletion
remove_atoms.add(origin_atom.GetIdx())
visited.add(bond_id)
collect(other_atom)
else: # Branching point links two rings
# Test for exolinker double bond
if not bond.GetBondType() == BondType.DOUBLE:
remove_atoms.add(origin_atom.GetIdx())
correct_atom_props(other_atom)
visited.add(bond_id)
pass
# Linker is recursively collected
# Linker atoms are added to the existing set 'remove_atoms'
collect(origin)
def get_scaffold_frags(frag):
"""Get fragments from a disconnected structure.
Used by fragmentation methods."""
try:
# frag.ClearComputedProps()
# frag.UpdatePropertyCache()
# Chem.GetSymmSSSR(frag)
partial_sanitization(frag)
except ValueError as e:
# This error is caught as dissecting an aromatic ring system,
# may lead to an undefined state where the resultant system
# is no longer aromatic. We make no attempt to prevent this
# but log it for reference.
# This behaviour may be desirable for a scaffold tree and is
# equivalent to the behavior of SNG (I believe...)
# logger.debug(e)
return set()
frags = {Scaffold(f) for f in GetMolFrags(frag, True, False)}
return frags
def correct_atom_props(atom):
"""Used during fragmentation to correct atom properties where an
adjacent atom is removed"""
if atom.GetIsAromatic() and atom.GetAtomicNum() != 6:
atom.SetNumExplicitHs(1)
elif atom.GetNoImplicit() or atom.GetChiralTag() != CHI_UNSPECIFIED:
atom.SetNoImplicit(False)
atom.SetNumExplicitHs(0)
atom.SetChiralTag(CHI_UNSPECIFIED)
def partial_sanitization(mol):
"""Partially sanitize a molecule (used during fragmentation)"""
SanitizeMol(mol, sanitizeOps=SANITIZE_ALL ^
SANITIZE_CLEANUP ^
SANITIZE_CLEANUPCHIRALITY ^
SANITIZE_FINDRADICALS)
def get_murcko_scaffold(mol, generic=False):
"""Get the murcko scaffold for an input molecule
Parameters
----------
mol (Chem.Mol): an rdkit molecule
generic (bool): if True return a generic scaffold (CSK)
Returns
-------
murcko (Chem.Mol): an rdkit molecule (scaffold)
"""
murcko = MurckoScaffold.GetScaffoldForMol(mol)
if generic:
murcko = MurckoScaffold.MakeScaffoldGeneric(murcko)
return murcko
def get_annotated_murcko_scaffold(mol, scaffold=None, as_mol=True):
"""Return an annotated murcko scaffold where side chains are replaced
with a dummy atom ('*').
Parameters
----------
mol (Chem.Mol): input molecule.
scaffold (Chem.Mol): If a murcko scaffold is already calculated for the mol,
this can be supplied as a template. (optional, default: None)
as_mol (bool): if True return rdkit Mol object else return
a SMILES string representation. (optional, default: True)
"""
if not scaffold:
scaffold = MurckoScaffold.GetScaffoldForMol(mol)
annotated = rdmolops.ReplaceSidechains(mol, scaffold)
if as_mol:
return annotated
if annotated is None:
return ''
return MolToSmiles(annotated)
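# Illustrative sketch (not part of the original module): annotating a molecule marks
# the side-chain attachment points with '*' dummy atoms, e.g.
#
#     from rdkit import Chem
#     mol = Chem.MolFromSmiles("CCc1ccccc1")   # ethylbenzene (arbitrary example)
#     print(get_annotated_murcko_scaffold(mol, as_mol=False))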
def get_next_murcko_fragments(murcko_scaffold, break_fused_rings=True):
"""Fragment a scaffold into its next set of murcko fragments.
The fragmenter assumes that a murcko scaffold is supplied.
Parameters
----------
murcko_scaffold (Chem.Mol): An rdkit Mol containing a murcko scaffold
break_fused_rings (bool): If True dissect fused rings (default: True)
Returns
-------
parents (list): a list of parent scaffolds (next hierarchy [num_rings - 1])
"""
rdlogger.setLevel(4)
if break_fused_rings:
fragmenter = MurckoRingFragmenter()
else:
fragmenter = MurckoRingSystemFragmenter()
parents = [f.mol for f in set(fragmenter.fragment(Scaffold(murcko_scaffold)))]
rdlogger.setLevel(3)
return parents
def get_all_murcko_fragments(mol, break_fused_rings=True):
"""Get all possible murcko fragments from a molecule through
recursive removal of peripheral rings
Parameters
----------
mol: rdkit molecule to be processed
break_fused_rings (bool): If True dissect fused rings (default: True)
Returns
-------
A list of rdkit Mols representing all possible murcko fragments
"""
rdlogger.setLevel(4)
if break_fused_rings:
fragmenter = MurckoRingFragmenter()
else:
fragmenter = MurckoRingSystemFragmenter()
mol = get_murcko_scaffold(mol)
rdmolops.RemoveStereochemistry(mol)
scaffold = Scaffold(mol)
parents = {scaffold}
def recursive_generation(child):
for parent in fragmenter.fragment(child):
if parent in parents:
continue
parents.add(parent)
recursive_generation(parent)
recursive_generation(scaffold)
rdlogger.setLevel(3)
return [f.mol for f in parents]
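# Illustrative usage sketch, not part of the original module. It assumes a working
# RDKit installation; the SMILES string is an arbitrary two-ring example.
if __name__ == "__main__":
    from rdkit import Chem
    demo = Chem.MolFromSmiles("c1ccc(Cc2ccccc2)cc1")  # diphenylmethane
    for fragment in get_all_murcko_fragments(demo):
        print(MolToSmiles(fragment))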
| 2.40625 | 2 |
sekh/highlighting.py | movermeyer/django-sekh | 4 | 12788650 | <reponame>movermeyer/django-sekh
"""Highlighting for django-sekh"""
from bs4 import BeautifulSoup
from sekh.utils import compile_terms
from sekh.settings import PROTECTED_MARKUPS
from sekh.settings import HIGHLIGHTING_PATTERN
def highlight(content, terms):
"""
Highlight the HTML with BeautifulSoup.
"""
index = 1
update_content = False
soup = BeautifulSoup(content)
terms = compile_terms(terms)
for term in terms:
for text in soup.find_all(text=term):
if text.parent.name in PROTECTED_MARKUPS:
continue
def highlight(match):
match_term = match.group(0)
return HIGHLIGHTING_PATTERN % {
'index': index, 'term': match_term}
new_text = term.sub(highlight, text)
text.replace_with(BeautifulSoup(new_text))
update_content = True
            # Reload the entire soup, because substitution
# doesn't rebuild the document tree
soup = BeautifulSoup(str(soup))
index += 1
if update_content:
return str(soup)
return content
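# Illustrative usage sketch, not part of the original module. It assumes Django
# settings are available (sekh.settings reads HIGHLIGHTING_PATTERN and
# PROTECTED_MARKUPS) and that ``compile_terms`` accepts plain strings.
if __name__ == "__main__":
    html = "<p>Django makes term highlighting easy.</p>"
    print(highlight(html, ["django", "highlighting"]))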
| 2.59375 | 3 |