id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses: 1 value)
---|---|---|
3350200
|
<gh_stars>0
import os
import pytest
from pathlib import Path
from setuptools.config import read_configuration
namespace = 'bourbaki'
pardir = Path(__file__).parent.parent
modname = next(f.name for f in (pardir / namespace).iterdir() if f.is_dir() and f.name != '__pycache__')
@pytest.fixture(scope='module')
def pkgname_from_dirs():
return '{}.{}'.format(namespace, modname)
@pytest.fixture(scope='module')
def pkgname_from_config():
conf = read_configuration(os.path.join(pardir, 'setup.cfg'))
return conf['metadata']['name']
def test_import(pkgname_from_dirs):
"""
This test demonstrates the use of a _fixture_. It's not super useful in this case, but in general if you have
tests that depend on some mutable state that you want to reinitialize each time, or some object that is immutable
but expensive to instantiate, or a connection to a database, file, or other I/O, this is a good way to do it.
The argument name 'pkgname_from_dirs' here tells pytest to call the above function with the same name, and pass
that value in for the argument's value. With the scope='module' specification up there we're telling pytest it only
needs to evaluate that fixture once for the duration of this test suite."""
print(pkgname_from_dirs)
__import__(pkgname_from_dirs)
def test_import_from_config(pkgname_from_config):
__import__(pkgname_from_config)
def test_equal_pkgnames(pkgname_from_dirs, pkgname_from_config):
assert pkgname_from_dirs == pkgname_from_config
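# A minimal follow-up sketch of the mutable-state case described in the docstring above: with the
# default function scope, pytest rebuilds the fixture for every test, so mutations cannot leak
# between tests. The fixture and test names below are hypothetical illustrations, not part of this package.
@pytest.fixture
def fresh_registry():
    return {'packages': []}
def test_registry_starts_empty(fresh_registry):
    assert fresh_registry['packages'] == []
def test_registry_mutation_is_isolated(fresh_registry):
    fresh_registry['packages'].append(namespace)
    assert fresh_registry['packages'] == [namespace]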
|
StarcoderdataPython
|
1611809
|
#046_Contagem_regressiva.py
#
from time import sleep
for i in range(0, 11):
sleep(1)
print(i)
print("Acabou")
for i in range(10, -1, -1):
sleep(1)
print(i)
print("Acabou")
|
StarcoderdataPython
|
4823343
|
<reponame>edrmonteiro/DataSciencePython<gh_stars>0
"""
Clustering with k-means
"""
import os
path = os.path.abspath(os.getcwd()) + r"/0_dataset/"
from sklearn import datasets
import numpy as np
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
# Load the dataset
iris = datasets.load_iris()
# Show how many records exist per class
unicos, quantidade = np.unique(iris.target, return_counts = True)
unicos
quantidade
# Cluster with k-means, using 3 clusters (matching the dataset's classes)
cluster = KMeans(n_clusters = 3)
cluster.fit(iris.data)
# View the three centroids
centroides = cluster.cluster_centers_
centroides
# View the cluster each record was assigned to
previsoes = cluster.labels_
previsoes
# Count the records per cluster
unicos2, quantidade2 = np.unique(previsoes, return_counts = True)
unicos2
quantidade2
# Build the contingency matrix to compare the clusters with the original classes
resultados = confusion_matrix(iris.target, previsoes)
resultados
# Plot the generated clusters, one series per group (previsoes 0, 1, or 2)
# Only columns 0 and 1 of the original data are used so the plot has 2 dimensions
plt.scatter(iris.data[previsoes == 0, 0], iris.data[previsoes == 0, 1],
c = 'green', label = 'Setosa')
plt.scatter(iris.data[previsoes == 1, 0], iris.data[previsoes == 1, 1],
c = 'red', label = 'Versicolor')
plt.scatter(iris.data[previsoes == 2, 0], iris.data[previsoes == 2, 1],
c = 'blue', label = 'Virginica')
plt.legend()
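# A minimal follow-up sketch, assuming scikit-learn's adjusted_rand_score is available: it summarizes
# the cluster/class agreement in a single, permutation-invariant number, complementing the contingency
# matrix above. The axis labels assume columns 0 and 1 of the iris data (sepal length/width in cm).
from sklearn.metrics import adjusted_rand_score
ari = adjusted_rand_score(iris.target, previsoes)
print('Adjusted Rand Index:', ari)
plt.xlabel('Sepal length (cm)')
plt.ylabel('Sepal width (cm)')
plt.show()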
|
StarcoderdataPython
|
4840009
|
from peewee import *
import datetime
from dataModels.BaseModel import BaseModel
class MachinePool(BaseModel):
name = TextField()
ipAddress = TextField()
internalIpAddress=TextField()
userId = TextField()
password = TextField()
sshFilePath=TextField(default=None)
kubeVersion = TextField()
role = TextField()
clusterId=IntegerField(default=None)
createdOn = DateTimeField(default=datetime.datetime.now)
modifiedOn = DateTimeField(default=datetime.datetime.now)
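# A minimal usage sketch, assuming BaseModel binds these models to a peewee database (not shown in
# this file); the field values below are hypothetical. Note that peewee fields are NOT NULL by
# default, so the default=None fields need null=True or explicit values for inserts to succeed.
def example_create_machine():
    return MachinePool.create(
        name='worker-1',
        ipAddress='10.0.0.5',
        internalIpAddress='192.168.0.5',
        userId='admin',
        password='change-me',
        sshFilePath='/home/admin/.ssh/id_rsa',
        kubeVersion='1.21',
        role='worker',
        clusterId=1,
    )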
|
StarcoderdataPython
|
83397
|
<reponame>FilipeMaia/euxfel2013-analysis
'''
CrystFEL geometry file conversion scripts
Author: <NAME>
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
cspad_psana_shape = (4, 8, 185, 388)
cspad_geom_shape = (1480, 1552)
def pixel_maps_from_geometry_file(fnam, return_dict = False):
"""
Return pixel and radius maps from the geometry file
Input: geometry filename
Output: x: slab-like pixel map with x coordinate of each slab pixel in the reference system of the detector
y: slab-like pixel map with y coordinate of each slab pixel in the reference system of the detector
z: slab-like pixel map with distance of each pixel from the center of the reference system.
Note:
ss || y
fs || x
vectors should be given by [x, y]
"""
f = open(fnam, 'r')
f_lines = []
for line in f:
f_lines.append(line)
keyword_list = ['min_fs', 'min_ss', 'max_fs', 'max_ss', 'fs', 'ss', 'corner_x', 'corner_y']
detector_dict = {}
panel_lines = [ x for x in f_lines if '/' in x ]
for pline in panel_lines:
items = pline.split('=')[0].split('/')
if len(items) == 2 :
panel = items[0].strip()
property = items[1].strip()
if property in keyword_list and ('bad' not in panel and 'rigid' not in panel):
if panel not in detector_dict.keys():
detector_dict[panel] = {}
detector_dict[panel][property] = pline.split('=')[1].split(';')[0].strip()
parsed_detector_dict = {}
for p in detector_dict.keys():
parsed_detector_dict[p] = {}
parsed_detector_dict[p]['min_fs'] = int( detector_dict[p]['min_fs'] )
parsed_detector_dict[p]['max_fs'] = int( detector_dict[p]['max_fs'] )
parsed_detector_dict[p]['min_ss'] = int( detector_dict[p]['min_ss'] )
parsed_detector_dict[p]['max_ss'] = int( detector_dict[p]['max_ss'] )
parsed_detector_dict[p]['fs'] = []
parsed_detector_dict[p]['fs'].append( float( detector_dict[p]['fs'].split('x')[0] ) )
parsed_detector_dict[p]['fs'].append( float( detector_dict[p]['fs'].split('x')[1].split('y')[0] ) )
parsed_detector_dict[p]['ss'] = []
parsed_detector_dict[p]['ss'].append( float( detector_dict[p]['ss'].split('x')[0] ) )
parsed_detector_dict[p]['ss'].append( float( detector_dict[p]['ss'].split('x')[1].split('y')[0] ) )
parsed_detector_dict[p]['corner_x'] = float( detector_dict[p]['corner_x'] )
parsed_detector_dict[p]['corner_y'] = float( detector_dict[p]['corner_y'] )
max_slab_fs = np.array([parsed_detector_dict[k]['max_fs'] for k in parsed_detector_dict.keys()]).max()
max_slab_ss = np.array([parsed_detector_dict[k]['max_ss'] for k in parsed_detector_dict.keys()]).max()
x = np.zeros((max_slab_ss+1, max_slab_fs+1), dtype=np.float32)
y = np.zeros((max_slab_ss+1, max_slab_fs+1), dtype=np.float32)
for p in parsed_detector_dict.keys():
# get the pixel coords for this asic
i, j = np.meshgrid( np.arange(parsed_detector_dict[p]['max_ss'] - parsed_detector_dict[p]['min_ss'] + 1),
np.arange(parsed_detector_dict[p]['max_fs'] - parsed_detector_dict[p]['min_fs'] + 1), indexing='ij')
#
# make the y-x ( ss, fs ) vectors, using complex notation
dx = parsed_detector_dict[p]['fs'][1] + 1J * parsed_detector_dict[p]['fs'][0]
dy = parsed_detector_dict[p]['ss'][1] + 1J * parsed_detector_dict[p]['ss'][0]
r_0 = parsed_detector_dict[p]['corner_y'] + 1J * parsed_detector_dict[p]['corner_x']
#
r = i * dy + j * dx + r_0
#
y[parsed_detector_dict[p]['min_ss']: parsed_detector_dict[p]['max_ss'] + 1, parsed_detector_dict[p]['min_fs']: parsed_detector_dict[p]['max_fs'] + 1] = r.real
x[parsed_detector_dict[p]['min_ss']: parsed_detector_dict[p]['max_ss'] + 1, parsed_detector_dict[p]['min_fs']: parsed_detector_dict[p]['max_fs'] + 1] = r.imag
if return_dict :
return x, y, parsed_detector_dict
else :
return x, y
def read_geometry_file(fnam, return_preamble = False):
# get everything from the file
f = open(fnam, 'r')
f_lines = []
for line in f:
if len(line.lstrip()) > 0 and line.lstrip()[0] != ';':
f_lines.append(line)
# define the data we want
shape = (4, 16) # (quadrants, asics)
min_fs = np.zeros(shape, np.int16)
min_ss = np.zeros(shape, np.int16)
max_fs = np.zeros(shape, np.int16)
max_ss = np.zeros(shape, np.int16)
fs = np.zeros((shape[0], shape[1], 2), np.float64) # (quadrant, asic, [y, x])
ss = np.zeros((shape[0], shape[1], 2), np.float64) # (quadrant, asic, [y, x])
corner_x = np.zeros((shape[0], shape[1]), np.float64) # (quadrant, asic)
corner_y = np.zeros((shape[0], shape[1]), np.float64) # (quadrant, asic)
# get the detector distance offset from the encoded values (meters)
preamble = {}
preamble['coffset'] = float([l for l in f_lines if 'coffset' in l][0].rsplit()[2])
preamble['adu_per_eV'] = float([l for l in f_lines if 'adu_per_eV' in l][0].rsplit()[2])
preamble['res'] = float([l for l in f_lines if 'res' in l][0].rsplit()[2])
# get the values from e.g. q3a6/min_fs = 1164
for q in range(4):
for a in range(16):
# define the strings to search for
qa_string = 'q' + str(q) + 'a' + str(a) + '/'
min_fs_string = qa_string + 'min_fs'
min_ss_string = qa_string + 'min_ss'
max_fs_string = qa_string + 'max_fs'
max_ss_string = qa_string + 'max_ss'
fs_string = qa_string + 'fs'
ss_string = qa_string + 'ss'
corner_x_string = qa_string + 'corner_x'
corner_y_string = qa_string + 'corner_y'
# find the line containing the desired string
line_min_fs = [l for l in f_lines if min_fs_string in l][0]
line_min_ss = [l for l in f_lines if min_ss_string in l][0]
line_max_fs = [l for l in f_lines if max_fs_string in l][0]
line_max_ss = [l for l in f_lines if max_ss_string in l][0]
line_fs = [l for l in f_lines if fs_string in l][0]
line_ss = [l for l in f_lines if ss_string in l][0]
line_corner_x = [l for l in f_lines if corner_x_string in l][0]
line_corner_y = [l for l in f_lines if corner_y_string in l][0]
# get the values
min_fs[q, a] = int( float(line_min_fs.rsplit()[2] ))
min_ss[q, a] = int( float(line_min_ss.rsplit()[2] ))
max_fs[q, a] = int( float(line_max_fs.rsplit()[2] ))
max_ss[q, a] = int( float(line_max_ss.rsplit()[2] ))
fs[q, a, 0] = float( line_fs.rsplit()[3][:-1] )
fs[q, a, 1] = float( line_fs.rsplit()[2][:-1] )
ss[q, a, 0] = float( line_ss.rsplit()[3][:-1] )
ss[q, a, 1] = float( line_ss.rsplit()[2][:-1] )
corner_x[q, a] = float( line_corner_x.rsplit()[2] )
corner_y[q, a] = float( line_corner_y.rsplit()[2] )
if return_preamble :
return min_fs, min_ss, max_fs, max_ss, fs, ss, corner_x, corner_y, preamble
else :
return min_fs, min_ss, max_fs, max_ss, fs, ss, corner_x, corner_y
def make_yx_from_1480_1552(geom_fnam):
# now I want
x = np.zeros((1480, 1552), dtype=np.float32)
y = np.zeros((1480, 1552), dtype=np.float32)
# read from the geometry file
min_fs, min_ss, max_fs, max_ss, fs, ss, corner_x, corner_y = read_geometry_file(geom_fnam)
for q in range(4):
for a in range(16):
# get the pixel coords for this asic
i, j = np.meshgrid( np.arange(max_ss[q, a] - min_ss[q, a] + 1), np.arange(max_fs[q, a] - min_fs[q, a] + 1), indexing='ij')
#
# make the y-x ( ss, fs ) vectors, using complex notation
dx = fs[q, a][0] + 1J * fs[q, a][1]
dy = ss[q, a][0] + 1J * ss[q, a][1]
r_0 = corner_y[q, a] + 1J * corner_x[q, a]
#
r = i * dy + j * dx + r_0
#
y[min_ss[q, a]: max_ss[q, a] + 1, min_fs[q, a]: max_fs[q, a]+1] = r.real
x[min_ss[q, a]: max_ss[q, a] + 1, min_fs[q, a]: max_fs[q, a]+1] = r.imag
return y, x
def get_ij_slab_shaped(geom_fnam):
""" Example:
ij, NM = get_ij_slab_shaped('/home/amorgan/Downloads/cspad-cxia2514-taw1.geom')
cspad_geom = np.zeros(NM, dtype=np.int16)
cspad_geom[ij[0], ij[1]] = cspad_np.flatten()
"""
x, y = pixel_maps_from_geometry_file(geom_fnam)
# find the smallest size of cspad_geom that contains all
# xy values but is symmetric about the origin
N = 2 * int(max(abs(y.max()), abs(y.min()))) + 2
M = 2 * int(max(abs(x.max()), abs(x.min()))) + 2
# convert y x values to i j values
i = np.array(y, dtype=int) + N//2 - 1
j = np.array(x, dtype=int) + M//2 - 1
ij = (i.flatten(), j.flatten())
cspad_geom_shape = (N, M)
return ij, cspad_geom_shape
def make_asic_map_from_1480_1552(geom_fnam):
asics_slab = np.zeros((1480, 1552), dtype=np.int8)
asic_shape = (185, 194)
# read from the geometry file
min_fs, min_ss, max_fs, max_ss, fs, ss, corner_x, corner_y = read_geometry_file(geom_fnam)
for q in range(min_fs.shape[0]):
for a in range(min_fs.shape[1]):
asics_slab[min_ss[q, a] : max_ss[q, a] + 1, min_fs[q, a] : max_fs[q, a] + 1] = q*16 + a
asics_geom = apply_geom(geom_fnam, asics_slab + 1)
# make the background -1
asics_geom -= 1
return asics_geom
def ijkl_to_ss_fs(cspad_ijkl):
"""
0: 388 388: 2 * 388 2*388: 3*388 3*388: 4*388
(0, 0, :, :) (1, 0, :, :) (2, 0, :, :) (3, 0, :, :)
(0, 1, :, :) (1, 1, :, :) (2, 1, :, :) (3, 1, :, :)
(0, 2, :, :) (1, 2, :, :) (2, 2, :, :) (3, 2, :, :)
... ... ... ...
(0, 7, :, :) (1, 7, :, :) (2, 7, :, :) (3, 7, :, :)
"""
if cspad_ijkl.shape != cspad_psana_shape :
raise ValueError('cspad input is not the required shape:' + str(cspad_psana_shape) )
cspad_ij = np.zeros(cspad_geom_shape, dtype=cspad_ijkl.dtype)
for i in range(4):
cspad_ij[:, i * cspad_psana_shape[3]: (i+1) * cspad_psana_shape[3]] = cspad_ijkl[i].reshape((cspad_psana_shape[1] * cspad_psana_shape[2], cspad_psana_shape[3]))
return cspad_ij
def ss_fs_to_ijkl(cspad_ij):
"""
0: 388 388: 2 * 388 2*388: 3*388 3*388: 4*388
(0, 0, :, :) (1, 0, :, :) (2, 0, :, :) (3, 0, :, :)
(0, 1, :, :) (1, 1, :, :) (2, 1, :, :) (3, 1, :, :)
(0, 2, :, :) (1, 2, :, :) (2, 2, :, :) (3, 2, :, :)
... ... ... ...
(0, 7, :, :) (1, 7, :, :) (2, 7, :, :) (3, 7, :, :)
"""
if cspad_ij.shape != cspad_geom_shape :
raise ValueError('cspad input is not the required shape:' + str(cspad_geom_shape) )
cspad_ijkl = np.zeros(cspad_psana_shape, dtype=cspad_ij.dtype)
for i in range(4):
cspad_ijkl[i] = cspad_ij[:, i * cspad_psana_shape[3]: (i+1) * cspad_psana_shape[3]].reshape((cspad_ijkl.shape[1:]))
return cspad_ijkl
def make_yx_from_4_8_185_388(geom_fnam):
# now I want
x = np.zeros(cspad_psana_shape, dtype=np.float32)
y = np.zeros(cspad_psana_shape, dtype=np.float32)
# read from the geometry file
min_fs, min_ss, max_fs, max_ss, fs, ss, corner_x, corner_y = read_geometry_file(geom_fnam)
for q in range(4):
for a_8 in range(8):
for a_2 in range(2):
a = a_8 * 2 + a_2
# get the pixel coords for this asic
i, j = np.meshgrid( np.arange(max_ss[q, a] - min_ss[q, a] + 1), np.arange(max_fs[q, a] - min_fs[q, a] + 1), indexing='ij')
#
# make the y-x ( ss, fs ) vectors, using complex notation
dx = fs[q, a][0] + 1J * fs[q, a][1]
dy = ss[q, a][0] + 1J * ss[q, a][1]
r_0 = corner_y[q, a] + 1J * corner_x[q, a]
#
r = i * dy + j * dx + r_0
#
y[q, a_8, :, a_2 * 194 : (a_2+1)*194] = r.real
x[q, a_8, :, a_2 * 194 : (a_2+1)*194] = r.imag
return y, x
def apply_geom_ij_yx(yx, cspad_np):
"""
Apply the geometry from yx to reshape raw cspad data for display.
Nearest neighbour interpolation is used to place the pixels on
the larger canvas (to save time). That is, the x-y values in
the tuple yx are converted to integer pixel coordinates after
shifting.
e.g. if the cspad has a shape of (i, j, k) then
cspad_np.shape = (i, j, k, l)
yx.shape = (y, x)
y.shape = (i, j, k, l) # y-values of each pixel
x.shape = (i, j, k, l) # x-values of each pixel
Parameters
----------
yx : 2-D tuple, containing float numpy arrays
The x-y values corresponding to each pixel in cspad_np.
Each numpy array in the yx tuple must have the same shape
as cspad_np. yx values are interpreted in units of pixels.
cspad_np : numpy array
A numpy array of any shape containing the pixel
values of the cspad detector.
Returns
-------
cspad_geom : cspad_np.dtype, 2-D numpy array
The geometry corrected cspad values. The origin
(y=0, x=0) is placed in the centre of the array
(N / 2 - 1, M / 2 - 1).
"""
y = yx[0]
x = yx[1]
# find the smallest size of cspad_geom that contains all
# xy values but is symmetric about the origin
N = 2 * int(max(abs(y.max()), abs(y.min()))) + 2
M = 2 * int(max(abs(x.max()), abs(x.min()))) + 2
cspad_geom = np.zeros((N, M), dtype=cspad_np.dtype)
# convert y x values to i j values
i = np.array(y, dtype=int) + N//2 - 1
j = np.array(x, dtype=int) + M//2 - 1
# apply geometry
cspad_geom[i.flatten(), j.flatten()] = cspad_np.flatten()
return cspad_geom
def apply_geom(geom_fnam, cspad_np):
"""
Apply the cspad geometry provided in the file geom_fnam to reshape cspad_np data for display.
Nearest neighbour interpolation is used to place the pixels on
the larger canvas (to save time). Only works on "psana" shaped
arrays, e.g. (4, 8, 185, 388), or "CrystFEL" shaped arrays,
e.g. (1480, 1552).
Parameters
----------
geom_fnam : string
The file name of a geometry file used by CrystFEL.
cspad_np : numpy array, shape = (4, 8, 185, 388) or (1480, 1552)
A numpy array containing the pixel values of the
cspad detector.
Returns
-------
cspad_geom : cspad_np.dtype, 2-D numpy array
The geometry corrected cspad values. The origin
(y=0, x=0) is placed in the centre of the array
(N / 2 - 1, M / 2 - 1).
"""
# check if cspad is "psana" shaped or "CrystFel" shaped
if cspad_np.shape == (4, 8, 185, 388) :
y, x = make_yx_from_4_8_185_388(geom_fnam)
else :
x, y = pixel_maps_from_geometry_file(geom_fnam)
cspad_geom = apply_geom_ij_yx((y, x), cspad_np)
return cspad_geom
def get_ij_psana_shaped(geom_fnam):
""" Example:
ij, NM = get_ij_psana_shaped('/home/amorgan/Downloads/cspad-cxia2514-taw1.geom')
cspad_geom = np.zeros(NM, dtype=np.int16)
cspad_geom[ij[0], ij[1]] = cspad_np.flatten()
"""
y, x = make_yx_from_4_8_185_388(geom_fnam)
# find the smallest size of cspad_geom that contains all
# xy values but is symmetric about the origin
N = 2 * int(max(abs(y.max()), abs(y.min()))) + 2
M = 2 * int(max(abs(x.max()), abs(x.min()))) + 2
# convert y x values to i j values
i = np.array(y, dtype=int) + N//2 - 1
j = np.array(x, dtype=int) + M//2 - 1
ij = (i.flatten(), j.flatten())
cspad_geom_shape = (N, M)
return ij, cspad_geom_shape
def get_corners_ss_fs(q, a, cspad_geom_shape, geom_fnam):
min_fs, min_ss, max_fs, max_ss, fs, ss, corner_x, corner_y = read_geometry_file(geom_fnam)
x_asic = cspad_psana_shape[-1] // 2
y_asic = cspad_psana_shape[-2]
# make the y-x ( ss, fs ) vectors, using complex notation
dx = fs[q, a][0] + 1J * fs[q, a][1]
dy = ss[q, a][0] + 1J * ss[q, a][1]
r_00 = corner_y[q, a] + 1J * corner_x[q, a]
r_01 = r_00 + x_asic * dx
r_11 = r_00 + y_asic * dy + x_asic * dx
r_10 = r_00 + y_asic * dy
x = np.array([r_00.imag, r_01.imag, r_11.imag, r_10.imag, r_00.imag])
y = np.array([r_00.real, r_01.real, r_11.real, r_10.real, r_00.real])
# convert y x values to i j values
i = y + cspad_geom_shape[0]//2 - 1
j = x + cspad_geom_shape[1]//2 - 1
return i, j
def polarization_map(geom_fnam, z, polarization_axis = 'x'):
min_fs, min_ss, max_fs, max_ss, fs, ss, corner_x, corner_y, preamble = read_geometry_file(geom_fnam, return_preamble = True)
du = 1 / preamble['res']
y, x = make_yx_from_1480_1552(geom_fnam)
y *= du
x *= du
if polarization_axis == 'x':
polarization_map = 1 - x**2 / (z**2 + x**2 + y**2)
return polarization_map
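# A minimal usage sketch, assuming a CrystFEL geometry file exists on disk; the file name is
# hypothetical and the detector data below is synthetic.
def _example_assemble(geom_fnam='cspad.geom'):
    cspad = np.random.random(cspad_psana_shape).astype(np.float32)
    # place the psana-shaped data onto a 2-D canvas according to the geometry
    return apply_geom(geom_fnam, cspad)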
|
StarcoderdataPython
|
3397898
|
<reponame>jkpubsrc/python-module-jk-simplexml
from .HAbstractElement import HAbstractElement
class HText(HAbstractElement):
def __init__(self, text:str):
self.text = text
self.tag = None
#
def isDeepEqualTo(self, obj) -> bool:
if isinstance(obj, HText):
return obj.text == self.text
else:
return False
#
def isShallowEqualTo(self, obj) -> bool:
if isinstance(obj, HText):
return obj.text == self.text
else:
return False
#
def deepClone(self):
return HText(self.text)
#
def toPlainText(self, HWriter) -> str:
raise NotImplementedError()
#
#
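# A minimal usage sketch for HText; the text values are hypothetical.
def _example_htext():
    a = HText("hello")
    b = a.deepClone()
    assert a.isDeepEqualTo(b) and a.isShallowEqualTo(b)
    assert not a.isDeepEqualTo(HText("world"))
    return b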
|
StarcoderdataPython
|
1267
|
<reponame>hadrianmontes/jax-md<gh_stars>100-1000
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_md.space."""
from absl.testing import absltest
from absl.testing import parameterized
from jax.config import config as jax_config
from jax import random
import jax.numpy as jnp
from jax import grad, jit, jacfwd
from jax import test_util as jtu
from jax_md import space, test_util, quantity, energy
from jax_md.util import *
from functools import partial
from unittest import SkipTest
test_util.update_test_tolerance(5e-5, 5e-13)
jax_config.parse_flags_with_absl()
jax_config.enable_omnistaging()
FLAGS = jax_config.FLAGS
PARTICLE_COUNT = 10
STOCHASTIC_SAMPLES = 10
SHIFT_STEPS = 10
SPATIAL_DIMENSION = [2, 3]
BOX_FORMATS = ['scalar', 'vector', 'matrix']
if FLAGS.jax_enable_x64:
POSITION_DTYPE = [f32, f64]
else:
POSITION_DTYPE = [f32]
def make_periodic_general_test_system(N, dim, dtype, box_format):
assert box_format in BOX_FORMATS
box_size = quantity.box_size_at_number_density(N, 1.0, dim)
box = dtype(box_size)
if box_format == 'vector':
box = jnp.array(jnp.ones(dim) * box_size, dtype)
elif box_format == 'matrix':
box = jnp.array(jnp.eye(dim) * box_size, dtype)
d, s = space.periodic(jnp.diag(box) if box_format == 'matrix' else box)
d_gf, s_gf = space.periodic_general(box)
d_g, s_g = space.periodic_general(box, fractional_coordinates=False)
key = random.PRNGKey(0)
R_f = random.uniform(key, (N, dim), dtype=dtype)
R = space.transform(box, R_f)
E = jit(energy.soft_sphere_pair(d))
E_gf = jit(energy.soft_sphere_pair(d_gf))
E_g = jit(energy.soft_sphere_pair(d_g))
return R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g)
# pylint: disable=invalid-name
class SpaceTest(jtu.JaxTestCase):
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_transform(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
T = random.normal(
split2, (spatial_dimension, spatial_dimension), dtype=dtype)
R_prime_exact = jnp.array(jnp.einsum('ij,kj->ki', T, R), dtype=dtype)
R_prime = space.transform(T, R)
self.assertAllClose(R_prime_exact, R_prime)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}'.format(dim),
'spatial_dimension': dim
} for dim in SPATIAL_DIMENSION))
def test_transform_grad(self, spatial_dimension):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(split1, (PARTICLE_COUNT, spatial_dimension))
T = random.normal(split2, (spatial_dimension, spatial_dimension))
R_prime = space.transform(T, R)
energy_direct = lambda R: jnp.sum(R ** 2)
energy_indirect = lambda T, R: jnp.sum(space.transform(T, R) ** 2)
grad_direct = grad(energy_direct)(R_prime)
grad_indirect = grad(energy_indirect, 1)(T, R)
self.assertAllClose(grad_direct, grad_indirect)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_transform_inverse(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
T = random.normal(
split2, (spatial_dimension, spatial_dimension), dtype=dtype)
T_inv = space.inverse(T)
R_test = space.transform(T_inv, space.transform(T, R))
self.assertAllClose(R, R_test)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_canonicalize_displacement_or_metric(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
displacement, _ = space.periodic_general(jnp.eye(spatial_dimension))
metric = space.metric(displacement)
test_metric = space.canonicalize_displacement_or_metric(displacement)
metric = space.map_product(metric)
test_metric = space.map_product(test_metric)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.normal(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
self.assertAllClose(metric(R, R), test_metric(R, R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_displacement(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split = random.split(key)
R = random.uniform(
split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = space.map_product(space.pairwise_displacement)(R, R)
dR_wrapped = space.periodic_displacement(f32(1.0), dR)
dR_direct = dR
dr_direct = space.distance(dR)
dr_direct = jnp.reshape(dr_direct, dr_direct.shape + (1,))
if spatial_dimension == 2:
for i in range(-1, 2):
for j in range(-1, 2):
dR_shifted = dR + jnp.array([i, j], dtype=R.dtype)
dr_shifted = space.distance(dR_shifted)
dr_shifted = jnp.reshape(dr_shifted, dr_shifted.shape + (1,))
dR_direct = jnp.where(dr_shifted < dr_direct, dR_shifted, dR_direct)
dr_direct = jnp.where(dr_shifted < dr_direct, dr_shifted, dr_direct)
elif spatial_dimension == 3:
for i in range(-1, 2):
for j in range(-1, 2):
for k in range(-1, 2):
dR_shifted = dR + jnp.array([i, j, k], dtype=R.dtype)
dr_shifted = space.distance(dR_shifted)
dr_shifted = jnp.reshape(dr_shifted, dr_shifted.shape + (1,))
dR_direct = jnp.where(
dr_shifted < dr_direct, dR_shifted, dR_direct)
dr_direct = jnp.where(
dr_shifted < dr_direct, dr_shifted, dr_direct)
dR_direct = jnp.array(dR_direct, dtype=dR.dtype)
assert dR_wrapped.dtype == dtype
self.assertAllClose(dR_wrapped, dR_direct)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_shift(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2 = random.split(key, 3)
R = random.uniform(
split1, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = jnp.sqrt(f32(0.1)) * random.normal(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = jnp.where(dR > 0.49, f32(0.49), dR)
dR = jnp.where(dR < -0.49, f32(-0.49), dR)
R_shift = space.periodic_shift(f32(1.0), R, dR)
assert R_shift.dtype == R.dtype
assert jnp.all(R_shift < 1.0)
assert jnp.all(R_shift > 0.0)
dR_after = space.periodic_displacement(f32(1.0), R_shift - R)
assert dR_after.dtype == R.dtype
self.assertAllClose(dR_after, dR)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_against_periodic_general(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2, split3 = random.split(key, 4)
max_box_size = f32(10.0)
box_size = max_box_size * random.uniform(
split1, (spatial_dimension,), dtype=dtype)
transform = jnp.diag(box_size)
R = random.uniform(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R_scaled = R * box_size
dR = random.normal(
split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
disp_fn, shift_fn = space.periodic(box_size)
general_disp_fn, general_shift_fn = space.periodic_general(transform)
disp_fn = space.map_product(disp_fn)
general_disp_fn = space.map_product(general_disp_fn)
self.assertAllClose(disp_fn(R_scaled, R_scaled), general_disp_fn(R, R))
assert disp_fn(R_scaled, R_scaled).dtype == dtype
self.assertAllClose(
shift_fn(R_scaled, dR), general_shift_fn(R, dR) * box_size)
assert shift_fn(R_scaled, dR).dtype == dtype
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_against_periodic_general_grad(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
tol = 1e-13
if dtype is f32:
tol = 1e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split1, split2, split3 = random.split(key, 4)
max_box_size = f32(10.0)
box_size = max_box_size * random.uniform(
split1, (spatial_dimension,), dtype=dtype)
transform = jnp.diag(box_size)
R = random.uniform(
split2, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R_scaled = R * box_size
dR = random.normal(
split3, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
disp_fn, shift_fn = space.periodic(box_size)
general_disp_fn, general_shift_fn = space.periodic_general(transform)
disp_fn = space.map_product(disp_fn)
general_disp_fn = space.map_product(general_disp_fn)
grad_fn = grad(lambda R: jnp.sum(disp_fn(R, R) ** 2))
general_grad_fn = grad(lambda R: jnp.sum(general_disp_fn(R, R) ** 2))
self.assertAllClose(grad_fn(R_scaled), general_grad_fn(R))
assert general_grad_fn(R).dtype == dtype
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype,
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_general_dynamic(self, spatial_dimension, dtype):
key = random.PRNGKey(0)
eye = jnp.eye(spatial_dimension)
for _ in range(STOCHASTIC_SAMPLES):
key, split_T0_scale, split_T0_dT = random.split(key, 3)
key, split_T1_scale, split_T1_dT = random.split(key, 3)
key, split_t, split_R, split_dR = random.split(key, 4)
size_0 = 10.0 * random.uniform(split_T0_scale, ())
dtransform_0 = 0.5 * random.normal(
split_T0_dT, (spatial_dimension, spatial_dimension))
T_0 = jnp.array(size_0 * (eye + dtransform_0), dtype=dtype)
size_1 = 10.0 * random.uniform(split_T1_scale, (), dtype=dtype)
dtransform_1 = 0.5 * random.normal(
split_T1_dT, (spatial_dimension, spatial_dimension), dtype=dtype)
T_1 = jnp.array(size_1 * (eye + dtransform_1), dtype=dtype)
disp_fn, shift_fn = space.periodic_general(T_0)
true_disp_fn, true_shift_fn = space.periodic_general(T_1)
disp_fn = partial(disp_fn, box=T_1)
disp_fn = space.map_product(disp_fn)
true_disp_fn = space.map_product(true_disp_fn)
R = random.uniform(
split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
dR = random.normal(
split_dR, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
self.assertAllClose(
disp_fn(R, R), jnp.array(true_disp_fn(R, R), dtype=dtype))
self.assertAllClose(
shift_fn(R, dR, box=T_1), jnp.array(true_shift_fn(R, dR), dtype=dtype))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': '_dim={}_dtype={}'.format(dim, dtype.__name__),
'spatial_dimension': dim,
'dtype': dtype,
} for dim in SPATIAL_DIMENSION for dtype in POSITION_DTYPE))
def test_periodic_general_wrapped_vs_unwrapped(
self, spatial_dimension, dtype):
key = random.PRNGKey(0)
eye = jnp.eye(spatial_dimension, dtype=dtype)
tol = 1e-13
if dtype is f32:
tol = 2e-5
for _ in range(STOCHASTIC_SAMPLES):
key, split_R, split_T = random.split(key, 3)
dT = random.normal(
split_T, (spatial_dimension, spatial_dimension), dtype=dtype)
T = eye + dT + jnp.transpose(dT)
R = random.uniform(
split_R, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R0 = R
unwrapped_R = R
displacement, shift = space.periodic_general(T)
_, unwrapped_shift = space.periodic_general(T, wrapped=False)
displacement = space.map_product(displacement)
for _ in range(SHIFT_STEPS):
key, split = random.split(key)
dR = random.normal(
split, (PARTICLE_COUNT, spatial_dimension), dtype=dtype)
R = shift(R, dR)
unwrapped_R = unwrapped_shift(unwrapped_R, dR)
self.assertAllClose(
displacement(R, R0),
displacement(unwrapped_R, R0))
assert not (jnp.all(unwrapped_R > 0) and jnp.all(unwrapped_R < 1))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_energy(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
self.assertAllClose(E(R), E_gf(R_f))
self.assertAllClose(E(R), E_g(R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_force(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
self.assertAllClose(grad(E)(R), grad(E_gf)(R_f))
self.assertAllClose(grad(E)(R), grad(E_g)(R))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_shift(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
R_new = s(R, grad(E)(R))
R_gf_new = s_gf(R_f, grad(E_gf)(R_f))
R_g_new = s_g(R, grad(E_g)(R))
self.assertAllClose(R_new, space.transform(box, R_gf_new))
self.assertAllClose(R_new, R_g_new)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform(self, spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
self.assertAllClose(E_gf(R_f, box=deformed_box),
E_g(R, new_box=deformed_box))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform_grad(self,
spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
self.assertAllClose(grad(E_gf)(R_f, box=deformed_box),
grad(E_g)(R, new_box=deformed_box))
self.assertAllClose(jacfwd(E_gf)(R_f, box=deformed_box),
jacfwd(E_g)(R, new_box=deformed_box))
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_deform_shift(self,
spatial_dimension, dtype, box_format):
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
deformed_box = box * 0.9
R_new = s_g(R, grad(E_g)(R), new_box=deformed_box)
R_gf_new = space.transform(deformed_box, s_gf(R_f, grad(E_gf)(R_f)))
self.assertAllClose(R_new, R_gf_new)
@parameterized.named_parameters(jtu.cases_from_list(
{
'testcase_name': f'_dim={dim}_dtype={dtype.__name__}_box_format={box_format}',
'spatial_dimension': dim,
'dtype': dtype,
'box_format': box_format
} for dim in SPATIAL_DIMENSION
for dtype in POSITION_DTYPE
for box_format in BOX_FORMATS))
def test_periodic_general_grad_box(self, spatial_dimension, dtype, box_format):
if box_format == 'scalar':
raise SkipTest('Scalar case fails due to JAX Issue #5849.')
N = 16
R_f, R, box, (s, E), (s_gf, E_gf), (s_g, E_g) = \
make_periodic_general_test_system(N, spatial_dimension, dtype, box_format)
@grad
def box_energy_g_fn(box):
return E_g(R, new_box=box)
@grad
def box_energy_gf_fn(box):
return E_gf(R_f, box=box)
self.assertAllClose(box_energy_g_fn(box), box_energy_gf_fn(box))
if __name__ == '__main__':
absltest.main()
|
StarcoderdataPython
|
84436
|
<filename>ball_catching/config.py
#!/usr/bin/python
###############################################
# Configuration
import os
here = os.path.abspath(os.path.dirname(__file__))
params_yml = 'params.yml'
data_root = os.path.expanduser("~/ball_catching_data")
settings_root = os.path.join(here, "_files")
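# A minimal sketch of how these settings might be consumed elsewhere in the package; creating the
# data directory on first use is an assumption, not something this file does.
def ensure_data_root():
    if not os.path.isdir(data_root):
        os.makedirs(data_root)
    return data_root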
|
StarcoderdataPython
|
3390019
|
'''
Do some forced-photometry simulations to look at how errors in
astrometry affect the results. Can we do anything with forced
photometry to measure astrometric offsets? (photometer PSF + its
derivatives?)
'''
from __future__ import print_function
import sys
import os
import numpy as np
import pylab as plt
import fitsio
from astrometry.util.fits import fits_table, merge_tables
from astrometry.util.plotutils import PlotSequence, plothist, loghist
from astrometry.util.ttime import Time
from astrometry.util.util import Tan
from legacypipe.survey import LegacySurveyData, imsave_jpeg, get_rgb
from scipy.ndimage.filters import gaussian_filter
from tractor import *
from tractor.pointsource import BasicSource
pixscale = 0.262 / 3600.
class TrackingTractor(Tractor):
def __init__(self, *args, **kwargs):
super(TrackingTractor, self).__init__(*args, **kwargs)
self.reset_tracking()
def reset_tracking(self):
self.tracked_params = []
self.tracked_lnprob = []
def setParams(self, p):
self.tracked_params.append(np.array(p).copy())
super(TrackingTractor, self).setParams(p)
self.tracked_lnprob.append(self.getLogProb())
class SourceDerivatives(MultiParams, BasicSource):
def __init__(self, real, freeze, thaw, brights):
'''
*real*: The real source whose derivatives are my profiles.
*freeze*: List of parameter names to freeze before taking derivs
*thaw*: List of parameter names to thaw before taking derivs
'''
# This is a subclass of MultiParams, and we pass the brightnesses
# as our params.
super(SourceDerivatives,self).__init__(*brights)
self.real = real
self.freeze = freeze
self.thaw = thaw
self.brights = brights
self.umods = None
# forced photom calls getUnitFluxModelPatches
def getUnitFluxModelPatches(self, img, minval=0., modelMask=None):
self.real.freezeParamsRecursive(*self.freeze)
self.real.thawParamsRecursive(*self.thaw)
#print('SourceDerivatives: source has params:')
#self.real.printThawedParams()
# The derivatives will be scaled by the source brightness;
# undo that scaling.
#print('Brightness:', self.real.brightness)
counts = img.getPhotoCal().brightnessToCounts(self.real.brightness)
derivs = self.real.getParamDerivatives(img, modelMask=modelMask)
#print('SourceDerivs: derivs', derivs)
for d in derivs:
if d is not None:
d /= counts
print('Deriv: abs max', np.abs(d.patch).max(), 'range', d.patch.min(), d.patch.max(), 'sum', d.patch.sum())
# and revert...
self.real.freezeParamsRecursive(*self.thaw)
self.real.thawParamsRecursive(*self.freeze)
self.umods = derivs
return derivs
def getModelPatch(self, img, minsb=0., modelMask=None):
if self.umods is None:
return None
#print('getModelPatch()')
#print('modelMask', modelMask)
pc = img.getPhotoCal()
#counts = [pc.brightnessToCounts(b) for b in self.brights]
#print('umods', self.umods)
return (self.umods[0] * pc.brightnessToCounts(self.brights[0]) +
self.umods[1] * pc.brightnessToCounts(self.brights[1]))
def sim(nims, nsrcs, H,W, ps, dpix, nsamples, forced=True, ceres=False,
alphas=None, derivs=False):
truewcs = Tan(0., 0., W/2., H/2., -pixscale, 0., 0., pixscale,
float(W), float(H))
#ngrid = int(np.ceil(np.sqrt(nsrcs)))
#xx,yy = np.meshgrid(
assert(nsrcs == 1)
sig1 = 0.25
flux = 100.
# sig1 = 0.0025
# flux = 1.
#psf_sigma = 1.5
psf_sigma = 2.0
psfnorm = 1./(2. * np.sqrt(np.pi) * psf_sigma)
nsigma = flux * psfnorm / sig1
print('S/N:', nsigma)
realsrcs = []
derivsrcs = []
for i in range(nsrcs):
src = PointSource(RaDecPos(0., 0.), Flux(flux))
realsrcs.append(src)
if forced:
src.freezeAllBut('brightness')
if derivs:
realsrc = src
dsrc = SourceDerivatives(realsrc, ['brightness'], ['pos'],
[Flux(0.),Flux(0.)])
derivsrcs.append(dsrc)
tims = []
for i in range(nims):
v = psf_sigma**2
xx,yy = np.meshgrid(np.arange(-12,13), np.arange(-12,13))
pp = np.exp(-0.5 * (xx**2 + yy**2) / psf_sigma**2)
pp /= np.sum(pp)
psf = PixelizedPSF(pp)
#psf=GaussianMixturePSF(1., 0., 0., v, v, 0.)))
tims.append(Image(data=np.zeros((H,W), np.float32),
inverr=np.ones((H,W), np.float32) * 1./sig1,
wcs=ConstantFitsWcs(truewcs),
photocal=LinearPhotoCal(1.),
psf=psf))
opt = None
if ceres:
from tractor.ceres_optimizer import *
opt = CeresOptimizer()
# Render "true" models, add noise
tr = TrackingTractor(tims, realsrcs, optimizer=opt)
mods = []
for i,tim in enumerate(tims):
mod = tr.getModelImage(i)
mod += np.random.normal(size=mod.shape) * sig1
tim.data = mod
mods.append(mod)
if ps is not None:
plt.clf()
plt.imshow(mods[0], interpolation='nearest', origin='lower')
ps.savefig()
tr.freezeParam('images')
if derivs:
tr.catalog = Catalog(*(realsrcs + derivsrcs))
print('Params:')
tr.printThawedParams()
p0 = tr.getParams()
results = []
for isamp in range(nsamples):
#print('Sample', isamp)
if isamp % 100 == 0:
print('Sample', isamp)
tr.reset_tracking()
# Scatter the tim WCS CRPIX values
dx = np.zeros(len(tims))
dy = np.zeros(len(tims))
for i,tim in enumerate(tims):
# dx[i] = dpix * np.random.uniform(low=-1., high=1.)
# dy[i] = dpix * np.random.uniform(low=-1., high=1.)
dx[i] = dpix * np.random.normal()
dy[i] = dpix * np.random.normal()
wcs = Tan(0., 0.,
W/2. + dx[i], H/2. + dy[i],
-pixscale, 0., 0., pixscale, float(W), float(H))
tim.wcs = ConstantFitsWcs(wcs)
if ps is not None and isamp == 0:
plt.clf()
cols = int(np.ceil(np.sqrt(len(tims))))
rows = int(np.ceil(len(tims) / float(cols)))
for i,tim in enumerate(tims):
# from astrometry.util.resample import resample_with_wcs
# Yo,Xo,Yi,Xi,rims = resample_with_wcs(truewcs, tim.wcs.wcs,
# [tim.data])
# rimg = np.zeros(truewcs.shape)
# rimg[Yo,Xo] = rims[0]
# plt.subplot(rows, cols, i+1)
# plt.imshow(rimg, interpolation='nearest', origin='lower')
plt.subplot(rows, cols, i+1)
plt.imshow(tim.data, interpolation='nearest', origin='lower',
cmap='gray')
x,y = tim.wcs.positionToPixel(realsrcs[0].pos)
plt.axhline(y, color='r', alpha=0.5, lw=2)
plt.axvline(x, color='r', alpha=0.5, lw=2)
x,y = W/2, H/2
plt.axhline(y, color='b', alpha=0.5, lw=2)
plt.axvline(x, color='b', alpha=0.5, lw=2)
plt.suptitle('Astrometric scatter: +- %g pix' % dpix)
ps.savefig()
tr.setParams(p0)
track = []
if forced:
tr.optimize_forced_photometry()
else:
optargs = dict(priors=False, shared_params=False)
if alphas is not None:
optargs.update(alphas=alphas)
#tr.optimize_loop()
track.append(((None,None,None),tr.getParams(),tr.getLogProb()))
if not ceres:
for step in range(50):
dlnp,X,alpha = tr.optimizer.optimize(tr, **optargs)
track.append(((dlnp,X,alpha),tr.getParams(),tr.getLogProb()))
#print('dlnp,X,alpha', dlnp,X,alpha)
if dlnp == 0:
break
else:
tr.optimize_loop()
if forced:
results.append((dx, dy, tr.getParams()))
else:
results.append((dx, dy, tr.getParams(), track, tr.tracked_params,
tr.tracked_lnprob,
tr.getLogProb()))
if ps is not None and isamp == 0:
if derivs:
plt.clf()
tim = tims[0]
mod1 = tr.getModelImage(tim, srcs=realsrcs)
print('mod1 max value', mod1.max()/np.sum(mod1))
# save derivative params
pd = [d.getParams() for d in derivsrcs]
# zero out the dDec coefficient
for d,dp0 in zip(derivsrcs,pd):
p = dp0[:]
p[1] = 0.
d.setParams(p)
modr = tr.getModelImage(tim, srcs=derivsrcs)
# zero out the dRA coefficient, restore the dDec coeff
for d,dp0 in zip(derivsrcs,pd):
p = dp0[:]
p[0] = 0.
d.setParams(p)
modd = tr.getModelImage(tim, srcs=derivsrcs)
# restore the dRA coeff
for d,dp0 in zip(derivsrcs,pd):
d.setParams(dp0)
mod = tr.getModelImage(tim)
mx = mod.max()
ima = dict(interpolation='nearest', origin='lower',
vmin=-mx, vmax=mx, cmap='gray')
plt.subplot(2,3,1)
plt.imshow(tim.getImage(), **ima)
plt.title('data')
plt.subplot(2,3,2)
plt.imshow(mod1, **ima)
plt.title('source')
dscale = 5
plt.subplot(2,3,3)
plt.imshow(dscale * (tim.getImage() - mod1), **ima)
plt.title('(data - source) x %g' % dscale)
plt.subplot(2,3,4)
plt.imshow(modr*dscale, **ima)
plt.title('dRA x %g' % dscale)
plt.subplot(2,3,5)
plt.imshow(modd*dscale, **ima)
plt.title('dDec x %g' % dscale)
plt.subplot(2,3,6)
plt.imshow(mod, **ima)
plt.title('total')
x1,y1 = tim.wcs.positionToPixel(realsrcs[0].pos)
x2,y2 = W/2, H/2
for i in [1,2,4,5,6]:
plt.subplot(2,3,i)
plt.axhline(y1, color='r', alpha=0.5, lw=2)
plt.axvline(x1, color='r', alpha=0.5, lw=2)
plt.axhline(y2, color='b', alpha=0.5, lw=2)
plt.axvline(x2, color='b', alpha=0.5, lw=2)
ps.savefig()
return results
def compare_optimizers():
allfluxes = []
allra = []
alldec = []
alldx = []
alldy = []
alltracks = []
alllnprobtracks = []
names = []
bestlogprobs = None
#for i in range(3):
for i in range(3):
np.random.seed(seed)
name = ''
nsamples = 200
if i in [0,1]:
print()
print('LSQR Opt')
print()
alphas = None
if i == 1:
alphas = [0.1, 0.3, 1.0]
name = 'LSQR, alphas'
else:
name = 'LSQR'
results = sim(nims, nsrcs, H,W, None, 1.0, nsamples, forced=False,
alphas=alphas)
else:
print()
print('Ceres Opt')
print()
name = 'Ceres'
results = sim(nims, nsrcs, H,W, None, 1.0, nsamples, forced=False, ceres=True)
#results = sim(nims, nsrcs, H,W, None, 1.0, 10, forced=False)
names.append(name)
dx = np.array([r[0] for r in results])
dy = np.array([r[1] for r in results])
pp = np.array([r[2] for r in results])
#print('Params:', pp.shape)
tracks = [r[3] for r in results]
tracks2 = [r[4] for r in results]
flux = pp[:,2]
logprobs = np.array([r[6] for r in results])
if bestlogprobs is None:
bestlogprobs = logprobs
else:
bestlogprobs = np.maximum(bestlogprobs, logprobs)
alltracks.append(tracks)
allfluxes.append(flux)
allra.append(pp[:,0])
alldec.append(pp[:,1])
alldx.append(dx)
alldy.append(dy)
alllnprobtracks.append([r[5] for r in results])
ras = pp[:,0] - dx * pixscale
decs = pp[:,1] + dy * pixscale
meanra = np.mean(ras)
meandec = np.mean(decs)
plt.clf()
plt.scatter(dx, dy, c=flux)
plt.colorbar()
plt.xlabel('WCS Scatter x (pix)')
plt.ylabel('WCS Scatter y (pix)')
plt.axis('equal')
ax = plt.axis()
mx = max(np.abs(ax))
plt.axis([-mx,mx,-mx,mx])
plt.axhline(0., color='k', alpha=0.2)
plt.axvline(0., color='k', alpha=0.2)
plt.axis([-2,2,-2,2])
plt.title(name)
ps.savefig()
# plt.clf()
# for dxi,dyi,track in zip(dx, dy, tracks):
# tp = np.array([t[1] for t in track])
# rapix = tp[:,0] / pixscale - dxi
# decpix = tp[:,1] / pixscale + dyi
# flux = tp[:,2]
# plt.scatter(rapix, decpix, c=flux, zorder=20)
# plt.plot(rapix, decpix, 'k-', alpha=0.1, lw=2, zorder=10)
# plt.colorbar()
# plt.xlabel('RA (pix)')
# plt.ylabel('Dec (pix)')
# #plt.axis('equal')
# #plt.axis('scaled')
# ax = plt.axis()
# mx = max(np.abs(ax))
# plt.axis([-mx,mx,-mx,mx])
# plt.axhline(0., color='k', alpha=0.2)
# plt.axvline(0., color='k', alpha=0.2)
# plt.axis([-2,2,-2,2])
# plt.title(name)
# ps.savefig()
plt.clf()
for dxi,dyi,track,track2 in zip(dx, dy, tracks, tracks2):
#tp = np.array([t[1] for t in track])
#print('track2', track2)
tp = np.vstack(track2)
rapix = (tp[:,0] - dxi*pixscale - meanra ) / pixscale
decpix = (tp[:,1] + dyi*pixscale - meandec) / pixscale
#rapix = tp[:,0] / pixscale - dxi
#decpix = tp[:,1] / pixscale + dyi
#flux = tp[:,2]
#plt.scatter(rapix, decpix, c=flux, zorder=20)
plt.scatter(rapix, decpix,
c=np.arange(len(rapix))/float(len(rapix)),zorder=20)
plt.plot(rapix, decpix, 'k-', alpha=0.1, lw=2, zorder=10)
plt.colorbar()
plt.xlabel('RA (pix)')
plt.ylabel('Dec (pix)')
#plt.axis('equal')
#plt.axis('scaled')
ax = plt.axis()
mx = max(np.abs(ax))
plt.axis([-mx,mx,-mx,mx])
plt.axhline(0., color='k', alpha=0.2)
plt.axvline(0., color='k', alpha=0.2)
plt.axis([-2,2,-2,2])
plt.title(name)
ps.savefig()
# plt.xscale('symlog', linthreshx=1e-4)
# plt.yscale('symlog', linthreshy=1e-4)
# ps.savefig()
# plt.axis([-0.2, 0.2, -0.2, 0.2])
# ps.savefig()
# plt.axis([-0.02, 0.02, -0.02, 0.02])
# ps.savefig()
# plt.clf()
# plt.subplot(2,1,1)
# for dxi,track in zip(dx,tracks):
# tp = np.array([t[1] for t in track])
# rapix = tp[:,0] / pixscale
# decpix = tp[:,1] / pixscale
# flux = tp[:,2]
# plt.plot(rapix - dxi, 'o-')
# #plt.axhline(dxi)
# plt.ylabel('RA - dx (pix)')
# plt.subplot(2,1,2)
# for dyi,track in zip(dy,tracks):
# tp = np.array([t[1] for t in track])
# rapix = tp[:,0] / pixscale
# decpix = tp[:,1] / pixscale
# flux = tp[:,2]
# plt.plot(decpix + dyi, 'o-')
# #plt.axhline(dxi)
# plt.ylabel('Dec + dy (pix)')
# plt.xlabel('Opt Step')
# ps.savefig()
# plt.clf()
# for dxi,dyi,track in zip(dx, dy, tracks):
# tp = np.array([t[1] for t in track])
# rapix = tp[:,0] / pixscale - dxi
# decpix = tp[:,1] / pixscale + dyi
# #flux = tp[:,2]
# plt.plot(np.hypot(rapix, decpix), 'o-')
# plt.xlabel('Opt Step')
# plt.ylabel('Radius (pix)')
# ps.savefig()
plt.clf()
for dxi,dyi,track,track2 in zip(dx, dy, tracks, tracks2):
#tp = np.array([t[1] for t in track])
#print('track2', track2)
tp = np.vstack(track2)
#for dxi,dyi,track in zip(dx, dy, tracks):
#tp = np.array([t[1] for t in track])
rapix = (tp[:,0] - dxi*pixscale - meanra ) / pixscale
decpix = (tp[:,1] + dyi*pixscale - meandec) / pixscale
# print('Track ra', tp[:,0])
# print('Mean RA', meanra)
# print('Track dec', tp[:,1])
# print('Mean Dec', meandec)
plt.plot(np.hypot(rapix, decpix), '.-')
plt.xlabel('Opt Step')
plt.ylabel('Radius from mean (pix)')
plt.yscale('symlog', linthreshy=1e-3)
ps.savefig()
# plt.clf()
# for dxi,dyi,track in zip(dx, dy, tracks):
# tp = np.array([t[1] for t in track])
# rapix = tp[:,0] / pixscale - dxi
# decpix = tp[:,1] / pixscale + dyi
# flux = tp[:,2]
# plt.plot(np.hypot(rapix, decpix), flux, 'o-')
# plt.xlabel('Radius (pix)')
# plt.ylabel('Flux')
# ps.savefig()
#for name,tracks in zip(names, alltracks):
for name,tracks in zip(names, alllnprobtracks):
plt.clf()
for track,bestlnp in zip(tracks, bestlogprobs):
lnprob = [-(t - bestlnp) for t in track]
plt.plot(lnprob, '.-')
plt.xlabel('Opt Step')
plt.ylabel('Log-Prob gap vs best')
plt.yscale('symlog', linthreshy=1e-4)
plt.title(name)
ps.savefig()
plt.clf()
for name,flux in zip(names, allfluxes):
plt.hist(flux, bins=20, histtype='step', label=name)
plt.xlabel('Flux')
plt.legend()
ps.savefig()
plt.clf()
for dx,ra in zip(alldx,allra):
plt.plot(dx, ra, 'x')
for dy,dec in zip(alldy,alldec):
plt.plot(dy, dec, 's')
plt.xlabel('Pixel shift')
plt.ylabel('RA/Dec shift')
ps.savefig()
plt.clf()
for dx,ra in zip(alldx,allra):
A = np.empty((len(dx),2))
A[:,0] = 1.
A[:,1] = dx
r = np.linalg.lstsq(A, ra)
fit = r[0]
fitline = fit[0] + dx*fit[1]
plt.plot(dx, ra - fitline, 'x')
for dy,dec in zip(alldy,alldec):
A = np.empty((len(dy),2))
A[:,0] = 1.
A[:,1] = dy
r = np.linalg.lstsq(A, dec)
fit = r[0]
fitline = fit[0] + dy*fit[1]
plt.plot(dy, dec - fitline, 's', mfc='none')
plt.xlabel('Pixel shift')
plt.ylabel('RA/Dec shift - fit')
ps.savefig()
if __name__ == '__main__':
import datetime
ps = PlotSequence('sim')
nims = 1
nsrcs = 1
#H,W = 50,50
H,W = 21,21
us = datetime.datetime.now().microsecond
print('Setting random seed to', us)
seed = us
if True:
nsamples = 400
np.random.seed(seed)
results = sim(nims, nsrcs, H,W, None, 1.0, nsamples)
pp = np.array([p for x,y,p in results])
flux0 = pp[:,0]
np.random.seed(seed)
results = sim(nims, nsrcs, H,W, ps, 1.0, nsamples, derivs=True)
dx = np.array([x for x,y,p in results])
dy = np.array([y for x,y,p in results])
pp = np.array([p for x,y,p in results])
print('Params:', pp.shape)
flux = pp[:,0]
fluxdx = pp[:,1]
fluxdy = pp[:,2]
r = np.hypot(dx, dy)
plt.clf()
plt.plot(r, flux0, 'k.', label='Flux (no derivs)')
plt.plot(r, flux, 'b.', label='Flux')
plt.xlabel('WCS Scatter Distance (pix)')
plt.ylabel('Flux')
plt.title('Forced photometry: Astrometry sensitivity')
plt.legend(loc='lower left')
ps.savefig()
plt.clf()
plt.plot(dx, fluxdx / flux / pixscale, 'r.', label='RA deriv')
plt.plot(dy, -fluxdy / flux / pixscale, 'g.', label='Dec deriv')
ax = plt.axis()
plt.plot([-10,10],[-10,10],'k-', alpha=0.1)
mx = np.abs(np.array(ax)).max()
plt.axis([-mx,mx,-mx,mx])
plt.legend(loc='upper left')
plt.xlabel('WCS scatter (pix)')
plt.ylabel('Computed offset (pix)')
plt.title('Forced photometry: Fitting derivatives to recover scatter')
plt.axhline(0, color='k', alpha=0.1)
plt.axvline(0, color='k', alpha=0.1)
ps.savefig()
plt.clf()
plt.scatter(dx, dy, c=flux)
plt.axhline(0, color='k', alpha=0.1)
plt.axvline(0, color='k', alpha=0.1)
ax = plt.axis()
mx = np.abs(np.array(ax)).max()
plt.axis([-mx,mx,-mx,mx])
plt.xlabel('dx (pix)')
plt.ylabel('dy (pix)')
plt.title('Forced photometry: flux when fit w/derivatives')
plt.colorbar()
ps.savefig()
plt.clf()
plt.scatter(dx, dy, c=flux0)
plt.axhline(0, color='k', alpha=0.1)
plt.axvline(0, color='k', alpha=0.1)
ax = plt.axis()
mx = np.abs(np.array(ax)).max()
plt.axis([-mx,mx,-mx,mx])
plt.xlabel('dx (pix)')
plt.ylabel('dy (pix)')
plt.title('Forced photometry: flux when fit w/out derivatives')
plt.colorbar()
ps.savefig()
# i = np.argmax(dx)
# fluxdx = pp[:,1]
# fluxdy = pp[:,2]
# print('dx', dx[i], 'pixels')
# print('fluxdx', fluxdx[i])
# print('flux', flux[i])
# print('d pix', fluxdx[i] / flux[i] / pixscale)
sys.exit(0)
if False:
results = sim(nims, nsrcs, H,W, ps, 1.0, 100)
# Zoom in near zero
np.random.seed(42)
results2 = sim(nims, nsrcs, H,W, None, 0.1, 100)
results.extend(results2)
dx = np.array([x for x,y,p in results])
dy = np.array([y for x,y,p in results])
pp = np.array([p for x,y,p in results])
print('Params:', pp.shape)
flux = pp[:,0]
plt.clf()
plt.scatter(dx, dy, c=flux)
plt.colorbar()
plt.xlabel('WCS Scatter x (pix)')
plt.ylabel('WCS Scatter y (pix)')
plt.axis('equal')
ax = plt.axis()
mx = max(np.abs(ax))
plt.axis([-mx,mx,-mx,mx])
plt.axhline(0., color='k', alpha=0.2)
plt.axvline(0., color='k', alpha=0.2)
plt.axis([-2,2,-2,2])
ps.savefig()
r = np.hypot(dx, dy)
plt.clf()
plt.plot(r, flux, 'b.')
plt.xlabel('WCS Scatter Distance (pix)')
plt.ylabel('Flux')
plt.title('Forced photometry: Astrometry sensitivity')
ps.savefig()
if False:
# How does scatter in the WCS (single image) affect flux measurements?
# (this devolved into looking at differences between LSQR and Ceres)
compare_optimizers()
if True:
# Look at how scatter in WCS solutions (multiple images) affects
# flux measurements.
nims = 4
nsamples = 500
allfluxes = []
names = []
dpixes = [1.5, 1., 0.5, 0.1, 0.]
for dpix in dpixes:
# Reset the seed -- same pixel noise instantiation for
# each set, same directions of dpix scatter; all that
# changes is the dpix scaling.
np.random.seed(seed)
ns = nsamples
if dpix == 0:
ns = 1
alphas = [0.1, 0.3, 1.0]
results = sim(nims, nsrcs, H, W, ps if dpix==1. else None,
dpix, ns,
forced=False,
alphas=alphas)
#ceres=True)
pp = np.array([r[2] for r in results])
flux = pp[:,2]
allfluxes.append(flux)
names.append('+- %g pix' % dpix)
plt.clf()
bins = 50
mn = min([min(flux) for flux in allfluxes])
mx = max([max(flux) for flux in allfluxes])
bins = np.linspace(mn, mx, bins)
mx = 0
for flux,name,dpix in zip(allfluxes, names, dpixes):
if dpix == 0:
plt.axvline(flux, color='k', alpha=0.5, lw=2, label=name)
else:
n,bins,p = plt.hist(flux, bins=bins, histtype='step',label=name)
mx = max(mx, max(n))
plt.ylim(0, mx*1.05)
plt.xlabel('Flux')
plt.legend(loc='upper left')
plt.title('Astrometric scatter: %i images' % nims)
ps.savefig()
|
StarcoderdataPython
|
148340
|
<reponame>fsanges/glTools
import maya.cmds as mc
import glTools.utils.channelState
import glTools.utils.defaultAttrState
import glTools.utils.attribute
import glTools.utils.base
import glTools.utils.cleanup
import glTools.utils.colorize
import glTools.utils.component
import glTools.utils.connection
import glTools.utils.deformer
import glTools.utils.joint
import glTools.utils.mathUtils
import glTools.utils.matrix
import glTools.utils.mesh
import glTools.utils.namespace
import glTools.utils.primvar
import glTools.utils.reference
import glTools.utils.selection
import glTools.utils.shape
import glTools.utils.skinCluster
import glTools.utils.stringUtils
import glTools.utils.transform
import types
import ast
def tagCtrl(control,ctrlLod='primary',module=None,category=None):
'''
Tag transform with control data. Also makes the "rotateOrder" attribute visible in the channel box.
@param control: Control to tag
@type control: str
@param ctrlLod: Control LOD. Valid values include "primary", "secondary", "tertiary" and "costume".
@type ctrlLod: str
@param module: Control module. If empty, module attribute is skipped.
@type module: str or None
@param category: Control category classification. Used to specify general purpose multi module class groupings (ie "face"). If empty, module attribute is skipped.
@type category: str or None
'''
# ==========
# - Checks -
# ==========
# Check Control
if not mc.objExists(control):
raise Exception('Control object "'+control+'" does not exist!')
# Check Control LOD
ctrlLodList = ['primary','secondary','tertiary','allTrans','face','costume','hair','prop','misc']
if not ctrlLod in ctrlLodList: raise Exception('Invalid control LOD "'+ctrlLod+'"!')
ctrlLodIndex = ctrlLodList.index(ctrlLod)
# ===================
# - Tag Control LOD -
# ===================
lodAttr = 'ctrlLod'
if not mc.objExists(control+'.'+lodAttr):
mc.addAttr(control,ln=lodAttr,at='enum',en=':'.join(ctrlLodList))
mc.setAttr(control+'.'+lodAttr,ctrlLodIndex)
# ======================
# - Tag Control Module -
# ======================
if module:
moduleAttr = 'ctrlModule'
if not mc.objExists(control+'.'+moduleAttr):
mc.addAttr(control,ln=moduleAttr,dt='string')
else:
mc.setAttr(control+'.'+moduleAttr,l=False)
mc.setAttr(control+'.'+moduleAttr,module,type='string')
mc.setAttr(control+'.'+moduleAttr,l=True)
# ========================
# - Tag Control Category -
# ========================
if category:
categoryAttr = 'ctrlCategory'
if not mc.objExists(control+'.'+categoryAttr):
mc.addAttr(control,ln=categoryAttr,dt='string')
mc.setAttr(control+'.'+categoryAttr,category,type='string')
mc.setAttr(control+'.'+categoryAttr,l=True)
# =================
# - Clean Control -
# =================
# Set Rotate Order Keyable
try: mc.setAttr(control+'.ro',cb=True)
except: pass
# Hide Joint Attrs
if mc.objExists(control+'.radius'): mc.setAttr(control+'.radius',k=False,cb=False)
if mc.objExists(control+'.liw'): mc.setAttr(control+'.liw',k=False,cb=False)
# =================
# - Return Result -
# =================
return control
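# Example (added): hypothetical usage sketch only. Assumes a control transform
# named "lf_armFk01_ctrl" exists in the current Maya scene and that this module
# is importable as glTools.rig.utils (as the deprecation messages below suggest).
#
# import glTools.rig.utils
# glTools.rig.utils.tagCtrl('lf_armFk01_ctrl',ctrlLod='primary',module='lf_arm',category='body')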
def tagBindJoint(joint,bind=True):
'''
Tag joint as a skinCluster influence.
@param joint: Joint to tag and bind influence
@type joint: str
@param bind: Bind state.
@type bind: bool
'''
# ==========
# - Checks -
# ==========
if not mc.objExists(joint):
raise Exception('Joint "'+joint+'" does not exist!')
if mc.objectType(joint) != 'joint':
raise Exception('Object "'+joint+'" is not a valid joint!')
# =============
# - Tag Joint -
# =============
if not mc.objExists(joint+'.bindJoint'):
mc.addAttr(joint,ln='bindJoint',at='bool',dv=True)
# ===============
# - Clean Joint -
# ===============
if mc.objExists(joint+'.radius'):
mc.setAttr(joint+'.radius',k=False,cb=False)
if mc.objExists(joint+'.liw'):
mc.setAttr(joint+'.liw',k=False,cb=False)
def offsetGroup( ctrl,
pivot = None,
orientTo = None,
prefix = None ):
'''
Create offset group node.
Optionally, set custom pivot and orientation.
@param ctrl: Control or control group to create offset group for.
@type ctrl: str
@param pivot: Transform for pivot match. If None, use control pivot.
@type pivot: str or None
@param orientTo: Transform for orient match. If None, use world orientation.
@type orientTo: str or None
@param prefix: Naming prefix.
@type prefix: str or None
'''
# ==========
# - Checks -
# ==========
# Control
if not mc.objExists(ctrl):
raise Exception('Control "'+ctrl+'" does not exist!')
# Pivot
if not pivot: pivot = ctrl
if not mc.objExists(pivot):
raise Exception('Pivot "'+pivot+'" does not exist!')
# Orient To
if orientTo:
if not mc.objExists(orientTo):
raise Exception('Orient target "'+orientTo+'" does not exist!')
# Prefix
if not prefix: prefix = glTools.utils.stringUtils.stripSuffix(ctrl)
# ======================
# - Build Offset Group -
# ======================
# Create Offset Group
offsetGrp = mc.group(em=True,n=prefix+'_offsetGrp')
# Set Pivot
piv = mc.xform(pivot,q=True,ws=True,rp=True)
mc.xform(offsetGrp,ws=True,piv=piv)
# Orient Offset Group
if orientTo: mc.delete(mc.orientConstraint(orientTo,offsetGrp))
# Parent Control
mc.parent(ctrl,offsetGrp)
# =================
# - Return Result -
# =================
return offsetGrp
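# Example (added): hypothetical usage sketch only. Assumes "lf_hand_ctrl" and a
# pivot/orient target "lf_wrist_jnt" exist in the scene; names are illustrative.
#
# grp = offsetGroup('lf_hand_ctrl',pivot='lf_wrist_jnt',orientTo='lf_wrist_jnt',prefix='lf_hand')
# # grp == 'lf_hand_offsetGrp', with the control parented underneath.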
def negateTransform( ctrl,
negateGrp = None,
translate = False,
rotate = False,
scale = False,
prefix = None ):
'''
Setup transform negation node network.
@param ctrl: Control to create transform negation for.
@type ctrl: str
@param negateGrp: Negate transform. If None, create group from control.
@type negateGrp: str or None
@param translate: Negate transform translate.
@type translate: bool
@param rotate: Negate transform rotate.
@type rotate: bool
@param scale: Negate transform scale.
@type scale: bool
@param prefix: Naming prefix.
@type prefix: str or None
'''
# ==========
# - Checks -
# ==========
# Control
if not mc.objExists(ctrl):
raise Exception('Control "'+ctrl+'" does not exist!')
# Negate Group
if negateGrp:
if not mc.objExists(negateGrp):
raise Exception('Negate group "'+negateGrp+'" does not exist!')
# Prefix
if not prefix: prefix = glTools.utils.stringUtils.stripSuffix(ctrl)
# ======================
# - Build Negate Group -
# ======================
if not negateGrp:
negateGrp = mc.duplicate(ctrl,po=True,n=prefix+'_negate')[0]
glTools.utils.attribute.deleteUserAttrs(negateGrp)
mc.parent(ctrl,negateGrp)
# ======================
# - Build Negate Nodes -
# ======================
if translate:
tNegate = mc.createNode('multiplyDivide',n=prefix+'_translateNegate_multiplyDivide')
mc.connectAttr(ctrl+'.t',tNegate+'.input1',f=True)
mc.setAttr(tNegate+'.input2',-1,-1,-1)
mc.connectAttr(tNegate+'.output',negateGrp+'.t',f=True)
if rotate:
rNegate = mc.createNode('multiplyDivide',n=prefix+'_rotateNegate_multiplyDivide')
mc.connectAttr(ctrl+'.r',rNegate+'.input1',f=True)
mc.setAttr(rNegate+'.input2',-1,-1,-1)
mc.connectAttr(rNegate+'.output',negateGrp+'.r',f=True)
# Reverse Rotate Order
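# Maya rotate order indices: 0=xyz, 1=yzx, 2=zxy, 3=xzy, 4=yxz, 5=zyx.
# rotOrderMap maps each order to its reverse (xyz<->zyx, yzx<->xzy, zxy<->yxz)
# so the negated rotation on the negate group is applied in the opposite order.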
rotOrderMap = [5,3,4,1,2,0]
mc.setAttr(negateGrp+'.ro',rotOrderMap[mc.getAttr(ctrl+'.ro')])
if scale:
sNegate = mc.createNode('multiplyDivide',n=prefix+'_scaleNegate_multiplyDivide')
mc.connectAttr(ctrl+'.s',sNegate+'.input2',f=True)
mc.setAttr(sNegate+'.input1',1,1,1)
mc.setAttr(sNegate+'.operation',2) # Divide
mc.connectAttr(sNegate+'.output',negateGrp+'.s',f=True)
# =================
# - Return Result -
# =================
return negateGrp
def lockJointAttrs(jointList=[]):
'''
Lock joint attributes on the specified list of joints
@param jointList: List of joints to lock joint attributes on.
@type jointList: list
'''
# ==========
# - Checks -
# ==========
if not jointList: jointList = mc.ls(type='joint') or []
# =========================
# - Lock Joint Attributes -
# =========================
for joint in jointList:
# Lock Joint Orient
if mc.getAttr(joint+'.jointOrient',se=True):
mc.setAttr(joint+'.jointOrient',l=True)
# Lock Preferred Angle Attr
if mc.getAttr(joint+'.preferredAngle',se=True):
mc.setAttr(joint+'.preferredAngle',l=True)
# =================
# - Return Result -
# =================
return jointList
def setToDefault(ctrl):
'''
Set the attributes of the specified control to default values
@param ctrl: The control to set default attribute values for
@type ctrl: str
'''
# =================
# - Check Control -
# =================
if not mc.objExists(ctrl):
raise Exception('Control "'+ctrl+'" does not exist!')
# ==============================
# - Define Transform Constants -
# ==============================
tAttr = ['tx','ty','tz']
rAttr = ['rx','ry','rz']
sAttr = ['sx','sy','sz']
# =======================
# - Get User Attributes -
# =======================
udAttr = mc.listAttr(ctrl,ud=True,k=True)
if not udAttr: udAttr = []
cbAttr = mc.listAttr(ctrl,ud=True,cb=True)
if not cbAttr: cbAttr = []
# =====================
# - Reset to Defaults -
# =====================
for attr in tAttr:
if mc.getAttr(ctrl+'.'+attr,se=True):
mc.setAttr(ctrl+'.'+attr,0.0)
for attr in rAttr:
if mc.getAttr(ctrl+'.'+attr,se=True):
mc.setAttr(ctrl+'.'+attr,0.0)
for attr in sAttr:
if mc.getAttr(ctrl+'.'+attr,se=True):
mc.setAttr(ctrl+'.'+attr,1.0)
for attr in udAttr:
dv = mc.addAttr(ctrl+'.'+attr,q=True,dv=True)
if mc.getAttr(ctrl+'.'+attr,se=True):
mc.setAttr(ctrl+'.'+attr,dv)
for attr in cbAttr:
dv = mc.addAttr(ctrl+'.'+attr,q=True,dv=True)
if mc.getAttr(ctrl+'.'+attr,se=True):
mc.setAttr(ctrl+'.'+attr,dv)
def isCtrlZeroed(ctrl,tol=0.00001,skipLocked=True,skipNonkeyable=False,verbose=False):
'''
Check the attributes of the specified control are set to default values.
@param ctrl: The control to check default attribute values on.
@type ctrl: str
@param tol: The tolerance within which the current value must be to the default value to be considered as zeroed.
@type tol: float
@param skipLocked: Skip locked or connected channels.
@type skipLocked: bool
@param skipNonkeyable: Skip non-keyable channels.
@type skipNonkeyable: bool
@param verbose: Print details of which attribute was determined as not zeroed.
@type verbose: bool
'''
# Check Control
if not mc.objExists(ctrl):
raise Exception('Control "'+ctrl+'" does not exist!')
# Define standard transform controls
tAttr = ['tx','ty','tz']
rAttr = ['rx','ry','rz']
sAttr = ['sx','sy','sz']
# Get user defined attrs
udAttr = mc.listAttr(ctrl,ud=True,k=True)
if not udAttr: udAttr = []
cbAttr = mc.listAttr(ctrl,ud=True,cb=True)
if not cbAttr: cbAttr = []
# ============================
# - Check Attribute Defaults -
# ============================
# Translate
for attr in tAttr:
if not mc.getAttr(ctrl+'.'+attr,se=True) and skipLocked: continue
if not mc.getAttr(ctrl+'.'+attr,k=True) and skipNonkeyable: continue
if not glTools.utils.mathUtils.isEqual(mc.getAttr(ctrl+'.'+attr),0.0,tol):
if verbose: print ('Attribute "'+ctrl+'.'+attr+'" is not zeroed ('+str(mc.getAttr(ctrl+'.'+attr))+')!')
return False
# Rotate
for attr in rAttr:
if not mc.getAttr(ctrl+'.'+attr,se=True) and skipLocked: continue
if not mc.getAttr(ctrl+'.'+attr,k=True) and skipNonkeyable: continue
if not glTools.utils.mathUtils.isEqual(mc.getAttr(ctrl+'.'+attr),0.0,tol):
if verbose: print ('Attribute "'+ctrl+'.'+attr+'" is not zeroed ('+str(mc.getAttr(ctrl+'.'+attr))+')!')
return False
# Scale
for attr in sAttr:
if not mc.getAttr(ctrl+'.'+attr,se=True) and skipLocked: continue
if not mc.getAttr(ctrl+'.'+attr,k=True) and skipNonkeyable: continue
if not glTools.utils.mathUtils.isEqual(mc.getAttr(ctrl+'.'+attr),1.0,tol):
if verbose: print ('Attribute "'+ctrl+'.'+attr+'" is not zeroed ('+str(mc.getAttr(ctrl+'.'+attr))+')!')
return False
# User Defined (Keyable)
for attr in udAttr:
if not mc.getAttr(ctrl+'.'+attr,se=True) and skipLocked: continue
dv = mc.addAttr(ctrl+'.'+attr,q=True,dv=True)
if not glTools.utils.mathUtils.isEqual(mc.getAttr(ctrl+'.'+attr),dv,tol):
if verbose: print ('Attribute "'+ctrl+'.'+attr+'" is not zeroed ('+str(mc.getAttr(ctrl+'.'+attr))+')!')
return False
# Channel Box (Non-Keyable)
if not skipNonkeyable:
for attr in cbAttr:
if not mc.getAttr(ctrl+'.'+attr,se=True) and skipLocked: continue
dv = mc.addAttr(ctrl+'.'+attr,q=True,dv=True)
if not glTools.utils.mathUtils.isEqual(mc.getAttr(ctrl+'.'+attr),dv,tol):
if verbose: print ('Attribute "'+ctrl+'.'+attr+'" is not zeroed ('+str(mc.getAttr(ctrl+'.'+attr))+')!')
return False
# =================
# - Return Result -
# =================
return True
def poleVectorPosition(startJoint,midJoint,endJoint,distance=1.0):
'''
Calculate the pole vector position based on input arguments
@param startJoint: The start joint of the ik chain
@type startJoint: str
@param midJoint: The middle joint of the ik chain
@type midJoint: str
@param endJoint: The end joint of the ik chain
@type endJoint: str
@param distance: The distance factor for the pole vector position based on chain length
@type distance: float
'''
# Check joint
if not mc.objExists(startJoint):
raise Exception('Start joint "'+startJoint+'" does not exist!')
if not mc.objExists(midJoint):
raise Exception('Middle joint "'+midJoint+'" does not exist!')
if not mc.objExists(endJoint):
raise Exception('End joint "'+endJoint+'" does not exist!')
# Get joint positions
stPt = glTools.utils.base.getPosition(startJoint)
mdPt = glTools.utils.base.getPosition(midJoint)
enPt = glTools.utils.base.getPosition(endJoint)
# Get Joint lengths
stLen = glTools.utils.joint.length(startJoint)
mdLen = glTools.utils.joint.length(midJoint)
pvLen = glTools.utils.mathUtils.distanceBetween(stPt,enPt) * distance
wt = stLen/(stLen+mdLen)
# Calculate Center Point
ctPt = glTools.utils.mathUtils.averagePosition(stPt,enPt,wt)
# Calculate Pole Vector Offset
pvOffset = glTools.utils.mathUtils.offsetVector(ctPt,mdPt)
# Check Center Point Offset
if glTools.utils.mathUtils.mag(pvOffset) < 0.001:
stRotate = [i/abs(i) if (abs(i)>0) else 0 for i in mc.getAttr(startJoint+'.preferredAngle')[0]]
mdRotate = [i/abs(i) if (abs(i)>0) else 0 for i in mc.getAttr(midJoint+'.preferredAngle')[0]]
mc.setAttr(startJoint+'.r',*stRotate)
mc.setAttr(midJoint+'.r',*mdRotate)
mdPt = glTools.utils.base.getPosition(midJoint)
enPt = glTools.utils.base.getPosition(endJoint)
cnPt = glTools.utils.mathUtils.averagePosition(stPt,enPt,wt)
pvOffset = glTools.utils.mathUtils.offsetVector(cnPt,mdPt)
mc.setAttr(startJoint+'.r',0,0,0)
mc.setAttr(midJoint+'.r',0,0,0)
# Calculate poleVector
poleVec = glTools.utils.mathUtils.normalizeVector(pvOffset)
# Calculate poleVector position
pvPt = [ctPt[0]+(poleVec[0]*pvLen),ctPt[1]+(poleVec[1]*pvLen),ctPt[2]+(poleVec[2]*pvLen)]
# Return result
return pvPt
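# Example (added): hypothetical usage sketch only. Assumes an arm joint chain
# "lf_shoulder_jnt" -> "lf_elbow_jnt" -> "lf_wrist_jnt" exists in the scene.
#
# pvPos = poleVectorPosition('lf_shoulder_jnt','lf_elbow_jnt','lf_wrist_jnt',distance=1.0)
# pvLoc = mc.spaceLocator(n='lf_arm_pv_loc')[0]
# mc.xform(pvLoc,ws=True,t=pvPos)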
def ikFkBlend( blendJoints,
fkJoints,
ikJoints,
blendAttr,
translate = True,
rotate = True,
scale = True,
skipEnd = True,
useConstraints = False,
prefix = ''):
'''
Setup IK/FK joint blending using blendColor nodes
@param blendJoints: The joint chain to blend between IK and FK chains
@type blendJoints: list
@param fkJoints: Target FK joint chain
@type fkJoints: list
@param ikJoints: Target IK joint chain
@type ikJoints: list
@param blendAttr: FK to IK blend attribute
@type blendAttr: str
@param translate: Blend translate channels
@type translate: bool
@param rotate: Blend rotate channels
@type rotate: bool
@param scale: Blend scale channels
@type scale: bool
@param skipEnd: Skip chain end joint
@type skipEnd: bool
@param useConstraints: Use blended constraints instead of blendColor nodes for rotations
@type useConstraints: bool
@param prefix: Name prefix for created nodes
@type prefix: str
'''
# Check blend attribute
if not mc.objExists(blendAttr):
ctrl = blendAttr.split('.')[0]
attr = blendAttr.split('.')[-1]
if not mc.objExists(ctrl): raise Exception('Blend control "'+ctrl+'" does not exist!')
mc.addAttr(ctrl,ln=attr,min=0,max=1,dv=0,k=True)
# Check joint chains
if (len(blendJoints) != len(fkJoints)) or (len(blendJoints) != len(ikJoints)):
raise Exception('Chain length mis-match!!')
# Check Skip End
if skipEnd:
blendJoints = blendJoints[:-1]
fkJoints = fkJoints[:-1]
ikJoints = ikJoints[:-1]
# Blend Joint Translate/Rotate/Scale
tBlendNode = ''
rBlendNode = ''
sBlendNode = ''
# Blend Attribute Reverse
blendRevNode = ''
if useConstraints:
blendRevNode = mc.createNode('reverse',n=prefix+'_blendAttr_reverse')
mc.connectAttr(blendAttr,blendRevNode+'.inputX',f=True)
mc.connectAttr(blendAttr,blendRevNode+'.inputY',f=True)
mc.connectAttr(blendAttr,blendRevNode+'.inputZ',f=True)
for i in range(len(blendJoints)):
# Naming index
ind = glTools.utils.stringUtils.alphaIndex(i,upper=True)
# Translate
if translate:
# Create blend node
tBlendNode = mc.createNode('blendColors',n=prefix+'_tr'+ind+'_blendColors')
# Connect blend node
mc.connectAttr(fkJoints[i]+'.tx',tBlendNode+'.color1R',f=True)
mc.connectAttr(fkJoints[i]+'.ty',tBlendNode+'.color1G',f=True)
mc.connectAttr(fkJoints[i]+'.tz',tBlendNode+'.color1B',f=True)
mc.setAttr(tBlendNode+'.color2',0,0,0)
mc.connectAttr(blendAttr,tBlendNode+'.blender',f=True)
# Connect to joint
mc.connectAttr(tBlendNode+'.outputR',blendJoints[i]+'.tx',f=True)
mc.connectAttr(tBlendNode+'.outputG',blendJoints[i]+'.ty',f=True)
mc.connectAttr(tBlendNode+'.outputB',blendJoints[i]+'.tz',f=True)
# Rotate
if rotate:
if useConstraints:
# Create orientConstraint node
rBlendNode = mc.orientConstraint(fkJoints[i],ikJoints[i],blendJoints[i],n=prefix+'_rt'+ind+'_orientConstraint')[0]
rBlendAlias = mc.orientConstraint(rBlendNode,q=True,wal=True)
mc.connectAttr(blendAttr,rBlendNode+'.'+rBlendAlias[0],f=True)
mc.connectAttr(blendRevNode+'.outputY',rBlendNode+'.'+rBlendAlias[1],f=True)
else:
# Create blend node
rBlendNode = mc.createNode('blendColors',n=prefix+'_rt'+ind+'_blendColors')
# Connect blend node
mc.connectAttr(fkJoints[i]+'.rx',rBlendNode+'.color1R',f=True)
mc.connectAttr(fkJoints[i]+'.ry',rBlendNode+'.color1G',f=True)
mc.connectAttr(fkJoints[i]+'.rz',rBlendNode+'.color1B',f=True)
mc.connectAttr(ikJoints[i]+'.rx',rBlendNode+'.color2R',f=True)
mc.connectAttr(ikJoints[i]+'.ry',rBlendNode+'.color2G',f=True)
mc.connectAttr(ikJoints[i]+'.rz',rBlendNode+'.color2B',f=True)
mc.connectAttr(blendAttr,rBlendNode+'.blender',f=True)
# Connect to joint
mc.connectAttr(rBlendNode+'.outputR',blendJoints[i]+'.rx',f=True)
mc.connectAttr(rBlendNode+'.outputG',blendJoints[i]+'.ry',f=True)
mc.connectAttr(rBlendNode+'.outputB',blendJoints[i]+'.rz',f=True)
# Scale
if scale:
#if useConstraints:
#
# # Create scaleConstraint node
# sBlendNode = mc.scaleConstraint(fkJoints[i],ikJoints[i],blendJoints[i],n=prefix+'_sc'+ind+'_scaleConstraint')[0]
# sBlendAlias = mc.scaleConstraint(sBlendNode,q=True,wal=True)
# mc.connectAttr(blendAttr,sBlendNode+'.'+sBlendAlias[0],f=True)
# mc.connectAttr(blendRevNode+'.outputZ',sBlendNode+'.'+sBlendAlias[1],f=True)
#
#else:
# Create blend node
sBlendNode = mc.createNode('blendColors',n=prefix+'_sc'+ind+'_blendColors')
# Connect blend node
mc.connectAttr(fkJoints[i]+'.sx',sBlendNode+'.color1R',f=True)
mc.connectAttr(fkJoints[i]+'.sy',sBlendNode+'.color1G',f=True)
mc.connectAttr(fkJoints[i]+'.sz',sBlendNode+'.color1B',f=True)
mc.connectAttr(ikJoints[i]+'.sx',sBlendNode+'.color2R',f=True)
mc.connectAttr(ikJoints[i]+'.sy',sBlendNode+'.color2G',f=True)
mc.connectAttr(ikJoints[i]+'.sz',sBlendNode+'.color2B',f=True)
mc.connectAttr(blendAttr,sBlendNode+'.blender',f=True)
# Connect to joint
mc.connectAttr(sBlendNode+'.outputR',blendJoints[i]+'.sx',f=True)
mc.connectAttr(sBlendNode+'.outputG',blendJoints[i]+'.sy',f=True)
mc.connectAttr(sBlendNode+'.outputB',blendJoints[i]+'.sz',f=True)
# Return Result
return [tBlendNode,rBlendNode,sBlendNode]
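# Example (added): hypothetical usage sketch only. Assumes three matching joint
# chains (blend/FK/IK) exist in the scene; the blend attribute is created on the
# settings control by the function if it is missing.
#
# ikFkBlend( ['lf_arm01_jnt','lf_arm02_jnt','lf_arm03_jnt'],
#            ['lf_arm01_fkJnt','lf_arm02_fkJnt','lf_arm03_fkJnt'],
#            ['lf_arm01_ikJnt','lf_arm02_ikJnt','lf_arm03_ikJnt'],
#            'lf_arm_settings_ctrl.ikFkBlend',
#            prefix='lf_arm' )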
def getAllCtrls(all='all'):
'''
Return the list of all rig control names stored on the "allCtrls" attribute of the specified node.
@param all: The node that stores the control name list attribute. Generally, the top node of the rig. ("all")
@type all: str
'''
# Check all exists
if not mc.objExists(all):
raise Exception('All node '+all+' does not exist!')
# Get controls list
return mc.getAttr(all+'.allCtrls')
def setAllCtrls(all='all',ctrlList=[],append=False):
'''
Add a multi string attribute to a specified node to store a list of all rig control names
@param all: The node to add the control name list attribute to. Generally, the top node of the rig. ("all")
@type all: str
@param ctrlList: The list of control names to add to the multi string attribute.
@type ctrlList: list
@param append: Append to the multi string attribute, if it already exists. Otherwise, replace.
@type append: bool
'''
# Check all exists
if not mc.objExists(all):
raise Exception('All node '+all+' does not exist!')
# Check All Controls Attribute
if not mc.objExists(all+'.allCtrls'):
mc.addAttr(all,ln='allCtrls',dt='string',multi=True,hidden=True)
# Check append
if append:
allCtrls = getAllCtrls(all)
allCtrls.extend(ctrlList)
ctrlList = allCtrls
# Set all controls attribute array values
for i in range(len(ctrlList)):
mc.setAttr(all+'.allCtrls['+str(i)+']',ctrlList[i],type='string')
def connectControlVisOld(ctrlLodAttr=['all.primaryCtrlVis','all.secondaryCtrlVis','all.tertiaryCtrlVis']):
'''
Connect tagged control shape visibility based on the specified list of source attributes.
The control ".ctrlLod" attribute value is used as an index into the incoming source attribute list.
@param ctrlLodAttr: List of visibility control source attributes.
@type ctrlLodAttr: list
'''
# Get Control LOD node
ctrlLodNode = mc.ls(ctrlLodAttr[0],o=True)[0]
# Get Control List
ctrlList = mc.ls('*.ctrlLod',o=True)
ctrlList.sort()
# Connect Control Visibility
for ctrl in ctrlList:
# Get Control Shapes
ctrlShapes = mc.listRelatives(ctrl,s=True,ni=True,pa=True,type='nurbsCurve')
if not ctrlShapes: continue
# Get Control Lod
ctrlLod = mc.getAttr(ctrl+'.ctrlLod')
# Connect to Visibility
for ctrlShape in ctrlShapes:
# Check Existing Connections
shapeVisConn = mc.listConnections(ctrlShape+'.v',s=True,d=False,skipConversionNodes=True)
if shapeVisConn and not (shapeVisConn[0] == ctrlLodNode):
# Double check intermediate visibility connection
# !! This is a little more messy than I would like. But will keep until it breaks !! - (10/15/12)
shapeVisConnCheck = mc.listConnections(shapeVisConn[0],s=True,d=False,skipConversionNodes=True,p=True)
if not shapeVisConnCheck: shapeVisConnCheck = []
for shapeVisNodeCheck in shapeVisConnCheck:
if ctrlLodAttr.count(shapeVisNodeCheck):
mc.delete(shapeVisConn[0])
# Get connections with plug information
shapeVisConn = mc.listConnections(ctrlShape+'.v',s=True,d=False,p=True)
# Merge visibility inputs
shapePrefix = glTools.utils.stringUtils.stripSuffix(ctrlShape)
shapeVisNode = mc.createNode('multDoubleLinear',n=shapePrefix+'_allVis_multDoubleLinear')
mc.connectAttr(shapeVisConn[0],shapeVisNode+'.input1',f=True)
mc.connectAttr(ctrlLodAttr[ctrlLod],shapeVisNode+'.input2',f=True)
mc.connectAttr(shapeVisNode+'.output',ctrlShape+'.v',f=True)
else:
# No existing connection - Direct connection
try: mc.connectAttr(ctrlLodAttr[ctrlLod],ctrlShape+'.v',f=True)
except: pass
def connectControlVis( ctrlList = None,
ctrlLodNode = 'all',
ctrlLodAttr = ['primaryCtrlVis','secondaryCtrlVis','tertiaryCtrlVis'] ):
'''
Connect tagged control LOD visibility based on the specified list of source attributes.
The control ".ctrlLod" attribute value is used as an index into the incoming source attribute list.
@param ctrlList: List of controls to connect visibility for. If None, select by "ctrlLod" attribute.
@type ctrlList: list
@param ctrlLodNode: Control LOD toggle node.
@type ctrlLodNode: str
@param ctrlLodAttr: List of control LOD toggle attributes.
@type ctrlLodAttr: list
'''
# ==========
# - Checks -
# ==========
# Control LOD Toggle Node
if not mc.objExists(ctrlLodNode):
raise Exception('Control LOD toggle node "'+ctrlLodNode+'" does not exist!')
# Control List
if not ctrlList: ctrlList = mc.ls('*.ctrlLod',o=True,r=True)
# ==============================
# - Connect Control Visibility -
# ==============================
for ctrl in ctrlList:
# Get Control Lod
if not mc.attributeQuery('ctrlLod',n=ctrl,ex=True): continue
ctrlLod = mc.getAttr(ctrl+'.ctrlLod')
if ctrlLod >= len(ctrlLodAttr): continue
# Get Control Shapes
ctrlShapes = mc.listRelatives(ctrl,s=True,ni=True,pa=True,type='nurbsCurve')
# -------------------------------------------------------------------------------
# !!! If No Shapes, Show Display Handle and LOD Override (Normal/BoundingBox) !!!
# -------------------------------------------------------------------------------
if not ctrlShapes:
# Show Display Handle
mc.setAttr(ctrl+'.displayHandle',True)
# Get/Create LOD Switch Reverse Node
rev = mc.ls(mc.listConnections(ctrlLodNode+'.'+ctrlLodAttr[ctrlLod],s=False,d=True) or [],type='reverse') or []
if not rev:
rev = mc.createNode('reverse',n=ctrlLodAttr[ctrlLod]+'_reverse')
mc.connectAttr(ctrlLodNode+'.'+ctrlLodAttr[ctrlLod],rev+'.inputX',f=True)
else: rev = rev[0]
# Set/Connect Display Overrides
mc.setAttr(ctrl+'.overrideEnabled',1)
mc.connectAttr(rev+'.outputX',ctrl+'.overrideLevelOfDetail',f=True)
# Connect Control Shape Visibility
else:
for ctrlShape in ctrlShapes:
# Check Existing Connections
lodVisConn = mc.listConnections(ctrlShape+'.lodVisibility',s=True,d=False)
if lodVisConn:
# Disconnect Attribute
lodVisConn = mc.listConnections(ctrlShape+'.lodVisibility',s=True,d=False,p=True)
mc.disconnectAttr(lodVisConn[0],ctrlShape+'.lodVisibility')
# Connect LOD Visibility
try: mc.connectAttr(ctrlLodNode+'.'+ctrlLodAttr[ctrlLod],ctrlShape+'.lodVisibility',f=True)
except: print('Error connecting ctrl LOD attr to "'+ctrlShape+'.lodVisibility"!')
# =================
# - Return Result -
# =================
return ctrlList
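# Example (added): hypothetical usage sketch only. Assumes an "all" node carrying
# the listed visibility toggle attributes and controls previously tagged with tagCtrl().
#
# connectControlVis( ctrlList=None,
#                    ctrlLodNode='all',
#                    ctrlLodAttr=['primaryCtrlVis','secondaryCtrlVis','tertiaryCtrlVis'] )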
def connectCostumeCtrlVis( ctrlList = None,
ctrlVisNode = 'all',
ctrlVisAttr = 'costumeCtrlVis',
useCategory = True ):
'''
Connect costume control visibility.
@param ctrlList: List of controls to connect visibility for.
@type ctrlList: list
@param ctrlVisNode: Costume control visibility toggle node.
@type ctrlVisNode: str
@param ctrlVisAttr: Costume control visibility toggle attribute.
@type ctrlVisAttr: str
@param useCategory: Use the "ctrlCategory" tag (instead of "ctrlLod") to identify costume controls.
@type useCategory: bool
'''
# ==========
# - Checks -
# ==========
# Control Vis Toggle Node
if not mc.objExists(ctrlVisNode):
raise Exception('Visibility control toggle node "'+ctrlVisNode+'" does not exist!')
# Control List
if not ctrlList:
if useCategory: ctrlList = mc.ls('*.ctrlCategory',o=True,r=True)
else: ctrlList = mc.ls('*.ctrlLod',o=True,r=True)
# ======================================
# - Connect Costume Control Visibility -
# ======================================
# Add Control Attribute
if not mc.attributeQuery(ctrlVisAttr,n=ctrlVisNode,ex=True):
mc.addAttr(ctrlVisNode,ln=ctrlVisAttr,at='enum',en='Off:On',dv=0)
mc.setAttr(ctrlVisNode+'.'+ctrlVisAttr,cb=True)
# Connect Control Visibility
for ctrl in ctrlList:
ctrlTag = mc.getAttr(ctrl+'.ctrlLod',asString=True)
if useCategory: ctrlTag = mc.getAttr(ctrl+'.ctrlCategory')
if ctrlTag != 'costume': continue
# Connect Control Shapes
ctrlShapes = mc.listRelatives(ctrl,s=True,ni=True,pa=True,type='nurbsCurve')
if not ctrlShapes: continue
for ctrlShape in ctrlShapes:
# Check Existing Connections
lodVisConn = mc.listConnections(ctrlShape+'.lodVisibility',s=True,d=False)
if lodVisConn:
# Disconnect Attribute
lodVisConn = mc.listConnections(ctrlShape+'.lodVisibility',s=True,d=False,p=True)
mc.disconnectAttr(lodVisConn[0],ctrlShape+'.lodVisibility')
# Connect LOD Visibility
try: mc.connectAttr(ctrlVisNode+'.'+ctrlVisAttr,ctrlShape+'.lodVisibility',f=True)
except: print('Error connecting ctrl LOD attr to "'+ctrlShape+'.lodVisibility"!')
# Print Msg
print('Costume Control Shape "'+ctrlShape+'" connected to "'+ctrlVisNode+'.'+ctrlVisAttr+'"...')
def connectLoresVis(toggleAttr='all.loGeoVis'):
'''
Connect lores geometry visibility to the specified visibility toggle attribute
@param toggleAttr: Visibility toggle attribute
@type toggleAttr: str
'''
# Check visibility toggle attribute
if not mc.objExists(toggleAttr):
raise Exception('Visibility toggle attribute "'+toggleAttr+'" does not exist!')
# Get all joint list
jointList = mc.ls(type='joint')
if not jointList: return
# Iterate over all joints
for joint in jointList:
# Get all joint mesh shapes
allShapes = mc.listRelatives(joint,s=True,pa=True)
if not allShapes: continue
meshShapes = mc.ls(allShapes,type='mesh')
if not meshShapes: continue
# Connect mesh shape visibility to vis toggle attr
for meshShape in meshShapes:
mc.connectAttr(toggleAttr,meshShape+'.v',f=True)
def connectVisOld(objList=[],toggleAttr='all.hiGeoVis',attrName='',defaultValue=0):
'''
Connect node visibility to the specified visibility toggle attribute
@param objList: List of objects to toggle visibility for
@type objList: list
@param toggleAttr: Visibility toggle attribute
@type toggleAttr: str
@param attrName: Attribute nice name for UI
@type attrName: str
@param defaultValue: Default value for the visibility toggle attribute
@type defaultValue: int
'''
#### DEPRECATED WARNING
print('#### - DEPRECATED (glTools.rig.utils.connectVisOld) - ####')
# Check Object List
if type(objList) == str or type(objList) == unicode:
objList = [str(objList)]
for obj in objList:
if not mc.objExists(obj):
raise Exception('Object "'+obj+'" does not exist!')
# Check Visibility Toggle Attribute
if not mc.objExists(toggleAttr):
node = toggleAttr.split('.')[0]
if not mc.objExists(node):
raise Exception('Visibility control node "'+node+'" does not exist!')
attr = toggleAttr.split('.')[-1]
mc.addAttr(node,ln=attr,nn=attrName,at='enum',en='Off:On',dv=defaultValue)
mc.setAttr(node+'.'+attr,cb=True)
else:
mc.addAttr(toggleAttr,e=True,dv=defaultValue)
# Connect Visibility
for obj in objList:
visConn = mc.listConnections(obj+'.v',s=True,d=False,p=True)
if not visConn: visConn = []
if not visConn.count(toggleAttr):
try: mc.connectAttr(toggleAttr,obj+'.v',f=True)
except: print 'Unable to connect "'+obj+'" visibility!'
# Return Result
return toggleAttr
def connectVis( objList,
toggleNode,
toggleAttr,
toggleName='',
defaultValue=0,
force=True,
enumStr='Off:On' ):
'''
Connect node visibility to the specified visibility toggle node and attribute.
If toggle attribute doesn't exist, a new enum attr of the specified name will be created.
@param objList: List of objects to toggle visibility for
@type objList: list
@param toggleNode: Visibility toggle attribute
@type toggleNode: str
@param toggleAttr: Visibility toggle attribute
@type toggleAttr: str
@param toggleName: Attribute nice name for UI
@type toggleName: str
@param defaultValue: Default value for the visibility toggle attribute
@type defaultValue: int
@param force: Force visibility connection if incoming connection already exists.
@type force: bool
@param enumStr: Visibility toggle enum string.
@type enumStr: str
'''
# ==========
# - Checks -
# ==========
if not objList: raise Exception('Invalid or empty object list argument! (objList)')
if not toggleNode: raise Exception('Invalid or empty toggle node argument! (toggleNode)')
if not toggleAttr: raise Exception('Invalid or empty toggle attribute argument! (toggleAttr)')
# Check Object List
if isinstance(objList,types.StringTypes):
objList = [str(objList)]
if not isinstance(objList,types.ListType):
raise Exception('Invalid object list!')
for obj in objList:
if not mc.objExists(obj):
raise Exception('Object "'+obj+'" does not exist!')
# Check Toggle Node
if not mc.objExists(toggleNode):
raise Exception('Visibility control node "'+toggleNode+'" does not exist!')
# Check Toggle Name
if not toggleName: toggleName = toggleAttr
# Check Visibility Toggle Attribute
if not mc.attributeQuery(toggleAttr,n=toggleNode,ex=True):
mc.addAttr(toggleNode,ln=toggleAttr,nn=toggleName,at='enum',en=enumStr,dv=defaultValue)
mc.setAttr(toggleNode+'.'+toggleAttr,cb=True)
else:
mc.addAttr(toggleNode+'.'+toggleAttr,e=True,nn=toggleName,dv=defaultValue)
toggleNodeAttr = toggleNode+'.'+toggleAttr
# ======================
# - Connect Visibility -
# ======================
for obj in objList:
# Check Incoming Connections
nodeVisConn = mc.listConnections(obj+'.v',s=True,d=False)
if nodeVisConn:
if force:
# Connect Visibility (Force Override)
try: mc.connectAttr(toggleNodeAttr,obj+'.v',f=True)
except: pass # print('Problem overriding visibility connection! ('+toggleNodeAttr+' >> '+obj+'.v)')
else:
raise Exception('Existing visibility connection already exists! Use force=True to override...')
else:
# Connect Visibility
try: mc.connectAttr(toggleNodeAttr,obj+'.v',f=True)
except: raise Exception('Problem connecting visibility! ('+toggleNodeAttr+' >> '+obj+'.v)')
# =================
# - Return Result -
# =================
return toggleNodeAttr
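# Example (added): hypothetical usage sketch only. Creates (if needed) an "Off:On"
# enum attribute "proxyGeoVis" on the "all" node and drives the visibility of the
# listed objects; object names are illustrative.
#
# connectVis( ['proxy_geo_grp'], 'all', 'proxyGeoVis', toggleName='Proxy Geo Vis', defaultValue=1 )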
def connectDisplayTypeOld(objList,toggleAttr='all.meshDisplayType',defaultValue=0):
'''
Connect object display type to the specified enum attribute
@param objList: List of objects to toggle display type for
@type objList: list
@param toggleAttr: Display type toggle attribute
@type toggleAttr: str
@param defaultValue: Default value for the visibility toggle attribute
@type defaultValue: int
'''
#### DEPRECATED WARNING
print('#### - DEPRECATED (glTools.rig.utils.connectDisplayTypeOld) - ####')
# Check Object List
if type(objList) == str or type(objList) == unicode:
objList = [str(objList)]
for obj in objList:
if not mc.objExists(obj):
raise Exception('Object "'+obj+'" does not exist!')
# Check visibility toggle attribute
if not mc.objExists(toggleAttr):
node = toggleAttr.split('.')[0]
if not mc.objExists(node):
raise Exception('Visibility control node "'+node+'" does not exist!')
attr = toggleAttr.split('.')[-1]
mc.addAttr(node,ln=attr,at='enum',en=':Normal:Template:Reference:',dv=defaultValue)
mc.setAttr(node+'.'+attr,cb=True)
else:
mc.addAttr(toggleAttr,e=True,dv=defaultValue)
# Connect Display Type
for obj in objList:
mc.setAttr(obj+'.overrideEnabled',1)
try: mc.connectAttr(toggleAttr,obj+'.overrideDisplayType',f=True)
except:
objConn = mc.listConnections(obj+'.overrideDisplayType',s=True,d=False,p=True) or []
if objConn.count(toggleAttr):
print('Attribute "'+toggleAttr+'" is already connected to "'+obj+'.overrideDisplayType"! Skipping connectAttr...')
else:
print('Unable to connect "'+toggleAttr+'" to "'+obj+'.overrideDisplayType"!')
# Return Result
return toggleAttr
def connectDisplayType( objList,
toggleNode,
toggleAttr,
toggleName='',
defaultValue=0,
force=True,
enumStr='Normal:Template:Reference' ):
'''
Connect object display type to the specified enum attribute
@param objList: List of objects to toggle display type for
@type objList: list
@param toggleNode: Display type toggle node
@type toggleNode: str
@param toggleAttr: Display type toggle attribute name
@type toggleAttr: str
@param toggleName: Display type toggle attribute nice name for UI
@type toggleName: str
@param defaultValue: Default value for the display type toggle attribute
@type defaultValue: int
@param force: Force display type connection if incoming connection already exists.
@type force: bool
@param enumStr: Display type toggle enum string.
@type enumStr: str
'''
# ==========
# - Checks -
# ==========
if not objList: raise Exception('Invalid or empty object list argument! (objList)')
if not toggleNode: raise Exception('Invalid or empty toggle node argument! (toggleNode)')
if not toggleAttr: raise Exception('Invalid or empty toggle attribute argument! (toggleAttr)')
# Check Object List
if isinstance(objList,types.StringTypes):
objList = [str(objList)]
if not isinstance(objList,types.ListType):
raise Exception('Invalid object list!')
for obj in objList:
if not mc.objExists(obj):
raise Exception('Object "'+obj+'" does not exist!')
# Check Toggle Node
if not mc.objExists(toggleNode):
raise Exception('Display type control node "'+toggleNode+'" does not exist!')
# Check Toggle Name
if not toggleName: toggleName = toggleAttr
# Check Visibility Toggle Attribute
if not mc.attributeQuery(toggleAttr,n=toggleNode,ex=True):
mc.addAttr(toggleNode,ln=toggleAttr,nn=toggleName,at='enum',en=enumStr,dv=defaultValue)
mc.setAttr(toggleNode+'.'+toggleAttr,cb=True)
else:
mc.addAttr(toggleNode+'.'+toggleAttr,e=True,nn=toggleName,dv=defaultValue)
toggleNodeAttr = toggleNode+'.'+toggleAttr
# ========================
# - Connect Display Type -
# ========================
for obj in objList:
# Enable Display Overrides
mc.setAttr(obj+'.overrideEnabled',1)
# Check Incoming Connections
nodeDispAttr = 'overrideDisplayType'
nodeDispConn = mc.listConnections(obj+'.'+nodeDispAttr,s=True,d=False)
if nodeDispConn:
if force:
# Connect Display Type (Force Override)
try: mc.connectAttr(toggleNodeAttr,obj+'.'+nodeDispAttr,f=True)
except: pass # print('Problem overriding visibility connection! ('+toggleNodeAttr+' >> '+obj+'.'+nodeDispAttr+')')
else:
raise Exception('Existing display type connection already exists! Use force=True to override...')
else:
# Connect Visibility
try: mc.connectAttr(toggleNodeAttr,obj+'.'+nodeDispAttr,f=True)
except: raise Exception('Problem connecting visibility! ('+toggleNodeAttr+' >> '+obj+'.'+nodeDispAttr+')')
# =================
# - Return Result -
# =================
return toggleNodeAttr
def connectAttr(targetNode,targetAttr,sourceNode,sourceAttr,force=True):
'''
Connect specified source and target attributes
@param targetNode: Target node
@type targetNode: str
@param targetAttr: Target attribute
@type targetAttr: str
@param sourceNode: Source node
@type sourceNode: str
@param sourceAttr: Source attribute
@type sourceAttr: str
@param force: Force connection if incoming connection already exists
@type force: bool
'''
# ==========
# - Checks -
# ==========
if not targetNode: raise Exception('Invalid or empty target node argument! (targetNode)')
if not targetAttr: raise Exception('Invalid or empty target attribute argument! (targetAttr)')
if not sourceNode: raise Exception('Invalid or empty source node argument! (sourceNode)')
if not sourceAttr: raise Exception('Invalid or empty source attribute argument! (sourceAttr)')
if not mc.objExists(targetNode): raise Exception('Target node "'+targetNode+'" does not exist!')
if not mc.objExists(sourceNode): raise Exception('Source node "'+sourceNode+'" does not exist!')
if not mc.attributeQuery(targetAttr,n=targetNode,ex=True): raise Exception('Target attribute "'+targetNode+'.'+targetAttr+'" does not exist!')
if not mc.attributeQuery(sourceAttr,n=sourceNode,ex=True): raise Exception('Source attribute "'+sourceNode+'.'+sourceAttr+'" does not exist!')
sourceNodeAttr = sourceNode+'.'+sourceAttr
targetNodeAttr = targetNode+'.'+targetAttr
# Check Existing Connection to Target
existingConn = mc.listConnections(targetNodeAttr,s=True,d=False,p=True) or []
if existingConn:
for srcConn in existingConn:
print('Breaking existing connection - "'+srcConn+'" >< "'+targetNodeAttr+'"...')
mc.disconnectAttr(srcConn,targetNodeAttr)
# =====================
# - Connect Attribute -
# =====================
try: mc.connectAttr(sourceNodeAttr,targetNodeAttr,f=force)
except Exception, e:
raise Exception('Error connecting attribute "'+sourceNodeAttr+'" >> "'+targetNodeAttr+'"! Exception Msg: '+str(e))
else: print('Connecting attributes - "'+sourceNodeAttr+'" >> "'+targetNodeAttr+'"...')
# =================
# - Return Result -
# =================
return sourceNodeAttr,targetNodeAttr
def nonRenderableFaceSet(facelist,buildStandin=False):
'''
Define a list of faces to be ignored/deleted at render time.
Creates a nonRenderable preview mesh with the specified polygon faces deleted.
The original mesh is unchanged except for an ABC primvar attr that lists the face IDs to be ignored at render time.
@param facelist: List of faces to ignore during render.
@type facelist: list
@param buildStandin: Build standin geometry with faces removed, set original visibility off.
@type buildStandin: bool
'''
# ==========
# - Checks -
# ==========
facelist = mc.filterExpand(facelist,sm=34)
if not facelist: raise Exception('Invalid face list!')
# ===================
# - Get Set Members -
# ===================
# Sort Faces by Object
faceObjList = glTools.utils.selection.componentListByObject(facelist)
# For Each Object in Set
meshPreviewList = []
for faceList in faceObjList:
# Get Mesh
faceMesh = mc.ls(faceList[0],o=True)[0]
if not glTools.utils.transform.isTransform(faceMesh):
faceMesh = mc.listRelatives(faceMesh,p=True,pa=True)[0]
# Get Face Id List
faceIdList = glTools.utils.component.singleIndexList(faceList)
faceIdStr = str(faceIdList)[1:-1]
# ========================
# - Add ABC PrimVar Attr -
# ========================
attrName = 'deleteFaceSet'
if mc.objExists(faceMesh+'.ABC_'+attrName):
try: mc.setAttr(faceMesh+'.ABC_'+attrName,l=False)
except: pass
mc.deleteAttr(faceMesh+'.ABC_'+attrName)
glTools.utils.primvar.addAbcPrimVarStr( geo = faceMesh,
attrName = attrName,
stringVal = faceIdStr,
lock = False )
# =================
# - Build Standin -
# =================
if buildStandin:
# Duplicate Original (with Connections)
meshPreview = mc.polyDuplicateAndConnect(faceMesh)[0]
meshPreview = mc.rename(meshPreview,faceMesh+'_standin')
# Reparent Object
try: mc.parent(meshPreview,w=True)
except: pass
# Delete Unused Shapes
meshPreviewShapes = mc.listRelatives(meshPreview,s=True,pa=True)
if meshPreviewShapes:
meshPreviewIntShapes = mc.ls(meshPreviewShapes,intermediateObjects=True)
if meshPreviewIntShapes: mc.delete(meshPreviewIntShapes)
# Rename Shape
meshPreviewShapes = mc.listRelatives(meshPreview,s=True,pa=True)
if meshPreviewShapes: meshPreviewShape = mc.rename(meshPreviewShapes[0],meshPreview+'Shape')
# Delete Faces
mc.delete([meshPreview+'.f['+str(i)+']' for i in faceIdList])
# Append Output List
meshPreviewList.append(meshPreview)
# =================
# - Return Result -
# =================
return meshPreviewList
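# Example (added): hypothetical usage sketch only. Assumes "body_geo" is a mesh in
# the scene; the face indices are illustrative.
#
# faces = ['body_geo.f[120]','body_geo.f[121]','body_geo.f[122]']
# standins = nonRenderableFaceSet(faces,buildStandin=True)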
def selectNonRenderableFaces(geo):
'''
Select the non-renderable (render-time deleted) faces stored on the specified geometry.
@param geo: Geometry to select non-renderable faces on.
@type geo: str
'''
# ==========
# - Checks -
# ==========
# Check Mesh
if not glTools.utils.mesh.isMesh(geo):
mc.warning('Object "'+geo+'" is not a valid mesh! Unable to select non-renderable faces...')
return []
# Check Attribute
attrName = 'ABC_deleteFaceSet'
if not mc.attributeQuery(attrName,n=geo,ex=True):
mc.warning('Attribute "'+geo+'.'+attrName+'" does not exist! Unable to select non-renderable faces...')
return []
# ================
# - Select Faces -
# ================
faceIdStr = mc.getAttr(geo+'.'+attrName)
faceIdList = ast.literal_eval(faceIdStr)
faceList = [geo+'.f['+str(i)+']' for i in faceIdList]
try: mc.select(faceList)
except: mc.warning('Problem selecting face list! '+str(faceList))
# =================
# - Return Result -
# =================
return faceList
def checkNonReferencedInputShape(geo):
'''
Check if the input shape on the specified referenced geometry is a referenced node.
@param geo: Geometry to check referenced input shape on.
@type geo: str
'''
# ==========
# - Checks -
# ==========
# Check Geometry Exists
if not mc.objExists(geo):
raise Exception('Geometry "'+geo+'" does not exist!')
# Check Geometry is Referenced
if not glTools.utils.reference.isReferenced(geo):
raise Exception('Geometry "'+geo+'" is not referenced! No referenced shapes under nonReference parent...')
# Get Geometry Shapes
shapes = mc.listRelatives(geo,s=True,pa=True)
if not shapes:
raise Exception('No shapes found under geometry "'+geo+'"!')
if len(shapes) == 1:
print('Geometry "'+geo+'" has only one shape! Nothing to do here, skipping...')
return False
# Check for Referenced Shapes
refShapes = [shape for shape in shapes if glTools.utils.reference.isReferenced(shape)]
if not refShapes:
raise Exception('No referenced shapes found under geometry "'+geo+'"!')
# Get Output Shape
resultShape = mc.listRelatives(geo,s=True,ni=True,pa=True)
if not resultShape:
raise Exception('No non-intermediate shapes under geometry "'+geo+'"!')
if len(resultShape) != 1:
print('Multiple non-intermediate shapes! Checking first shape ("'+resultShape[0]+'") for input connections... ')
# Get Input Shape
inputShape = glTools.utils.shape.findInputShape(resultShape[0],recursive=True)
if not inputShape:
raise Exception('Unable to determine input shape for "'+resultShape[0]+'"!')
if inputShape == resultShape[0]:
if mc.listHistory(inputShape):
print('WARNING: Input shape is same as output shape for geometry "'+geo+'"! Check graph for cyclic dependency...')
else:
print('Output shape "'+resultShape[0]+'" has no incoming connections! Nothing to do, skipping...')
return False
# Check Input Shape is Referenced
return not glTools.utils.reference.isReferenced(inputShape)
def fixNonReferencedInputShape(geo):
'''
Ensure the input shape on the specified referenced geometry is a referenced shape node.
@param geo: Geometry to fix referenced input shape on.
@type geo: str
'''
# ==========
# - Checks -
# ==========
# Check Geometry Exists
if not mc.objExists(geo):
raise Exception('Geometry "'+geo+'" does not exist!')
# Check Geometry is Referenced
if not glTools.utils.reference.isReferenced(geo):
raise Exception('Geometry "'+geo+'" is not referenced! No referenced shapes under nonReference parent...')
# Get Geometry Shapes
shapes = mc.listRelatives(geo,s=True,pa=True)
if not shapes:
raise Exception('No shapes found under geometry "'+geo+'"!')
if len(shapes) == 1:
print('Geometry "'+geo+'" has only one shape! Nothing to do here, skipping...')
return ''
# Check for Referenced Shapes
refShapes = [shape for shape in shapes if glTools.utils.reference.isReferenced(shape)]
if not refShapes:
raise Exception('No referenced shapes found under geometry "'+geo+'"!')
if len(refShapes) > 1:
print('Found multiple referenced shapes under geometry transform "'+geo+'"! Using first shape "'+refShapes[0]+'" for input connections...')
# Get Output Shape
resultShape = mc.listRelatives(geo,s=True,ni=True,pa=True)
if not resultShape:
raise Exception('No non-intermediate shapes found under geometry "'+geo+'"!')
if len(resultShape) != 1:
print('Found multiple non-intermediate shapes! Using first shape "'+resultShape[0]+'" for input connections...')
# Get Input Shape
inputShape = glTools.utils.shape.findInputShape(resultShape[0],recursive=True)
if not inputShape:
raise Exception('No input shape found for "'+resultShape[0]+'"!')
if inputShape == resultShape[0]:
if mc.listHistory(inputShape):
print('WARNING: Input shape is same as output shape for geometry "'+geo+'"! Check graph for cyclic dependency...')
else:
print('Output shape "'+resultShape[0]+'" has no incoming connections! Nothing to do, skipping...')
return ''
# Check Input Shape is Referenced
if glTools.utils.reference.isReferenced(inputShape):
print('Input shape is referenced! Skipping...')
return ''
# =============================================
# - Replace Input Shape with Referenced Shape -
# =============================================
# Check Reference Shape is Output
if resultShape[0] == refShapes[0]:
# Swap Input/Output Node Connections
print('Referenced shape is output (result) shape! Rearranging geometry graph...')
glTools.utils.connection.swap(inputShape,refShapes[0])
# Set Intermediate Object Status
mc.setAttr(inputShape+'.intermediateObject',0)
mc.setAttr(refShapes[0]+'.intermediateObject',1)
# Fix Shape Names
if 'Orig' in inputShape:
if 'Deformed' in inputShape:
inputShape = mc.rename(inputShape,inputShape.replace('Orig',''))
else:
inputShape = mc.rename(inputShape,inputShape.replace('Orig','Deformed'))
else:
# Check inMesh Connections to Referenced Shape
if mc.listConnections(refShapes[0]+'.inMesh',s=True,d=False):
# Swap Input/Output Node Connections
glTools.utils.connection.swap(inputShape,refShapes[0])
else:
# Replace Input/Output Node Connections
glTools.utils.connection.replace(inputShape,refShapes[0],inputs=True,outputs=True)
# =================
# - Return Result -
# =================
return refShapes[0]
def cleanUnusedIntermediateShapes(geo):
'''
Remove unused intermediate shapes from the construction history of the specified geometry,
reconnecting the surviving input/output shapes so the deformation graph stays intact.
@param geo: Geometry to clean unused intermediate shapes from.
@type geo: str
'''
# Check Geometry Exists
if not mc.objExists(geo):
raise Exception('Geometry "'+geo+'" does not exist!')
# Get Geometry Shapes
shapes = mc.listRelatives(geo,s=True,pa=True)
if not shapes:
raise Exception('No shapes found under geometry "'+geo+'"!')
if len(shapes) == 1:
print('Geometry "'+geo+'" has only one shape! Nothing to do here, skipping...')
return None
# Get Output Shape
resultShapes = mc.listRelatives(geo,s=True,ni=True,pa=True)
if not resultShapes:
raise Exception('No non-intermediate shapes found under geometry "'+geo+'"!')
if len(resultShapes) != 1:
print('Found multiple non-intermediate shapes!')
# For Each Output Shape
for resultShape in resultShapes:
# Get Input Shape
inputShape = glTools.utils.shape.findInputShape(resultShape,recursive=True)
if not inputShape:
print('No input shape found for "'+resultShape+'"! Skipping')
continue
if inputShape == resultShape:
if mc.listHistory(inputShape):
print('WARNING: Input shape is same as output shape for geometry "'+geo+'"! Check graph for cyclic dependency...')
else:
print('Output shape "'+resultShape+'" has no incoming connections! Nothing to do, skipping...')
continue
# Replace Unused Intermediate Shapes Connections
intermediateShape = glTools.utils.shape.findInputShape(resultShape)
while(intermediateShape != inputShape):
# Store Next Intermediate Shape
intShape = intermediateShape
intermediateShape = glTools.utils.shape.findInputShape(intShape)
# MESH
if mc.objectType(intShape) == 'mesh':
inMeshConn = mc.listConnections(intShape+'.inMesh',s=True,d=False,p=True)
if inMeshConn:
outMeshConn = mc.listConnections([intShape+'.outMesh',intShape+'.worldMesh'],s=False,d=True,p=True) or []
for outConn in outMeshConn: mc.connectAttr(inMeshConn[0],outConn,f=True)
# NURBS
elif mc.objectType(intShape) in ['nurbsCurve','nurbsSurface']:
inNurbConn = mc.listConnections(intShape+'.create',s=True,d=False,p=True)
if inNurbConn:
outNurbConn = mc.listConnections([intShape+'.local',intShape+'.worldSpace'],s=False,d=True,p=True) or []
for outConn in outNurbConn: mc.connectAttr(inNurbConn[0],outConn,f=True)
# UNSUPPORTED
else:
print('Unsupported shape type! ('+mc.objectType(intShape)+')! Skipping geometry...')
break
# Replace Generic Connections
#glTools.utils.connection.replace(intShape,inputShape,inputs=True,outputs=True)
# Delete Unused Intermediate Shape
mc.delete(intShape)
# Print Shape Result
print('# DELETED Intermediate Shape: '+intShape)
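# Example (added): hypothetical usage sketch only. Assumes "body_geo" carries
# deformer history with one or more unused intermediate shapes.
#
# cleanUnusedIntermediateShapes('body_geo')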
|
StarcoderdataPython
|
3222664
|
<filename>OperatorsPrecedence.py
#@Author <NAME>
a = 20
b = 10
c = 15
d = 5
print ("a:%d b:%d c:%d d:%d" % (a,b,c,d ))
e = (a + b) * c / d #( 30 * 15 ) / 5
print ("Value of (a + b) * c / d is ", e)
e = ((a + b) * c) / d # (30 * 15 ) / 5
print ("Value of ((a + b) * c) / d is ", e)
e = (a + b) * (c / d) # (30) * (15/5)
print ("Value of (a + b) * (c / d) is ", e)
e = a + (b * c) / d # 20 + (150/5)
print ("Value of a + (b * c) / d is ", e)
|
StarcoderdataPython
|
96509
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 09:26:24 2019
@author: <NAME>
"""
import argparse
from pathlib import Path
import rpg_lib
import numpy as np
def main():
parser = argparse.ArgumentParser(prog='FRM4RADAR')
parser.add_argument(
"path",
type=Path,
default=Path(__file__).absolute().parent / "data",
help="Path to the data directory",
)
parser.add_argument(
"--proc",
action = "store_true",
default = False ,
help="Treat found files as already-processed data instead of raw data. (default False)",
)
parser.add_argument(
"--time",
type=int,
default = 3600 ,
help="Time to average at in seconds.(default 3600s)",
)
parser.add_argument(
"--height",
type=int,
default = 500 ,
help="Height to average atin meters.(default 500 m)",
)
p = parser.parse_args()
print("Looking for files starting from this path:",p.path)
print("Found files will be treated as processed netcdCDF4:",p.proc)
print("Time at which the averaging will be done:",p.time)
print("Height at which the averaging will be done:",p.height)
daily_data = rpg_lib.read_folders(p.path,p.proc)
if not p.proc:
for data in daily_data:
if data:
daily_reflectivity_log = 10 * np.log10(data[2])
mask = rpg_lib.filter_rpg(daily_reflectivity_log)
reverse_mask = np.logical_not(mask)
clean_data = daily_reflectivity_log * mask / mask
noise = daily_reflectivity_log * reverse_mask / reverse_mask
daily_time = data[0]
height_range = data[1]
save_data = {"data" : clean_data,
"mask" : mask,
"noise" : noise,
"time" : daily_time,
"height": height_range}
save_name = data[3]
### Saving data
rpg_lib.save_netcdf(save_name,save_data)
clean_data = data[2] * mask / mask
averaged_time,averaged_range,averaged_data = rpg_lib.average(daily_time,height_range,clean_data,height_bin_size=p.height,time_bin_size=p.time)
averaged_time,averaged_range,averaged_noise = rpg_lib.average(daily_time,height_range,mask,height_bin_size=p.height,time_bin_size=p.time)
averaged_mask = averaged_data * 0 + 1
averaged_mask[np.where(np.isnan(averaged_mask))] = 0
save_data_avr = {"data" : 10 * np.log10(averaged_data),
"time" : averaged_time,
"height": averaged_range,
"mask" : averaged_mask,
"noise" : averaged_noise,}
rpg_lib.save_netcdf(save_name+"_averaged",save_data_avr)
if __name__ == "__main__":
main()
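# Note (added): the averaging above is performed on linear reflectivity and only the
# result is converted to dB. For example, averaging samples of 10 dBZ and 20 dBZ
# (linear values 10.0 and 100.0) gives 10 * np.log10(np.mean([10.0, 100.0])) ~= 17.4 dB,
# not the 15 dB obtained by averaging the dB values directly.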
|
StarcoderdataPython
|
4841280
|
import unittest
import os
from reactivexcomponent.configuration.api_configuration import APIConfiguration
class TestConfiguration(unittest.TestCase):
def setUp(self):
self.configuration = APIConfiguration(
os.path.join("tests", "unit", "data", "WebSocket_NewDevinetteApi_test.xcApi"))
def test_get_component_code(self):
"""get_component_code should return the right code given an existing component name"""
code = self.configuration.get_component_code('Devinette')
correct_code = -725052640
self.assertEqual(code, correct_code)
"""get_component_code throws exception when using an unkonwn component name"""
with self.assertRaises(Exception):
self.configuration.get_component_code('UnkonwnComponent')
def test_get_state_machine_code(self):
"""get_state_machine_code should return the right code given existing component name and statemachine name"""
code = self.configuration.get_state_machine_code(
'Devinette', 'Devinette')
correct_code = -725052640
self.assertEqual(code, correct_code)
code = self.configuration.get_state_machine_code(
'Devinette', 'DevinetteStatus')
correct_code = 2089109814
self.assertEqual(code, correct_code)
"""get_state_machine_code throws exception when using an unkonwn state machine name"""
with self.assertRaises(Exception):
self.configuration.get_state_machine_code(
'Devinette', 'UnkownStateMachine')
def test_get_publisher_details(self):
"""get_publisher_details should return the right publisher details given existing component and stateMachine codes"""
correct_publisher_details = {
'eventCode': 8, 'routingKey': 'input.1_0.microservice1.Devinette.DevinetteChecker'}
publisher_details = self.configuration.get_publisher_details(
-725052640, -2027871621, 'XComponent.Devinette.UserObject.CheckWord')
self.assertEqual(publisher_details, correct_publisher_details)
"""get_publisher_details should throw exeption when using an unknown stateMachine name"""
component_code = 101
state_machine_code = 102
message_type = 'type'
with self.assertRaises(Exception):
self.configuration.get_publisher_details(
component_code, state_machine_code, message_type)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
3304827
|
<reponame>Crossing-Minds/reco-api-benchmarks
from .amazonrecoapi import AmazonRecoApi
|
StarcoderdataPython
|
4808245
|
import json
import logging
import platform
import re
from io import BytesIO
from subprocess import check_output, CalledProcessError
import time
import os
import yaml
from binstar_client import errors
from binstar_client.utils import get_server_api, store_token
from binstar_client.utils.notebook.inflection import parameterize
log = logging.getLogger(__name__)
class Uploader(object):
_package = None
_release = None
_project = None
def __init__(self, name, content):
self.aserver_api = get_server_api()
self.name = parameterize(name)
self.content = content
self.summary = self.metadata.get("summary", "Jupyter Notebook")
self.username = self.metadata.get("organization", None)
if self.metadata.get("attach-environment", None):
self.env_name = self.metadata.get("environment", None)
if self.env_name is None:
ksname = self.ksname
if ksname in ["python2", "python3"]:
# we are dealing with the native kernel, so let's find out
# the name of the env where that kernel lives
self.env_name = self.default_env()
else:
# ksname comes in the form conda-env-name-[py/r] or
# conda-root-[py/r] so split them and catch the name
self.env_name = ksname.split("-")[-2]
else:
self.env_name = None
if self.username is None:
self.username = self.aserver_api.user()['login']
def upload(self, force=False):
"""
Uploads a notebook
:param force: True/False
:returns {}
"""
self.package and self.release
try:
return self.aserver_api.upload(
self.username,
self.project,
self.version,
self.name,
self.content_io(),
"ipynb")
except errors.Conflict:
if force:
self.remove()
return self.upload()
else:
msg = "Conflict: {}/{} already exist".format(
self.project, self.version)
raise errors.BinstarError(msg)
def default_env(self):
conda_info = self._exec(['conda', 'info', '--json'])
if not conda_info:
return None
conda_info = json.loads(conda_info.decode("utf-8"))
if conda_info["default_prefix"] == conda_info["root_prefix"]:
return "root"
return os.path.basename(conda_info["default_prefix"])
def attach_env(self, content):
""" given an environment name, update the content with a normalized
`conda env import`-compatible environment
"""
env = yaml.load(
self._exec(['conda', 'env', 'export',
'-n', self.env_name,
'--no-builds'])
)
# this is almost certainly not useful to anybody else
env.pop('prefix')
# this is currently a mess
channels = env.get("channels", [])
dependencies = []
pip_deps = []
# currently seeing weird stuff
for dep in env.get("dependencies", []):
if isinstance(dep, dict):
if "pip" in dep:
pip_deps = dep["pip"]
else:
channel = None
if "::" in dep:
channel, dep = dep.split("::")
if channel is not None:
channels.append(channel)
dependencies.append(dep)
# i guess no dependencies could happen
env["dependencies"] = sorted(set(dependencies or []))
# getting lots of extra pip deps... this might not always be needed
if pip_deps:
unique_pip_deps = []
conda_deps = [cdep.split("=")[0].replace("_", "-")
for cdep in dependencies]
for dep in pip_deps:
# local files are not reproducible
if "(" in dep:
continue
pip_dep = dep.split("=")[0]
if pip_dep.replace("_", "-") not in conda_deps:
unique_pip_deps.append(pip_dep)
if unique_pip_deps:
env["dependencies"].append({"pip": sorted(unique_pip_deps)})
# only add channels if you got some
if channels:
env["channels"] = channels
# avoid foot-shooting here
if env.get("name") == "root":
env["name"] = "notebook-{}".format(self.name)
# whew, we made it! it would be great to have a verify step
content['metadata']['environment'] = env
return content
def _exec(self, cmd):
try:
output = check_output(cmd)
except CalledProcessError as e:
log.error(e)
output = {}
return output
def content_io(self):
_notebook = BytesIO()
if self.env_name is not None:
self.content = self.attach_env(self.content)
_notebook.write(json.dumps(self.content).encode())
_notebook.seek(0)
return _notebook
def remove(self):
# Note: the original call passed an extra `self` argument and referenced a
# non-existent `self.notebook` attribute; dropping the stray argument and
# using `self.name` for the basename is an assumed fix.
return self.aserver_api.remove_dist(self.username, self.project,
self.version,
basename=self.name)
@property
def version(self):
return time.strftime('%Y.%m.%d.%H%M')
@property
def project(self):
if self._project is None:
return re.sub(r'-ipynb$', '', self.name)
else:
return self._project
@property
def ksname(self):
ks = self.content.get("metadata", {}).get("kernelspec", {})
return ks.get("name", None)
@property
def metadata(self):
return self.content.get("metadata", {}).get("anaconda-cloud", {})
@property
def notebook_attrs(self):
attrs = {}
# thumbnails should be coming back with a proper data URI starting with
# "data:image/png;base64,"... but the uploader/template will add its
# own later. Just strip it, or fail if it's not properly formatted
try:
attrs['thumbnail'] = self.metadata["thumbnail"].split(",")[1]
except Exception:
pass
return attrs
@property
def package(self):
if self._package is None:
try:
self._package = self.aserver_api.package(
self.username,
self.project)
except errors.NotFound:
self._package = self.aserver_api.add_package(
self.username,
self.project,
summary=self.summary,
attrs=self.notebook_attrs)
return self._package
@property
def release(self):
if self._release is None:
try:
self._release = self.aserver_api.release(
self.username,
self.project,
self.version)
except errors.NotFound:
self._release = self.aserver_api.add_release(
self.username,
self.project,
self.version,
None,
None,
None)
return self._release
class AccountManager(object):
def __init__(self):
self._user = None
self.aserver_api = get_server_api()
def is_logged_in(self):
return self.user is not None
def login(self, username, password):
fake_args = FakeArgs(username, password)
token = self.get_token(fake_args)
store_token(token, fake_args)
def get_token(self, args):
return self.aserver_api.authenticate(
args.login_username,
args.login_password,
'https://api.anaconda.org',
created_with='nb_anacondacloud', fail_if_already_exists=True,
hostname=platform.node()
)
@property
def user(self):
if self._user is None:
try:
self._user = self.aserver_api.user()
except errors.Unauthorized:
self._user = None
return self._user
@property
def organizations(self):
output = []
for org in self.aserver_api.user_orgs():
if 'name' in org:
output.append({'name': org['name'], 'login': org['login']})
else:
output.append({'name': org['login'], 'login': org['login']})
return output
class FakeArgs(object):
def __init__(self, username, password):
self.token = None
self.site = None
self.hostname = None
self.login_username = username
self.login_password = password
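# Usage sketch (assumed, not part of the original module): the uploader takes
# the notebook name and its already-parsed JSON content, e.g.
#   uploader = Uploader("my-notebook", json.load(open("my-notebook.ipynb")))
#   uploader.upload(force=True)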
|
StarcoderdataPython
|
1722973
|
# =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
import torch
from nemo import logging
__all__ = ['eval_iter_callback', 'eval_epochs_done_callback']
GLOBAL_KEYS = [
"loss",
"per_example_loss",
"beam_results",
"src_ids",
"src_first_tokens",
"pred",
"labels",
"labels_mask",
]
def eval_iter_callback(tensors, global_vars, tokenizer):
for key in GLOBAL_KEYS:
if key not in global_vars.keys():
global_vars[key] = []
for kv, v in tensors.items():
if "crossentropylossnm1" in kv:
for per_example_loss in v:
pel = per_example_loss.cpu().numpy().tolist()
global_vars["per_example_loss"].extend(pel)
if "logits" in kv:
for pred in v:
p = torch.argmax(pred, dim=-1).int().cpu().numpy().tolist()
global_vars["pred"].extend(p)
if "labels~" in kv:
for label in v:
l = label.cpu().numpy().tolist()
global_vars["labels"].extend(l)
if "labels_mask" in kv:
for mask in v:
m = mask.cpu().numpy().tolist()
global_vars["labels_mask"].extend(m)
def eval_epochs_done_callback(global_vars, validation_dataset=None):
losses = np.array(global_vars["per_example_loss"])
eval_loss = np.mean(losses)
global_vars["per_example_loss"] = []
labels = np.array([np.array(n) for n in global_vars["labels"]])
predictions = np.array([np.array(n) for n in global_vars["pred"]])
labels_mask = np.array([np.array(n) for n in global_vars["labels_mask"]])
for key in GLOBAL_KEYS:
global_vars[key] = []
lor = np.logical_or(labels == predictions, ~labels_mask.astype(bool))
accuracy = np.mean(np.all(lor, axis=1).astype(np.float32))
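# a sentence counts as correct only when every non-masked token matches,
# hence the logical_or with the inverted mask followed by np.all over axis 1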
logging.info("------------------------------------------------------------")
logging.info("Validation loss: {0}".format(np.round(eval_loss, 3)))
logging.info("Sentence level accuracy: {0}".format(accuracy))
logging.info("------------------------------------------------------------")
return dict({"eval_loss": eval_loss})
|
StarcoderdataPython
|
3391124
|
<reponame>sirosen/temp-cli-test<gh_stars>10-100
from typing import Tuple
import click
from globus_cli.login_manager import LoginManager
from globus_cli.parsing import command, no_local_server_option
@command(
"consent",
short_help="Update your session with specific consents",
disable_options=["format", "map_http_status"],
)
@no_local_server_option
@click.argument("SCOPES", nargs=-1, required=True)
def session_consent(scopes: Tuple[str], no_local_server: bool) -> None:
"""
Update your current CLI auth session by authenticating with a specific scope or set
of scopes.
This command is necessary when the CLI needs access to resources which require the
user to explicitly consent to access.
"""
manager = LoginManager()
manager.run_login_flow(
no_local_server=no_local_server,
local_server_message=(
"You are running 'globus session consent', "
"which should automatically open a browser window for you to "
"authenticate with specific identities.\n"
"If this fails or you experience difficulty, try "
"'globus session consent --no-local-server'"
"\n---"
),
epilog="\nYou have successfully updated your CLI session.\n",
scopes=list(scopes),
)
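# Example invocation (the scope value below is illustrative only):
#   globus session consent "urn:globus:auth:scope:transfer.api.globus.org:all"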
|
StarcoderdataPython
|
3316346
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-18 10:17
from __future__ import unicode_literals
from django.db import migrations
def store_last_status(apps, schema_editor):
VenueRequest = apps.get_model('froide_food', 'VenueRequest')
VenueRequestItem = apps.get_model('froide_food', 'VenueRequestItem')
for venue in VenueRequest.objects.all():
vris = VenueRequestItem.objects.filter(venue=venue).order_by('-timestamp')
if vris:
vri = vris[0]
venue.last_request = vri.timestamp
if vri.foirequest:
venue.last_status = vri.foirequest.status
venue.save()
class Migration(migrations.Migration):
dependencies = [
('froide_food', '0006_auto_20180612_1231'),
]
operations = [
migrations.RunPython(store_last_status),
]
|
StarcoderdataPython
|
50215
|
<reponame>incident-reporter/incident-reporter
import datetime
from typing import Optional
import discord
from discord.ext import commands
from ..storage import Storage
from ..util import is_staff
class Config(commands.Cog):
@commands.command(help='Change the prefix of the bot.')
@commands.has_permissions(manage_guild=True)
async def prefix(self, ctx: commands.Context, *, prefix: Optional[str]):
if prefix: # new prefix
if len(prefix) > 32:
await ctx.send(embed=discord.Embed(
description='Sorry, that prefix is too long. '
'It can not be longer than 32 characters.',
color=ctx.bot.colorsg['failure']
).set_footer(
text=f'Requested by {ctx.author}',
icon_url=ctx.author.avatar_url
))
else:
stor = ctx.bot.get_storage(ctx.guild)
await stor.set('prefix', prefix)
await ctx.send(embed=discord.Embed(
description=f'Prefix updated :ok_hand:\n'
f'It is now: {prefix}',
color=ctx.bot.colorsg['success']
).set_footer(
text=f'Requested by {ctx.author}',
icon_url=ctx.author.avatar_url
))
else:
prefix = (await ctx.bot.get_command_prefix(ctx.bot, ctx.message))
await ctx.send(embed=discord.Embed(
description=f'My current prefix is: {prefix[0]}',
color=ctx.bot.colorsg['info']
).set_footer(
text=f'Requested by {ctx.author}',
icon_url=ctx.author.avatar_url
))
@commands.command(help='Change the timezone displayed in incidents')
@is_staff()
async def timezone(self, ctx: commands.Context, tz: str = None):
storage = ctx.bot.get_storage(ctx.guild) # type: Storage
if tz is None:
offset = await storage.get_float('timezone', default=0)
tzinfo = datetime.timezone(datetime.timedelta(seconds=offset))
time = ctx.message.created_at.replace(tzinfo=tzinfo)
tz_str = time.strftime('UTC%z')
return await ctx.send(embed=discord.Embed(
description=f'Current timezone is: {tz_str}',
color=ctx.bot.colorsg['info']
))
try:
time = datetime.datetime.strptime(tz, 'UTC%z')
except ValueError:
return await ctx.send(embed=discord.Embed(
description=(
f'Invalid timezone: {tz!r}\n\n'
f'- Make sure the timezone starts with UTC\n'
f'- Make sure you don\'t forget the + or -\n\n'
f'Examples:\n'
f'- `UTC+0100` CET\n'
f'- `UTC-0700` PDT\n'
),
color=ctx.bot.colorsg['failure']
))
offset = time.tzinfo.utcoffset(time).total_seconds()
await storage.set('timezone', offset)
tz_str = time.strftime('UTC%z')
return await ctx.send(embed=discord.Embed(
description=f'Timezone set to: {tz_str}',
color=ctx.bot.colorsg['success']
))
def setup(bot: commands.Bot):
bot.add_cog(Config())
|
StarcoderdataPython
|
157993
|
<filename>OPENAI/NER.py
import openai
openai.api_key = "<KEY>"
restart_sequence = "\n"
primer = open("primer.txt").read()
labels = ["person", "organisation", "location"]
labels = "".join([i + ", " for i in labels])
sentences = [
"Jacco is studying at Utrecht University.",
"Dan is an old friend from my time at Princeton.",
"My back problems started to come back."
]
for sentence in sentences:
input_text = "[" + labels + "]:" + sentence
response = openai.Completion.create(
engine="curie",
prompt=primer + input_text,
max_tokens=256,
temperature=0.4,
logprobs=4,
stop="]",
)
# print(response)
string = response["choices"][0]["text"].strip() + "]"
# print("##################" + string + "##################")
|
StarcoderdataPython
|
3364846
|
'''
Functional tests for cassandra timeseries
'''
import time
import datetime
import os
import cql
from . import helpers
from .helpers import unittest, os, Timeseries
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraApiTest(helpers.ApiHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraApiTest,self).setUp()
def test_url_parse(self):
assert_equals( 'CassandraSeries',
Timeseries( 'cql://localhost', type='series' ).__class__.__name__ )
# Not running gregorian tests because they run in the "far future" where long
# TTLs are not supported.
#@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
#class CassandraGregorianTest(helpers.GregorianHelper):
#def setUp(self):
#self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
#super(CassandraGregorianTest,self).setUp()
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraSeriesTest(helpers.SeriesHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraSeriesTest,self).setUp()
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraHistogramTest(helpers.HistogramHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraHistogramTest,self).setUp()
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraCountTest(helpers.CountHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraCountTest,self).setUp()
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraGaugeTest(helpers.GaugeHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraGaugeTest,self).setUp()
@unittest.skipUnless( os.environ.get('TEST_CASSANDRA','true').lower()=='true', 'skipping cassandra' )
class CassandraSetTest(helpers.SetHelper):
def setUp(self):
self.client = cql.connect('localhost', 9160, os.environ.get('CASSANDRA_KEYSPACE','kairos'), cql_version='3.0.0')
super(CassandraSetTest,self).setUp()
|
StarcoderdataPython
|
1761757
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
from azlmbr.entity import EntityId
from azlmbr.math import Vector3
from editor_python_test_tools.editor_entity_utils import EditorEntity
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
import azlmbr.bus as bus
import azlmbr.components as components
import azlmbr.entity as entity
import azlmbr.legacy.general as general
def get_prefab_file_name(prefab_name):
return prefab_name + ".prefab"
def get_prefab_file_path(prefab_name):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), get_prefab_file_name(prefab_name))
def find_entities_by_name(entity_name):
searchFilter = entity.SearchFilter()
searchFilter.names = [entity_name]
return entity.SearchBus(bus.Broadcast, 'SearchEntities', searchFilter)
def find_entity_by_unique_name(entity_name):
unique_name_entity_found_result = (
"Entity with a unique name found",
"Entity with a unique name *not* found")
entities = find_entities_by_name(entity_name)
unique_name_entity_found = len(entities) == 1
Report.result(unique_name_entity_found_result, unique_name_entity_found)
if unique_name_entity_found:
return entities[0]
else:
Report.info(f"{len(entities)} entities with name '{entity_name}' found")
return EntityId()
def check_entity_at_position(entity_id, expected_entity_position):
entity_at_expected_position_result = (
"entity is at expected position",
"entity is *not* at expected position")
actual_entity_position = components.TransformBus(bus.Event, "GetWorldTranslation", entity_id)
is_at_position = actual_entity_position.IsClose(expected_entity_position)
Report.result(entity_at_expected_position_result, is_at_position)
if not is_at_position:
Report.info(f"Entity '{entity_id.ToString()}'\'s expected position: {expected_entity_position.ToString()}, actual position: {actual_entity_position.ToString()}")
return is_at_position
def check_entity_children_count(entity_id, expected_children_count):
entity_children_count_matched_result = (
"Entity with a unique name found",
"Entity with a unique name *not* found")
entity = EditorEntity(entity_id)
children_entity_ids = entity.get_children_ids()
entity_children_count_matched = len(children_entity_ids) == expected_children_count
Report.result(entity_children_count_matched_result, entity_children_count_matched)
if not entity_children_count_matched:
Report.info(f"Entity '{entity_id.ToString()}' actual children count: {len(children_entity_ids)}. Expected children count: {expected_children_count}")
return entity_children_count_matched
def get_children_ids_by_name(entity_id, entity_name):
entity = EditorEntity(entity_id)
children_entity_ids = entity.get_children_ids()
result = []
for child_entity_id in children_entity_ids:
child_entity = EditorEntity(child_entity_id)
child_entity_name = child_entity.get_name()
if child_entity_name == entity_name:
result.append(child_entity_id)
return result
def wait_for_propagation():
general.idle_wait_frames(1)
def open_base_tests_level():
helper.init_idle()
helper.open_level("Prefab", "Base")
|
StarcoderdataPython
|
1630811
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.views.generic.list_detail import object_list
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from models import Post
def posts(request, template='blog/posts.html'):
return object_list(request,
Post.objects.all(),
template_name=template,
paginate_by=5
)
def post(request, slug, template='blog/post.html'):
post = get_object_or_404(Post, slug=slug)
return render_to_response(template, {'post':post}, RequestContext(request))
|
StarcoderdataPython
|
3307959
|
<filename>pywubi/constants.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from enum import IntEnum, unique
import os
import re
from pywubi import wubi_dict
# Single-character wubi code dictionary
WUBI_86_DICT = wubi_dict.wubi_86_dict
# An environment variable can disable the copy below to reduce memory usage
if not os.environ.get('PYWUBI_NO_DICT_COPY'):
WUBI_86_DICT = WUBI_86_DICT.copy()
# Regex matching strings made up entirely of Chinese characters
RE_HANS = re.compile(
r'^(?:['
r'\u3007' # 〇
r'\u4e00-\ufa29'
r'])+$'
)
@unique
class Style(IntEnum):
"""编码"""
# 86 版编码
WUBI_86 = 1
# 96 版编码
WUBI_96 = 2
STYLE_WUBI_86 = Style.WUBI_86
STYLE_WUBI_96 = Style.WUBI_96
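# Usage sketch: RE_HANS only matches strings consisting entirely of characters
# in the ranges above, e.g.
#   RE_HANS.match(u'中文')     # -> match object
#   RE_HANS.match(u'abc中文')  # -> None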
|
StarcoderdataPython
|
4841967
|
# Expiry time of the image verification code, presumably in seconds
IMAGE_CODE_EXPIRE = 300
|
StarcoderdataPython
|
1662240
|
import subprocess
import os
import glob
import shutil
import sys
import optparse
import tempfile
TMP_OUT = tempfile.mkdtemp()
SECRETS = "secrets"
DISTINGUISHED_NAME = {"domain": "example.com",
"C": "US",
"ST": "Maryland",
"L": "Baltimore",
"O": "Foobars of the World",
"OU": "[let's generate certs]",
"CN": "example.com"}
def get_cnfs():
OPENSSL_CA_CNF = '''
HOME = .
RANDFILE = $ENV::HOME/.rnd
####################################################################
[ ca ]
default_ca = CA_default # The default ca section
[ CA_default ]
default_days = 1000 # how long to certify for
default_crl_days = 30 # how long before next CRL
default_md = sha256 # use public key default MD
preserve = no # keep passed DN ordering
x509_extensions = ca_extensions # The extensions to add to the cert
email_in_dn = no # Don't concat the email in the DN
copy_extensions = copy # Required to copy SANs from CSR to cert
####################################################################
[ req ]
default_bits = 4096
default_keyfile = {0}/cakey.pem
distinguished_name = ca_distinguished_name
x509_extensions = ca_extensions
string_mask = utf8only
####################################################################
[ ca_distinguished_name ]
countryName = Country Name (2 letter code)
stateOrProvinceName = State or Province Name (full name)
localityName = Locality Name (eg, city)
organizationName = Organization Name (eg, company)
organizationalUnitName = Organizational Unit (eg, division)
commonName = Common Name (e.g. server FQDN or YOUR name)
####################################################################
[ ca_extensions ]
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always, issuer
basicConstraints = critical, CA:true
keyUsage = keyCertSign, cRLSign
'''.format(TMP_OUT)
OPENSSL_CA_EXT_CNF = '''
HOME = .
RANDFILE = $ENV::HOME/.rnd
####################################################################
[ ca ]
default_ca = CA_default # The default ca section
[ CA_default ]
default_days = 1000 # how long to certify for
default_crl_days = 30 # how long before next CRL
default_md = sha256 # use public key default MD
preserve = no # keep passed DN ordering
x509_extensions = ca_extensions # The extensions to add to the cert
email_in_dn = no # Don't concat the email in the DN
copy_extensions = copy # Required to copy SANs from CSR to cert
certificate = {0}/cacert.pem # The CA certifcate
private_key = {0}/cakey.pem # The CA private key
new_certs_dir = {0} # Location for new certs
database = {0}/index.txt # Database index file
serial = {0}/serial.txt # The current serial number
unique_subject = no # Set to 'no' to allow creation of
# several certificates with same subject.
####################################################################
[ req ]
default_bits = 4096
default_keyfile = {0}/cakey.pem
distinguished_name = ca_distinguished_name
x509_extensions = ca_extensions
string_mask = utf8only
####################################################################
[ ca_distinguished_name ]
countryName = Country Name (2 letter code)
stateOrProvinceName = State or Province Name (full name)
localityName = Locality Name (eg, city)
organizationName = Organization Name (eg, company)
organizationalUnitName = Organizational Unit (eg, division)
commonName = Common Name (e.g. server FQDN or YOUR name)
####################################################################
[ ca_extensions ]
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always, issuer
basicConstraints = critical, CA:true
keyUsage = keyCertSign, cRLSign
####################################################################
[ signing_policy ]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
####################################################################
[ signing_req ]
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
'''.format(TMP_OUT)
OPENSSL_SERVER_CNF = '''
HOME = .
RANDFILE = $ENV::HOME/.rnd
####################################################################
[ req ]
default_bits = 2048
default_keyfile = {0}/{1}.key
distinguished_name = server_distinguished_name
req_extensions = server_req_extensions
string_mask = utf8only
####################################################################
[ server_distinguished_name ]
countryName = Country Name (2 letter code)
stateOrProvinceName = State or Province Name (full name)
localityName = Locality Name (eg, city)
organizationName = Organization Name (eg, company)
commonName = Common Name (e.g. server FQDN or YOUR name)
####################################################################
[ server_req_extensions ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
subjectAltName = @alternate_names
nsComment = "OpenSSL Generated Certificate"
####################################################################
[ alternate_names ]
DNS.1 = {1}
DNS.2 = *.{1}
'''.format(SECRETS, DISTINGUISHED_NAME['domain'])
return [['OPENSSL_CA_CNF', OPENSSL_CA_CNF],
['OPENSSL_CA_EXT_CNF', OPENSSL_CA_EXT_CNF],
['OPENSSL_SERVER_CNF', OPENSSL_SERVER_CNF]]
def prepare_files():
if not os.path.exists(SECRETS):
os.makedirs(SECRETS)
elif not os.path.isdir(SECRETS):
m = "Check if the paths for the output are \
colliding with existing files."
sys.exit(m)
for tmp_file in glob.glob("{}/*".format(TMP_OUT)):
os.remove(tmp_file)
with open("{}/index.txt".format(TMP_OUT), "w") as f:
f.write("")
with open('{}/serial.txt'.format(TMP_OUT), 'w') as f:
f.write('01')
for cnf in get_cnfs():
with open('{}/{}'.format(TMP_OUT, cnf[0]), 'w') as f:
f.write(cnf[1])
def generate_certs():
prepare_files()
subprocess.call(['openssl', 'req',
'-x509',
'-config', '{}/OPENSSL_CA_CNF'.format(TMP_OUT),
'-subj', '/C={C}/ST={ST}/L={L}/O={O}/OU={OU}/CN={CN}'
.format(**DISTINGUISHED_NAME),
'-newkey', 'rsa:4096', '-sha256',
'-nodes',
'-out', '{}/cacert.pem'.format(TMP_OUT),
'-outform', 'PEM'])
subprocess.call(['openssl', 'req',
'-config', '{}/OPENSSL_SERVER_CNF'.format(TMP_OUT),
'-newkey', 'rsa:4096', '-sha256',
'-nodes',
'-subj', '/C={C}/ST={ST}/L={L}/O={O}/OU={OU}/CN={CN}'
.format(**DISTINGUISHED_NAME),
'-out', '{}/servercert.csr'.format(TMP_OUT),
'-outform', 'PEM'])
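# pipe the output of `yes` into `openssl ca` so its sign/commit prompts are
# confirmed non-interactively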
yes = subprocess.Popen('yes',
stdout=subprocess.PIPE)
c = subprocess.Popen(['openssl', 'ca',
'-config', '{}/OPENSSL_CA_EXT_CNF'.format(TMP_OUT),
'-policy', 'signing_policy',
'-extensions', 'signing_req',
'-out', '{}/{}.crt'.format(SECRETS, DISTINGUISHED_NAME['domain']),
'-infiles', '{}/servercert.csr'.format(TMP_OUT)],
stdin=yes.stdout,
stdout=subprocess.PIPE)
yes.kill()
c.communicate()
shutil.copyfile('{}/cacert.pem'.format(TMP_OUT),
'{}/{}_cacert.pem'.format(SECRETS, DISTINGUISHED_NAME['domain']))
def remove_files():
for tmp_file in glob.glob("{}/*".format(TMP_OUT)):
os.remove(tmp_file)
os.rmdir(TMP_OUT)
[os.remove(f) for f in glob.glob("{}/*".format(SECRETS))
if f not in ["{}/{}.key".format(SECRETS, DISTINGUISHED_NAME['domain']),
"{}/{}.crt".format(SECRETS, DISTINGUISHED_NAME['domain']),
"{}/{}_cacert.pem".format(SECRETS, DISTINGUISHED_NAME['domain'])]]
def print_info():
print("Domain: {}".format(DISTINGUISHED_NAME['domain']))
print("Domain info:")
print(" Common name : {}".format(DISTINGUISHED_NAME['CN']))
print(" Organization : {}".format(DISTINGUISHED_NAME['O']))
print(" Organization unit: {}".format(DISTINGUISHED_NAME['OU']))
print(" Country : {}".format(DISTINGUISHED_NAME['C']))
print(" State or province: {}".format(DISTINGUISHED_NAME['ST']))
print(" Location or city : {}".format(DISTINGUISHED_NAME['L']))
print("\nIf you want to add info via command line args first run it with --help flag.")
def main():
generate_certs()
remove_files()
if __name__ == '__main__':
p = optparse.OptionParser()
p.add_option('-d', '--domain-name',
help="Main domain (e.g. example.com)")
p.add_option('-c','--country',
help="Country Name (2 letter code)")
p.add_option('-s', '--state',
help="State or Province Name (full name)")
p.add_option('-l', '--location',
help="Locality Name (eg, city)")
p.add_option('-o', '--organization',
help="Organization Name (eg, company)")
p.add_option('-u', '--organization-unit',
help="Organizational Unit (eg, division)")
p.add_option('-n', '--common-name', help="Common Name (e.g. server FQDN or YOUR name)")
(options, remainder) = p.parse_args()
if len(sys.argv) > 1:
if options.domain_name is None:
print("You should provide at least a main domain name (if using flags).")
print("If there is no flags, info written in {} is used. Change it if you prefer hardcoding it into file.".format(sys.argv[0]))
print("Or use flags:\n")
p.print_help()
sys.exit(1)
DISTINGUISHED_NAME['domain'] = options.domain_name
DISTINGUISHED_NAME['C'] = options.country or ""
DISTINGUISHED_NAME['ST'] = options.state or ""
DISTINGUISHED_NAME['L'] = options.location or ""
DISTINGUISHED_NAME['O'] = options.organization or ""
DISTINGUISHED_NAME['OU'] = options.organization_unit or ""
DISTINGUISHED_NAME['CN'] = options.common_name or options.domain_name
print_info()
else:
print("Generating certs by using info written in {}:".format(sys.argv[0]))
print_info()
main()
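# Example run (hypothetical script name and values; without flags the
# DISTINGUISHED_NAME defaults above are used):
#   python generate_certs.py -d example.org -c US -s Maryland -l Baltimore \
#       -o "Foobars of the World" -n example.org
# The files kept in the secrets directory are <domain>.key, <domain>.crt and
# <domain>_cacert.pem; all intermediate files are removed afterwards.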
|
StarcoderdataPython
|
3280320
|
<filename>source/visualization/regression_bias_variance.py
import numpy as np
import matplotlib.pyplot as plt
num_points = 50
f = lambda x: np.sin(x)
x = np.linspace(-10, 10, num_points)
y = f(x) + np.random.normal(0,0.5, len(x)) # function of the curve with some normal noise added
plt.figure(figsize=(20,10))
plt.subplot(3,1,1)
data, = plt.plot(x, y, '.', markersize=10)
good, = plt.plot(x, f(x), 'g')
plt.legend([data, good], ["Noisy Data", "Denoised Data"])
plt.subplot(3,1,2)
data, = plt.plot(x, y, '.', markersize=10)
high_bias, = plt.plot(x, np.zeros_like(x), 'b')
plt.legend([data, high_bias], ["Noisy Data", "High bias"])
plt.subplot(3,1,3)
data, = plt.plot(x, y, '.', markersize=10)
high_variance, = plt.plot(x, y, 'r')
plt.legend([data, high_variance], ["Noisy Data", "High variance"])
plt.show()
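# The three panels contrast the noisy samples with: the true sine curve, a
# constant zero prediction (high bias, ignores the data), and an exact
# interpolation of the noise (high variance, memorizes the data).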
|
StarcoderdataPython
|
1728318
|
#!/usr/bin/env python2
"""install.py
Webware for Python installer
FUTURE
* Look for an install.py in each component directory and run it
(there's not a strong need right now).
* Use distutils or setuptools instead of our own plugin concept.
"""
import os
import sys
from glob import glob
from operator import itemgetter
from MiscUtils import StringIO
from MiscUtils.PropertiesObject import PropertiesObject
class OutputCatcher(object):
"""Auxiliary class for logging output."""
def __init__(self, output, log):
self._output = output
self._log = log
def write(self, stuff):
if stuff:
self._output.write(stuff)
self._output.flush()
self._log.append(stuff)
class Installer(object):
"""Install Webware.
The _comps attribute is a list of components,
each of which is an instance of MiscUtils.PropertiesObject.
"""
## Init ##
def __init__(self):
self._props = PropertiesObject('Properties.py')
self._props['dirname'] = '.'
self._comps = []
self._htHeader, self._htFooter = self.htHeaderAndFooter()
from DocSupport.pytp import PyTP
self._pytp = PyTP()
from DocSupport.autotoc import AutoToC
self._autotoc = AutoToC()
## Debug printing facility ##
def printMsg(self, msg):
if self._verbose:
print ' ' + msg
## Running the installation ##
def run(self, verbose=False,
passprompt=True, defaultpass='', keepdocs=False):
self._verbose = verbose
log = []
stdout, stderr = sys.stdout, sys.stderr
try:
sys.stdout = OutputCatcher(sys.stdout, log)
sys.stderr = OutputCatcher(sys.stderr, log)
self.printHello()
self.clearLogFile()
if not self.checkPyVersion() or not self.checkThreading():
return
self.detectComponents()
self.installDocs(keepdocs)
self.backupConfigs()
self.copyStartScript()
self.compileModules()
self.fixPermissions()
self.setupWebKitPassword(passprompt, defaultpass)
self.printGoodbye()
self.writeLogFile(log)
finally:
sys.stdout, sys.stderr = stdout, stderr
def clearLogFile(self):
"""Remove the install.log file.
This file with the logged output will get created at the
very end of the installation, provided there are no errors.
"""
if os.path.exists('install.log'):
print
print 'Removing log from last installation...'
os.remove('install.log')
def printHello(self):
from time import time, localtime, asctime
print
print '%(name)s %(versionString)s' % self._props
print 'Installer'
print
self.printKeyValue('Cur Date', asctime(localtime(time())))
self.printKeyValue('Python', sys.version.replace(') [', ')\n['))
self.printKeyValue('Op Sys', os.name)
self.printKeyValue('Platform', sys.platform)
self.printKeyValue('Cur Dir', os.getcwd())
def checkPyVersion(self, minver=(2, 6)):
"""Check for minimum required Python version."""
try:
ver = sys.version_info[:len(minver)]
except AttributeError:
ver = (1,)
if ver < minver:
print ('This Release of Webware requires Python %s.\n'
'Your currently used version is Python %s.\n'
'You can download a newer version at: http://www.python.org\n'
% ('.'.join(map(str, minver)), '.'.join(map(str, ver))))
response = raw_input('You may continue to install, '
'but Webware may not perform as expected.\n'
'Do you wish to continue with the installation? [yes/no] ')
return response[:1].upper() == "Y"
return True
def checkThreading(self):
try:
import threading
except ImportError:
print ('Webware requires that Python be compiled with threading support.\n'
'This version of Python does not appear to support threading.\n')
response = raw_input('You may continue, '
'but you will have to run the AppServer with a Python\n'
'interpreter that has threading enabled.\n'
'Do you wish to continue with the installation? [yes/no] ')
return response[:1].upper() == "Y"
return True
def detectComponents(self):
print
print 'Scanning for components...'
dirNames = [dir for dir in os.listdir(os.curdir)
if not dir.startswith('.') and os.path.isdir(dir)]
self._maxCompLen = max(map(len, dirNames))
oldPyVersion = False
column = 0
for dirName in sorted(dirNames):
propName = dirName + '/Properties.py'
try:
print dirName.ljust(self._maxCompLen, '.'),
except TypeError:
print dirName.ljust(self._maxCompLen),
if os.path.exists(propName):
comp = PropertiesObject(propName)
comp['dirname'] = dirName
for key in self._props:
if key not in comp:
comp[key] = self._props[key]
if sys.version_info[:3] < comp['requiredPyVersion']:
oldPyVersion = True
print 'no*',
else:
self._comps.append(comp)
print 'yes',
else:
print 'no ',
if column < 2 and not self._verbose:
print ' ',
column += 1
else:
print
column = 0
if column:
print
if oldPyVersion:
print "* some components require a newer Python version"
self._comps.sort(key=itemgetter('name'))
def setupWebKitPassword(self, prompt, defpass):
"""Setup a password for WebKit Application server."""
print 'Setting the WebKit password...'
print
if prompt:
print 'Choose a password for the WebKit Application Server.'
print 'If you will just press enter without entering anything,'
if defpass is None:
print 'a password will be automatically generated.'
else:
print 'the password specified on the command-line will be used.'
from getpass import getpass
password = getpass()
else:
if defpass is None:
print 'A password will be automatically generated.'
else:
print 'A password was specified on the command-line.'
password = None
print 'You can check the password after installation at:'
appConfig = 'WebKit/Configs/Application.config'
print os.path.normpath(appConfig)
if not password:
if defpass is None:
from string import letters, digits
from random import choice
password = ''.join(map(choice, [letters + digits]*8))
else:
password = <PASSWORD>
try: # read config file
data = open(appConfig).read()
except IOError:
print 'Error reading Application.config file.'
print 'Password not replaced, make sure to edit it by hand.'
return
# This will search for the construct "'AdminPassword': '...'"
# and replace '...' with the content of the 'password' variable:
if data.lstrip().startswith('{'):
pattern = "('AdminPassword'\s*:)\s*'.*?'"
else: # keyword arguments style
pattern = "(AdminPassword\\s*=)\\s*['\"].*?['\"]"
repl = "\g<1> '%s'" % password.replace( # escape critical characters
'\\', '\\\\\\\\').replace("'", "\\\\'").replace('%', '\\\\045')
from re import subn
data, count = subn(pattern, repl, data)
if count != 1:
print "Warning:",
if count > 1:
print "More than one 'AdminPassword' in config file."
else:
print "'AdminPassword' not found in config file."
return
try: # write back config file
open(appConfig, 'w').write(data)
except IOError:
print 'Error writing Application.config (probably no permission).'
print 'Password not replaced, make sure to edit it by hand.'
return
print 'Password replaced successfully.'
def installDocs(self, keep):
self.processHtmlDocFiles()
self.processPyTemplateFiles(keep)
self.createBrowsableSource()
self.createComponentIndex()
self.createComponentIndexes(keep)
self.createDocContexts()
def processHtmlDocFiles(self):
print
print 'Processing html doc files...'
for htmlFile in glob('Docs/*.html'):
self.processHtmlDocFile(htmlFile)
for comp in self._comps:
dir = comp['dirname']
for htmlFile in glob(dir + '/Docs/*.html'):
self.processHtmlDocFile(htmlFile)
def processPyTemplateFiles(self, keep):
print
print 'Processing phtml doc files...'
if keep:
print 'The templates will not be removed.'
else:
print 'The templates will be removed afterwards.'
for inFile in glob('Docs/*.phtml'):
if not os.path.splitext(inFile)[0].endswith('OfComponent'):
self.processPyTemplateFile(inFile, self._props, keep)
for comp in self._comps:
dir = comp['dirname']
for inFile in glob(dir + '/Docs/*.phtml'):
self.processPyTemplateFile(inFile, comp, keep)
def createBrowsableSource(self):
"""Create HTML docs for class hierarchies, summaries, sources etc."""
print
print 'Creating html source, summaries and doc files...'
column = 0
for comp in self._comps:
dir = comp['dirname']
if self._verbose:
print dir, '...'
else:
try:
print dir.ljust(self._maxCompLen, '.'),
except TypeError:
print dir.ljust(self._maxCompLen),
sourceDir = dir + '/Docs/Source'
self.makeDir(sourceDir)
filesDir = sourceDir + '/Files'
self.makeDir(filesDir)
summariesDir = sourceDir + '/Summaries'
self.makeDir(summariesDir)
docsDir = sourceDir + '/Docs'
self.makeDir(docsDir)
if dir == 'MiddleKit':
dir += '/Core'
for pyFilename in glob(dir + '/*.py'):
self.createHighlightedSource(pyFilename, filesDir)
self.createPySummary(pyFilename, summariesDir)
self.createPyDocs(pyFilename, docsDir)
self.createPyDocs(dir, docsDir)
self.createFileList(dir, sourceDir)
self.createClassList(dir, sourceDir)
if not self._verbose:
print "ok",
if column < 2:
print ' ',
column += 1
else:
print
column = 0
if column:
print
def createHighlightedSource(self, filename, dir):
"""Create highlighted HTML source code using py2html."""
from DocSupport import py2html
module = os.path.splitext(os.path.basename(filename))[0]
targetName = '%s/%s.html' % (dir, module)
self.printMsg('Creating %s...' % targetName)
stdout = sys.stdout
sys.stdout = StringIO()
try:
py2html.main((None, '-stdout', '-files', filename))
result = sys.stdout.getvalue()
finally:
sys.stdout = stdout
open(targetName, 'w').write(result)
def createPySummary(self, filename, dir):
"""Create an HTML module summary."""
from DocSupport.PySummary import PySummary
module = os.path.splitext(os.path.basename(filename))[0]
targetName = '%s/%s.html' % (dir, module)
self.printMsg('Creating %s...' % targetName)
sum = PySummary()
sum.readConfig('DocSupport/PySummary.config')
sum.readFileNamed(filename)
html = sum.html()
open(targetName, 'w').write(html)
def createPyDocs(self, filename, dir):
"""Create an HTML module documentation using pydoc."""
import pydoc
package, module = os.path.split(filename)
module = os.path.splitext(module)[0]
if package:
module = package.replace('/', '.') + '.' + module
targetName = '%s/%s.html' % (dir, module)
self.printMsg('Creating %s...' % targetName)
saveDir = os.getcwd()
os.chdir(dir)
try:
stdout = sys.stdout
sys.stdout = StringIO()
try:
try:
pydoc.writedoc(module)
except Exception:
pass
msg = sys.stdout.getvalue()
finally:
sys.stdout = stdout
finally:
os.chdir(saveDir)
if msg:
self.printMsg(msg)
def createFileList(self, filesDir, docsDir):
"""Create an HTML list of the source files."""
from DocSupport.FileList import FileList
name = filesDir.replace('/', '.')
self.printMsg('Creating file list of %s...' % name)
filelist = FileList(name)
filesDir, subDir = (filesDir + '/').split('/', 1)
saveDir = os.getcwd()
os.chdir(filesDir)
try:
filelist.readFiles(subDir + '*.py')
targetName = docsDir + '/FileList.html'
self.printMsg('Creating %s...' % targetName)
filelist.printForWeb('../' + targetName)
finally:
os.chdir(saveDir)
def createClassList(self, filesDir, docsDir):
"""Create an HTML class hierarchy listing of the source files."""
from DocSupport.ClassList import ClassList
name = filesDir.replace('/', '.')
self.printMsg('Creating class list of %s...' % name)
classlist = ClassList(name)
filesDir, subDir = (filesDir + '/').split('/', 1)
saveDir = os.getcwd()
os.chdir(filesDir)
try:
classlist.readFiles(subDir + '*.py')
targetName = docsDir + '/ClassList.html'
self.printMsg('Creating %s...' % targetName)
classlist.printForWeb(False, '../' + targetName)
targetName = docsDir + '/ClassHierarchy.html'
self.printMsg('Creating %s...' % targetName)
classlist.printForWeb(True, '../' + targetName)
finally:
os.chdir(saveDir)
def createComponentIndex(self):
"""Create an HTML component index of Webware itself."""
print 'Creating ComponentIndex.html...'
ht = ["<% header('Webware Documentation', 'titlebar',"
" 'ComponentIndex.css') %>"]
wr = ht.append
wr('<p>Don\'t know where to start? '
'Try <a href="../WebKit/Docs/index.html">WebKit</a>.</p>')
wr('<table class="doc">')
wr('<tr class="ComponentHeadings">'
'<th>Component</th><th>Status</th><th>Ver</th>'
'<th>Py</th><th>Summary</th></tr>')
row = 0
for comp in self._comps:
comp['nameAsLink'] = ('<a href='
'"../%(dirname)s/Docs/index.html">%(name)s</a>' % comp)
comp['indexRow'] = row + 1
wr('<tr class="ComponentRow%(indexRow)i top">'
'<td class="NameVersionCell">'
'<span class="Name">%(nameAsLink)s</span></td>'
'<td>%(status)s</td>'
'<td><span class="Version">%(versionString)s</span></td>'
'<td>%(requiredPyVersionString)s</td>'
'<td>%(synopsis)s</td></tr>' % comp)
row = 1 - row
wr('</table>')
wr("<% footer() %>")
ht = '\n'.join(ht)
ht = self.processPyTemplate(ht, self._props)
open('Docs/ComponentIndex.html', 'w').write(ht)
def createComponentIndexes(self, keep):
"""Create start page for all components."""
indexfile = 'Docs/indexOfComponent.phtml'
if not os.path.exists(indexfile):
return
print
print "Creating index.html for all components..."
index = open(indexfile).read()
link = '<p><a href="%s">%s</a></p>'
for comp in self._comps:
comp['webwareVersion'] = self._props['version']
comp['webwareVersionString'] = self._props['versionString']
# Create 'htDocs' as an HTML fragment corresponding to comp['docs']
ht = []
for doc in comp['docs']:
ht.append(link % (doc['file'], doc['name']))
ht = ''.join(ht)
comp['htDocs'] = ht
# Set up release notes
ht = []
files = glob(comp['dirname'] + '/Docs/RelNotes-*.html')
if files:
releaseNotes = []
for filename in files:
item = dict(dirname=os.path.basename(filename))
filename = item['dirname']
ver = filename[
filename.rfind('-') + 1 : filename.rfind('.')]
item['name'] = ver
if ver == 'X.Y':
item['ver'] = ver.split('.')
else:
i = 0
while i < len(ver) and ver[i] in '.0123456789':
i += 1
if i:
item['ver'] = map(int, ver[:i].split('.'))
releaseNotes.append(item)
releaseNotes.sort(key=itemgetter('ver'), reverse=True)
for item in releaseNotes:
ht.append(link % (item['dirname'], item['name']))
else:
ht.append('<p>None</p>')
ht = '\n'.join(ht)
comp['htReleaseNotes'] = ht
# Write file
filename = comp['dirname'] + '/Docs/index.html'
ht = self.processPyTemplate(index, comp)
open(filename, 'w').write(ht)
if not keep:
os.remove(indexfile)
def createDocContexts(self):
"""Create a WebKit context for every Docs directory."""
print
print 'Making all Docs directories browsable via WebKit...'
# Place an __init__.py file in every Docs directory
docsDirs = ['Docs']
for comp in self._comps:
if comp.get('docs'):
docsDir = comp['dirname'] + '/Docs'
if os.path.isdir(docsDir):
docsDirs.append(docsDir)
for docsDir in docsDirs:
initFile = docsDir + '/__init__.py'
if not os.path.exists(initFile):
open(initFile, 'w').write(
'# this can be browsed as a Webware context\n')
# Copy favicon to the default context
open('WebKit/Examples/favicon.ico', 'wb').write(
open('Docs/favicon.ico', 'rb').read())
def backupConfigs(self):
"""Copy *.config to *.config.default, if they don't already exist.
This allows the user to always go back to the default config file if
needed (for troubleshooting for example).
"""
print
print 'Creating backups of original config files...'
self._backupConfigs(os.curdir)
def _backupConfigs(self, dir):
for filename in os.listdir(dir):
fullPath = os.path.join(dir, filename)
if os.path.isdir(fullPath):
self._backupConfigs(fullPath)
elif (not filename.startswith('.') and
os.path.splitext(filename)[1] == '.config'):
self.printMsg(fullPath)
backupPath = fullPath + '.default'
if not os.path.exists(backupPath):
open(backupPath, 'wb').write(open(fullPath, 'rb').read())
def copyStartScript(self):
"""Copy the most appropriate start script to WebKit/webkit."""
if os.name == 'posix':
print
print 'Copying start script...',
ex = os.path.exists
if (ex('/etc/rc.status')
and ex('/sbin/startproc') and ex('/sbin/killproc')):
s = 'SUSE'
elif (ex('/etc/init.d/functions')
or ex('/etc/rc.d/init.d/functions')):
s = 'RedHat'
elif ex('/sbin/start-stop-daemon'):
s = 'Debian'
elif ex('/etc/rc.subr'):
s = 'NetBSD'
else:
s = 'Generic'
print s
# Copy start script:
s = 'WebKit/StartScripts/SysV/' + s
t = 'WebKit/webkit'
open(t, 'wb').write(open(s, 'rb').read())
def compileModules(self, force=True):
"""Compile modules in all installed componentes."""
from compileall import compile_dir
print
print 'Byte compiling all modules...'
for comp in self._comps:
dir = os.path.abspath(comp['dirname'])
compile_dir(dir, force=force, quiet=True)
def fixPermissions(self):
if os.name == 'posix':
print
print 'Setting permissions on CGI scripts...'
for comp in self._comps:
for filename in glob(comp['dirname'] + '/*.cgi'):
cmd = 'chmod a+rx ' + filename
self.printMsg(cmd)
os.system(cmd)
print 'Setting permission on start script...'
cmd = 'chmod a+rx WebKit/webkit'
self.printMsg(cmd)
os.system(cmd)
def printGoodbye(self):
print '''
Installation looks successful.
Welcome to Webware!
You can already try out the WebKit application server. Start it with
"WebKit%sAppServer" and point your browser to "http://localhost:8080".
Browsable documentation is available in the Docs folders.
You can use "Docs%sindex.html" as the main entry point.
Installation is finished.''' % ((os.sep,)*2)
def writeLogFile(self, log):
"""Write the logged output to the install.log file."""
open('install.log', 'w').write(''.join(log))
## Self utility ##
def printKeyValue(self, key, value):
"""Print a key/value pair."""
value = value.splitlines()
v = value.pop(0)
print '%12s: %s' % (key, v)
for v in value:
print '%14s%s' % ('', v)
def makeDir(self, dirName):
"""Create a directory."""
if not os.path.exists(dirName):
self.printMsg('Making %s...' % dirName)
os.makedirs(dirName)
def htHeaderAndFooter(self):
"""Return header and footer from HTML template."""
template = open('Docs/Template.html').read()
return template.split('\n<!-- page content -->\n', 1)
def processHtmlDocFile(self, htmlFile):
"""Process an HTML file."""
txtFile = os.path.splitext(htmlFile)[0] + '.txt'
if os.path.exists(txtFile):
# A text file with the same name exists:
page = open(htmlFile).read()
if ('<meta name="generator" content="Docutils' in page
and '<h1 class="title">' in page):
# This has obvisouly been created with Docutils; modify it
# to match style, header and footer of all the other docs.
page = page.replace('<h1 class="title">',
'<h1 class="header">')
page = page.replace('</body>\n</html>', self._htFooter)
self.printMsg('Modifying %s...' % htmlFile)
open(htmlFile, 'w').write(page)
def processPyTemplateFile(self, inFile, props, keep):
"""Process a Python template file."""
page = open(inFile).read()
page = self.processPyTemplate(page, props)
outFile = os.path.splitext(inFile)[0] + '.html'
self.printMsg('Creating %s...' % outFile)
open(outFile, 'w').write(page)
if not keep:
os.remove(inFile) # remove template
def processPyTemplate(self, input, props):
"""Process a Python template."""
global scope
def header(title, titleclass=None, style=None):
"""Get the header of a document."""
if not titleclass:
titleclass = 'header'
titleclass = ' class="%s"' % titleclass
link = '<link rel="stylesheet" href="%s" type="text/css">'
stylesheets = ['Doc.css']
if style and style.endswith('.css'):
stylesheets.append(style)
style = None
css = []
for s in stylesheets:
if not scope['dirname'].startswith('.'):
s = '../../Docs/' + s
s = link % s
css.append(s)
if style:
css.extend(('<style type="text/css">',
'<!--', style, '-->', '</style>'))
css = '\n'.join(css)
return scope['htHeader'] % locals()
def footer():
"""Get the footer of a document."""
return scope['htFooter']
scope = props.copy()
scope.update(header=header, htHeader=self._htHeader,
footer=footer, htFooter=self._htFooter)
return self._autotoc.process(self._pytp.process(input, scope))
def printHelp():
print "Usage: install.py [options]"
print "Install WebWare in the local directory."
print
print " -h, --help Print this help screen."
print " -v, --verbose Print extra information messages during install."
print " --no-password-prompt Don't prompt for WebKit password during install."
print " --set-password=... Set the WebKit password to the given value."
print " --keep-templates Keep the templates for creating the docs."
if __name__ == '__main__':
import getopt
verbose = False
passprompt = defaultpass = keepdocs = None
try:
opts, args = getopt.getopt(sys.argv[1:], "hv", ["help", "verbose",
"no-password-prompt", "set-password=", "keep-templates"])
except getopt.GetoptError:
printHelp()
else:
for o, a in opts:
if o in ("-v", "--verbose"):
verbose = True
elif o == "--no-password-prompt":
passprompt = False
elif o == "--set-password":
defaultpass = a
elif o == '--keep-templates':
keepdocs = True
elif o in ("-h", "--help", "h", "help"):
printHelp()
sys.exit(0)
if passprompt is None and defaultpass is None:
passprompt = True
Installer().run(verbose=verbose, passprompt=passprompt,
defaultpass=defaultpass, keepdocs=keepdocs)
|
StarcoderdataPython
|
4827268
|
"""
Tests for the Finances API class.
"""
import unittest
import datetime
import mws
from mws.utils import clean_date
from .utils import CommonAPIRequestTools
class FinancesTestCase(CommonAPIRequestTools, unittest.TestCase):
"""Test cases for Finances."""
api_class = mws.Finances
# TODO: Add remaining methods for Finances
def test_list_financial_event_groups(self):
"""
ListFinancialEventGroups operation.
"""
created_after = datetime.datetime.utcnow()
created_before = datetime.datetime.utcnow()
max_results = 659
params = self.api.list_financial_event_groups(
created_after=created_after,
created_before=created_before,
max_results=max_results,
)
self.assert_common_params(params, action="ListFinancialEventGroups")
self.assertEqual(
params["FinancialEventGroupStartedAfter"], clean_date(created_after)
)
self.assertEqual(
params["FinancialEventGroupStartedBefore"], clean_date(created_before)
)
self.assertEqual(params["MaxResultsPerPage"], str(max_results))
def test_list_financial_event_groups_by_next_token(self):
"""
ListFinancialEventGroupsByNextToken operation, via method decorator.
"""
next_token = "<PASSWORD>"
params = self.api.list_financial_event_groups(next_token=next_token)
self.assert_common_params(params, action="ListFinancialEventGroupsByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_list_financial_event_groups_by_next_token_alias(self):
"""
ListFinancialEventGroupsByNextToken operation, via alias method.
"""
next_token = "<PASSWORD>"
params = self.api.list_financial_event_groups_by_next_token(next_token)
self.assert_common_params(params, action="ListFinancialEventGroupsByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_list_financial_events(self):
"""
ListFinancialEvents operation.
"""
posted_after = datetime.datetime.utcnow()
posted_before = datetime.datetime.utcnow()
amazon_order_id = "123-4567890-1234567"
financial_event_group_id = "22YgYW55IGNhcm5hbCBwbGVhEXAMPLE"
max_results = 156
params = self.api.list_financial_events(
financial_event_group_id=financial_event_group_id,
amazon_order_id=amazon_order_id,
posted_after=posted_after,
posted_before=posted_before,
max_results=max_results,
)
self.assert_common_params(params, action="ListFinancialEvents")
self.assertEqual(params["FinancialEventGroupId"], financial_event_group_id)
self.assertEqual(params["AmazonOrderId"], amazon_order_id)
self.assertEqual(params["PostedAfter"], clean_date(posted_after))
self.assertEqual(params["PostedBefore"], clean_date(posted_before))
self.assertEqual(params["MaxResultsPerPage"], str(max_results))
def test_list_financial_events_by_next_token(self):
"""
ListFinancialEventsByNextToken operation, via method decorator.
"""
next_token = "<PASSWORD>"
params = self.api.list_financial_events(next_token=next_token)
self.assert_common_params(params, action="ListFinancialEventsByNextToken")
self.assertEqual(params["NextToken"], next_token)
def test_list_financial_events_by_next_token_alias(self):
"""
ListFinancialEventsByNextToken operation, via alias method.
"""
next_token = "<PASSWORD>"
params = self.api.list_financial_events_by_next_token(next_token)
self.assert_common_params(params, action="ListFinancialEventsByNextToken")
self.assertEqual(params["NextToken"], next_token)
|
StarcoderdataPython
|
1641782
|
# -*- encoding:utf-8 -*-
import os
import numpy as np
import cPickle as pkl
import json
def pick_article(words):
difficulty_set = ["high"]
raw_data = "../data/RACE"
cnt = 0
avg_article_length = 0
avg_question_length = 0
avg_option_length = 0
avg_article_sentence_count = 0
max_article_sentence_count = -1
min_article_sentence_count = 999
num_que = 0
for data_set in ["test"]:
for d in difficulty_set:
new_raw_data_path = os.path.join(raw_data, data_set, d)
for inf in os.listdir(new_raw_data_path):
cnt += 1
obj = json.load(open(os.path.join(new_raw_data_path, inf), "r"))
article_words = set(obj['article'].lower().split())
w_c = 0
for w in words:
if w not in article_words:
break
w_c += 1
if w_c == len(words):
print obj['article']
print obj['questions']
print obj['options']
print
def find_match():
sentence_store, sentence_att, \
sentence_article, sentence_question, \
sentence_options, sentence_answers, prediction = pkl.load(open('cache/visualization.pkl'))
count = 0
thres = 0.2
for sent_article, q, o, a, \
sent_att, res_att, pred in \
zip(sentence_article, sentence_question,
sentence_options, sentence_answers, \
sentence_store, sentence_att, prediction):
if pred == a == np.argmax(sent_att):
if np.sum(np.asarray(res_att) >= thres) >= 2 and np.sum(np.asarray(res_att) > 0) > 8:
sentences = [' '.join(sent).replace('<PAD>', '').strip(' ') for sent in sent_article]
for i, (sent, att) in enumerate(zip(sentences, res_att)):
if att >= thres:
sentences[i] = '\033[1;31;40m{}\033[0m'.format(sent)
print '|||'.join(sentences)
print ' '.join(q).replace('<PAD>', '').strip()
print ' '.join(o[0]).replace('<PAD>', '').strip()
print ' '.join(o[1]).replace('<PAD>', '').strip()
print ' '.join(o[2]).replace('<PAD>', '').strip()
print ' '.join(o[3]).replace('<PAD>', '').strip()
print a
print sent_att
print res_att
print
count += 1
# if count == 10:
# break
print count
pick_article(['heavy', 'suitcases'])
# find_match()
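# pick_article prints every article in RACE/test/high containing all of the
# requested words; find_match (commented out above) highlights sentences whose
# attention weight exceeds the threshold for questions answered correctly.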
|
StarcoderdataPython
|
3286824
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import time
import numpy as np
os.environ[str("FLAGS_check_nan_inf")] = str("1")
os.environ[str("GLOG_vmodule")] = str("nan_inf_utils_detail=10")
import paddle
import paddle.nn as nn
from paddle.fluid.framework import _enable_legacy_dygraph
_enable_legacy_dygraph()
np.random.seed(0)
def generator():
batch_size = 5
for i in range(5):
curr_train_x = np.random.randint(
batch_size, size=(batch_size, 3)).astype("float32")
if i >= 2:
curr_train_x[0, :] = np.nan
curr_train_x[-1, :] = np.inf
res = []
for i in range(batch_size):
y = i % 3
res.append([y])
y_label = np.array(res).astype('int64')
yield [curr_train_x, y_label]
class TestLayer(nn.Layer):
def __init__(self):
super(TestLayer, self).__init__()
self.linear1 = nn.Linear(3, 400)
self.linear2 = nn.Linear(400, 400)
self.linear3 = nn.Linear(400, 3)
def forward(self, x):
x = self.linear1(x)
x = nn.functional.sigmoid(x)
x = self.linear2(x)
x = nn.functional.sigmoid(x)
x = self.linear3(x)
x = nn.functional.softmax(x)
return x
def check(use_cuda):
paddle.set_device('gpu' if use_cuda else 'cpu')
net = TestLayer()
sgd = paddle.optimizer.SGD(learning_rate=0.05, parameters=net.parameters())
for step, (x, y) in enumerate(generator()):
x = paddle.to_tensor(x)
y = paddle.to_tensor(y)
zero = paddle.zeros(shape=[1], dtype='int64')
fp16_zero = paddle.cast(zero, dtype='float16')
y = y + zero
y_pred = net(x)
cost = nn.functional.cross_entropy(y_pred, y, use_softmax=False)
avg_cost = paddle.mean(cost)
acc_top1 = paddle.metric.accuracy(input=y_pred, label=y, k=1)
print('iter={:.0f}, cost={}, acc1={}'.format(
step, avg_cost.numpy(), acc_top1.numpy()))
sgd.step()
sgd.clear_grad()
if __name__ == '__main__':
if paddle.is_compiled_with_cuda():
try:
check(use_cuda=True)
assert False
except Exception as e:
print(e)
print(type(e))
# Note. Enforce in cuda kernel may not catch in paddle, and
# Exception type will be RuntimeError
assert type(e) == OSError or type(e) == RuntimeError
try:
check(use_cuda=False)
assert False
except Exception as e:
print(e)
print(type(e))
assert type(e) == RuntimeError
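# The generator injects NaN/Inf rows from the third batch onward, so with
# FLAGS_check_nan_inf enabled each run is expected to abort with an exception;
# reaching the `assert False` lines would mean the check failed to trigger.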
|
StarcoderdataPython
|
1669745
|
from dbsp_drp import coadding
def test_group_coadds():
fname_to_spats = {
'a1': [100, 200, 300],
'a2': [100, 200, 300],
'a3': [101, 199],
'a4': [99, 201, 303]
}
correct = [
{
'spats': [99, 100, 100, 101],
'fnames': ['a4', 'a1', 'a2', 'a3']
},
{
'spats': [199, 200, 200, 201],
'fnames': ['a3', 'a1', 'a2', 'a4']
},
{
'spats': [300, 300],
'fnames': ['a1', 'a2']
},
{
'spats': [303],
'fnames': ['a4']
}
]
grouped_coadds = coadding.group_coadds(fname_to_spats)
assert correct == grouped_coadds
|
StarcoderdataPython
|
73435
|
<reponame>pyoor/distiller
import yaml
import os
import shutil
import sqlite3
import sys
class DistillerConfig:
def __init__(self, config_file, section):
self.config = read_config(config_file, section)
try:
self.project_name = self.config['name']
except KeyError:
raise Exception(" Project name not defined.")
self.trace_queue = "%s-trace-queue" % self.project_name
self.trace_results = "%s-trace-results" % self.project_name
self.min_queue = "%s-min-queue" % self.project_name
self.min_results = "%s-min-results" % self.project_name
try:
self.operations = self.config['operations']
if len(self.operations) == 0:
                raise Exception("You must select at least one mode of operation.")
except:
            raise Exception("You must select at least one mode of operation.")
try:
self.mode = self.config['filter']['mode']
self.modules = self.config['modules']
except KeyError:
# Optional arguments
self.mode = None
self.modules = None
if section == "server":
try:
self.seed_dir = self.config['seed_dir']
except KeyError:
raise Exception("No working path defined.")
try:
self.working_dir = self.config['working_dir']
except KeyError:
raise Exception("No working path defined.")
self.project_dir = os.path.join(self.working_dir, self.project_name)
self.db_path = os.path.join(self.project_dir, "backup.db")
self.min_dir = os.path.join(self.project_dir, "minimized")
self.trace_dir = os.path.join(self.project_dir, "traces")
self.results_dir = os.path.join(self.project_dir, "results")
if os.path.isdir(self.project_dir):
action = None
while action != "R" and action != "A":
action = raw_input("Project Exists! Replace or Append? [R/A]: ").upper()
if action == "R":
confirm = None
while confirm != "Y" and confirm != "N":
confirm = raw_input("Are you sure? All data will be deleted! [Y/N]: ").upper()
if confirm == "Y":
shutil.rmtree(self.project_dir)
os.makedirs(self.project_dir)
os.makedirs(self.min_dir)
os.makedirs(self.trace_dir)
os.makedirs(self.results_dir)
sql = sqlite3.connect(self.db_path)
c = sql.cursor()
c.execute('BEGIN TRANSACTION')
c.execute('''CREATE TABLE IF NOT EXISTS modules
(num INTEGER PRIMARY KEY, name TEXT, UNIQUE (name))''')
c.execute('''CREATE TABLE IF NOT EXISTS seeds
(num INTEGER PRIMARY KEY, seed_name TEXT, trace_name TEXT, ublock_cnt INT, UNIQUE (seed_name))''')
c.execute('''CREATE TABLE IF NOT EXISTS master_lookup
(bblock TEXT PRIMARY KEY)''')
# Results are calculated using the full data set - Wipe if they exist!
c.execute('''DROP TABLE IF EXISTS results''')
c.execute('''CREATE TABLE results (name TEXT PRIMARY KEY, ublock_cnt INT)''')
sql.commit()
else:
sys.exit()
elif section == "client":
try:
self.host = self.config['host']
except KeyError:
raise Exception("No host defined.")
try:
self.drio_path = self.config['drio_path']
if not os.path.isfile(self.drio_path):
raise Exception("Can not find DynamoRio - %s" % self.drio_path)
except KeyError:
raise Exception("No DynamoRio path defined.")
try:
self.target_path = self.config['target_path']
if not os.path.isfile(self.target_path):
raise Exception("Can not find target - %s" % self.target_path)
except KeyError:
raise Exception("No target path defined.")
try:
self.w_time = self.config['wait_time']
except KeyError:
raise Exception("No wait time defined.")
try:
self.m_time = self.config['max_timeout']
except KeyError:
raise Exception("No max timeout defined.")
# Optional args
try:
self.target_args = self.config['target_args']
if self.target_args is None:
self.target_args = ''
except KeyError:
self.target_args = None
try:
self.pre_cmd = self.config['pre_cmd']
except KeyError:
self.pre_cmd = None
try:
self.post_cmd = self.config['post_cmd']
except KeyError:
self.post_cmd = None
def read_config(config_file, section):
sections = ['project', section]
with open(config_file, 'r') as stream:
data = yaml.load(stream)
config = {}
try:
for section in sections:
for k, v in data[section].iteritems():
config[k] = v
except KeyError:
raise Exception(" Unable to find section %s" % section)
return config
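# A minimal illustrative YAML layout for the config file (section and key
# names inferred from the code above; real projects may add more settings):
#
#   project:
#       name: demo
#       operations: [trace, minimize]
#   server:
#       seed_dir: /path/to/seeds
#       working_dir: /path/to/work
#   client:
#       host: 127.0.0.1
#       drio_path: /opt/dynamorio/bin64/drrun
#       target_path: /usr/bin/target
#       wait_time: 5
#       max_timeout: 30
#
# Usage sketch: server_cfg = DistillerConfig('config.yaml', 'server')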
|
StarcoderdataPython
|
3364160
|
"""Configuration for a stack."""
# Copyright (C) 2015 <NAME>, <NAME> and <NAME>.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
import networkx
from faucet.conf import Conf, test_config_condition
class Stack(Conf):
"""Stores state related to DP stack information, this includes the current elected root as that
is technically a fixed allocation for this DP Stack instance."""
defaults = {
# Sets the root priority value of the current DP with stacking
'priority': None,
# Use the stack route algorithms, will be forced true if routing is enabled
'route_learning': False,
# Number of update time intervals for a down stack node to still be considered healthy
'down_time_multiple': 3,
# Minimum percentage value of required UP stack ports for this stack
# node to be considered healthy
'min_stack_health': 1.0,
# Minimum percentage value of required UP LACP ports for this stack
# node to be considered healthy
'min_lacp_health': 1.0,
}
defaults_types = {
'priority': int,
'route_learning': bool,
'down_time_multiple': int,
'min_stack_health': float,
'min_lacp_health': float,
}
def __init__(self, _id, dp_id, name, canonical_port_order, lacp_down_ports, lacp_ports, conf):
"""
Constructs a new stack object
Args:
_id (str): Name of the configuration key
dp_id (int): DP ID of the DP that holds this stack instance
name (str): Name of the DP that holds this stack instance
canonical_port_order (func): Function to order ports in a standardized way
lacp_down_ports (func): Returns a tuple of the not UP LACP ports for this stack node
lacp_ports (func): Returns a tuple of all LACP ports for this stack node
conf (dict): Stack configuration
"""
self.name = name
# Function to order ports in a standardized way
self.canonical_port_order = canonical_port_order
self.lacp_down_ports = lacp_down_ports
self.lacp_ports = lacp_ports
# Stack configuration options
self.priority = None
self.route_learning = None
self.down_time_multiple = None
self.min_stack_health = None
self.min_lacp_health = None
# Ports that have stacking configured
self.ports = []
# Stack graph containing all the DPs & ports in the stacking topology
self.graph = None
# Additional stacking information
self.root_name = None
self.roots_names = None
self.root_flood_reflection = None
# Whether the stack node is currently healthy
# dyn_healthy_info := (<running>, <stack ports>, <lacp ports>)
self.dyn_healthy_info = (False, 0.0, 0.0)
self.dyn_healthy = False
super().__init__(_id, dp_id, conf)
def clone_dyn_state(self, prev_stack, dps=None):
"""Copy dyn state from the old stack instance when warm/cold starting"""
if prev_stack:
self.dyn_healthy = prev_stack.dyn_healthy
self.dyn_healthy_info = prev_stack.dyn_healthy_info
if dps:
stack_port_dps = [dp for dp in dps if dp.stack_ports()]
for dp in stack_port_dps:
for port in dp.stack_ports():
port_up = False
if port.is_stack_up():
port_up = True
elif port.is_stack_init() and port.stack['port'].is_stack_up():
port_up = True
self.modify_link(dp, port, add=port_up)
def live_timeout_healthy(self, last_live_time, now, update_time):
"""
Determines the timeout of the current stack node, and whether
the current stack node can be considered healthy according to
the `down_time_multiple` number of stack root update time intervals.
Args:
last_live_time (float): Last known live time for this current stack node
now (float): Current time
update_time (int): Update time interval
Return:
bool: If node down time is still in update time interval threshold; considered healthy,
float: Time elapsed since timed out
"""
# Time elapsed for the number of safe down time multiples before considered unhealthy
down_time = self.down_time_multiple * update_time
# Final time at which nodes are still considered healthy
health_timeout = now - down_time
# If node last known live time was greater than the health timeout then it is healthy
timeout_healthy = last_live_time >= health_timeout
return timeout_healthy, health_timeout
def stack_port_healthy(self):
"""
Determines the percentage of UP stack ports, and whether
the current stack node can be considered healthy according to
the `min_stack_health` configuration option.
Return:
bool: Whether threshold from DOWN stack ports is met; considered healthy,
float: Percentage of stack ports UP out of all stack ports
"""
down_ports = self.down_ports()
all_ports = self.ports
if len(all_ports) == 0:
return True, 1.0
percentage = 1.0 - (float(len(down_ports) / float(len(all_ports))))
stack_ports_healthy = percentage >= self.min_stack_health
return stack_ports_healthy, percentage
def lacp_port_healthy(self):
"""
Determines the percentage of UP LACP ports, and whether
the current stack node can be considered healthy according to
the `min_lacp_health` configuration option.
Return:
bool: Whether threshold from DOWN LACP ports is met; considered healthy,
float: Percentage of LACP ports UP out of all lacp ports
"""
down_ports = self.lacp_down_ports()
all_ports = self.lacp_ports()
if len(all_ports) == 0:
return True, 1.0
percentage = 1.0 - (float(len(down_ports) / float(len(all_ports))))
lacp_ports_healthy = percentage >= self.min_lacp_health
return lacp_ports_healthy, percentage
def update_health(self, now, dp_last_live_time, update_time):
"""
Determines whether the current stack node is healthy
Args:
now (float): Current time
last_live_times (dict): Last live time value for each DP
update_time (int): Stack root update interval time
Return:
tuple: Current stack node health state,
str: Reason for the current state
"""
reason = ''
last_live_time = dp_last_live_time.get(self.name, 0)
timeout_healthy, health_timeout = self.live_timeout_healthy(
last_live_time, now, update_time)
if not timeout_healthy:
# Too long since DP last running, if DP not running then
# number of UP stack or LACP ports should be 0
reason += 'last running %us ago (timeout %us)' % (now - last_live_time, health_timeout)
self.dyn_healthy_info = (False, 0.0, 0.0)
self.dyn_healthy = False
return self.dyn_healthy, reason
reason += 'running %us ago' % (now - last_live_time)
if reason:
reason += ', '
stack_ports_healthy, stack_percentage = self.stack_port_healthy()
if not stack_ports_healthy:
# The number of DOWN stack ports surpasses the threshold for DOWN stack port tolerance
reason += 'stack ports %s (%.0f%%) not up' % (
list(self.down_ports()), (1.0 - stack_percentage) * 100.0)
else:
reason += '%.0f%% stack ports running' % (stack_percentage * 100.0)
if self.lacp_ports():
if reason:
reason += ', '
lacp_ports_healthy, lacp_percentage = self.lacp_port_healthy()
if not lacp_ports_healthy:
# The number of DOWN LACP ports surpasses the threshold for DOWN LACP port tolerance
reason += 'lacp ports %s (%.0f%%) not up' % (
list(self.lacp_down_ports()), (1.0 - lacp_percentage) * 100.0)
else:
reason += '%.0f%% lacp ports running' % (lacp_percentage * 100.0)
else:
# No LACP ports in node, so default to 100% UP & don't report information
lacp_ports_healthy = True
lacp_percentage = 0.0
self.dyn_healthy_info = (timeout_healthy, stack_percentage, lacp_percentage)
if timeout_healthy and stack_ports_healthy and lacp_ports_healthy:
self.dyn_healthy = True
else:
self.dyn_healthy = False
return self.dyn_healthy, reason
@staticmethod
def nominate_stack_root(stacks):
"""Return stack names in priority order and the chosen root"""
def health_priority(stack):
# Invert the health priority info so it is sorted correctly
# in relation to priority and the binary health
invert_info = (1.0 - stack.dyn_healthy_info[1],
1.0 - stack.dyn_healthy_info[2])
return (not stack.dyn_healthy, *invert_info, stack.priority, stack.dp_id)
stack_priorities = sorted(stacks, key=health_priority)
priority_names = tuple(stack.name for stack in stack_priorities)
nominated_name = priority_names[0]
return priority_names, nominated_name
def resolve_topology(self, dps, meta_dp_state):
"""
Resolve & verify correct inter-DP stacking config
Args:
dps (list): List of configured DPs
meta_dp_state (MetaDPState): Provided if reloading when choosing a new root DP
"""
stack_dps = [dp for dp in dps if dp.stack is not None]
stack_priority_dps = [dp for dp in stack_dps if dp.stack.priority]
stack_port_dps = [dp for dp in dps if dp.stack_ports()]
if not stack_priority_dps:
test_config_condition(stack_dps, 'stacking enabled but no root DP')
return
if not self.ports:
return
for dp in stack_priority_dps:
test_config_condition(not isinstance(dp.stack.priority, int), (
'stack priority must be type %s not %s' % (
int, type(dp.stack.priority))))
test_config_condition(dp.stack.priority <= 0, (
'stack priority must be > 0'))
self.roots_names, self.root_name = self.nominate_stack_root(
[dp.stack for dp in stack_priority_dps])
if meta_dp_state:
# If meta_dp_state exists, then we are reloading a new instance of the stack
# for a new 'dynamically' chosen root
if meta_dp_state.stack_root_name in self.roots_names:
self.root_name = meta_dp_state.stack_root_name
for dp in stack_port_dps:
for vlan in dp.vlans.values():
if vlan.faucet_vips:
self.route_learning = True
edge_count = Counter()
graph = networkx.MultiGraph()
for dp in stack_port_dps:
graph.add_node(dp.name)
for port in dp.stack_ports():
edge_name = Stack.modify_topology(graph, dp, port)
edge_count[edge_name] += 1
for edge_name, count in edge_count.items():
test_config_condition(count != 2, '%s defined only in one direction' % edge_name)
if graph.size() and self.name in graph:
self.graph = graph
for dp in graph.nodes():
path_to_root_len = len(self.shortest_path(self.root_name, src_dp=dp))
test_config_condition(
path_to_root_len == 0, '%s not connected to stack' % dp)
if self.longest_path_to_root_len() > 2:
self.root_flood_reflection = True
@staticmethod
def modify_topology(graph, dp, port, add=True):
"""Add/remove an edge to the stack graph which originates from this dp and port."""
def canonical_edge(dp, port):
peer_dp = port.stack['dp']
peer_port = port.stack['port']
sort_edge_a = (
dp.name, port.name, dp, port)
sort_edge_z = (
peer_dp.name, peer_port.name, peer_dp, peer_port)
sorted_edge = sorted((sort_edge_a, sort_edge_z))
edge_a, edge_b = sorted_edge[0][2:], sorted_edge[1][2:]
return edge_a, edge_b
def make_edge_name(edge_a, edge_z):
edge_a_dp, edge_a_port = edge_a
edge_z_dp, edge_z_port = edge_z
return '%s:%s-%s:%s' % (
edge_a_dp.name, edge_a_port.name,
edge_z_dp.name, edge_z_port.name)
def make_edge_attr(edge_a, edge_z):
edge_a_dp, edge_a_port = edge_a
edge_z_dp, edge_z_port = edge_z
return {
'dp_a': edge_a_dp, 'port_a': edge_a_port,
'dp_z': edge_z_dp, 'port_z': edge_z_port}
edge = canonical_edge(dp, port)
edge_a, edge_z = edge
edge_name = make_edge_name(edge_a, edge_z)
edge_attr = make_edge_attr(edge_a, edge_z)
edge_a_dp, _ = edge_a
edge_z_dp, _ = edge_z
if add:
graph.add_edge(
edge_a_dp.name, edge_z_dp.name,
key=edge_name, port_map=edge_attr)
elif (edge_a_dp.name, edge_z_dp.name, edge_name) in graph.edges:
graph.remove_edge(edge_a_dp.name, edge_z_dp.name, edge_name)
return edge_name
def modify_link(self, dp, port, add=True):
"""Update the stack topology according to the event"""
return Stack.modify_topology(self.graph, dp, port, add)
def hash(self):
"""Return hash of a topology graph"""
return hash(tuple(sorted(self.graph.degree())))
def get_node_link_data(self):
"""Return network stacking graph as a node link representation"""
return networkx.readwrite.json_graph.node_link_data(self.graph)
def add_port(self, port):
"""Add a port to this stack"""
self.ports.append(port)
def any_port_up(self):
"""Return true if any stack port is UP"""
for port in self.ports:
if port.is_stack_up():
return True
return False
def down_ports(self):
"""Return tuple of not running stack ports"""
return tuple(port for port in self.ports if not port.is_stack_up())
def canonical_up_ports(self, ports=None):
"""Obtains list of UP stack ports in canonical order"""
if ports is None:
ports = self.ports
return self.canonical_port_order([port for port in ports if port.is_stack_up()])
def shortest_path(self, dest_dp, src_dp=None):
"""Return shortest path to a DP, as a list of DPs."""
if src_dp is None:
src_dp = self.name
if self.graph:
try:
return sorted(networkx.all_shortest_paths(self.graph, src_dp, dest_dp))[0]
except (networkx.exception.NetworkXNoPath, networkx.exception.NodeNotFound):
pass
return []
def shortest_path_to_root(self, src_dp=None):
"""Return shortest path to root DP, as list of DPs."""
return self.shortest_path(self.root_name, src_dp=src_dp)
def is_root(self):
"""Return True if this DP is the root of the stack."""
return self.name == self.root_name
def is_root_candidate(self):
"""Return True if this DP could be a root of the stack."""
return self.name in self.roots_names
def is_edge(self):
"""Return True if this DP is a stack edge."""
return (not self.is_root()
and self.longest_path_to_root_len() == len(self.shortest_path_to_root()))
def shortest_path_port(self, dest_dp):
"""Return first port on our DP, that is the shortest path towards dest DP."""
shortest_path = self.shortest_path(dest_dp)
if len(shortest_path) > 1:
peer_dp = shortest_path[1]
peer_dp_ports = self.peer_up_ports(peer_dp)
if peer_dp_ports:
return peer_dp_ports[0]
return None
def peer_up_ports(self, peer_dp):
"""Return list of stack ports that are up towards a peer."""
return self.canonical_port_order([
port for port in self.ports if port.running() and (
port.stack['dp'].name == peer_dp)])
def longest_path_to_root_len(self):
"""Return length of the longest path to root in the stack."""
if not self.graph or not self.root_name:
return None
len_paths_to_root = [
len(self.shortest_path(self.root_name, src_dp=dp))
for dp in self.graph.nodes()]
if len_paths_to_root:
return max(len_paths_to_root)
return None
def is_in_path(self, src_dp, dst_dp):
"""Return True if the current DP is in the path from src_dp to dst_dp
Args:
src_dp (str): DP name
dst_dp (str): DP name
Returns:
bool: True if self is in the path from the src_dp to the dst_dp.
"""
path = self.shortest_path(dst_dp, src_dp=src_dp)
return self.name in path
def peer_symmetric_up_ports(self, peer_dp):
"""Return list of stack ports that are up towards us from a peer"""
# Sort adjacent ports by canonical port order
return self.canonical_port_order([
port.stack['port'] for port in self.ports if port.running() and (
port.stack['dp'].name == peer_dp)])
def shortest_symmetric_path_port(self, peer_dp):
"""Return port on our DP that is the first port of the adjacent DP towards us"""
shortest_path = self.shortest_path(self.name, src_dp=peer_dp)
if len(shortest_path) == 2:
adjacent_up_ports = self.peer_symmetric_up_ports(peer_dp)
if adjacent_up_ports:
return adjacent_up_ports[0].stack['port']
return None
|
StarcoderdataPython
|
3209900
|
<reponame>pnarsina/w251_chess_objectid_n_rl
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch
from gym_chess_env import ChessBoard_gym
from agent_chess_pytorch import DQN
import numpy as np
import math
import chess
# In[2]:
class Gen_Legal_move:
def __init__(self, model_weights="checkpoint.pth-4rook_best-adamw.tar"):
super(Gen_Legal_move, self).__init__()
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = DQN(8, 8, 112).to(device)
        # Keep the restored network on the instance so other methods can use it.
        self.model = self.load_from_saved_model(model, model_weights)
    @staticmethod
    def load_from_saved_model(model, path="checkpoint.pth.tar"):
        # Restore the trained weights from a saved checkpoint.
        checkpoint = torch.load(path)
        model.load_state_dict(checkpoint['state_dict'])
        return model
    def generate_legal_moves(self, board, num_moves):
        # Build the gym chess environment around the requested board position.
        env = ChessBoard_gym()
        env.set_board(board)
        starting_pos_FEN = env.get_FEN()
        state = torch.from_numpy(env.reset()).float()
        observation_space = 64
        state_model_input = torch.reshape(state, [1, observation_space])
        action_id = self.model(state_model_input).argmax(1)[0].detach()
        legal_move_ids = []
        for i in range(0, num_moves):
            next_state, reward, _, _ = env.step(action_id)
            next_state_model_input = torch.from_numpy(next_state).float()
            next_state_model_input = torch.reshape(next_state_model_input, [1, observation_space])
            actions_list = self.model(next_state_model_input)
            action_id = actions_list.argmax(1)[0].detach()
            legal_move_ids.append(action_id)
        return legal_move_ids
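# A minimal usage sketch (the checkpoint path and the python-chess board are
# illustrative; they are not shipped with this snippet):
#   generator = Gen_Legal_move("checkpoint.pth-4rook_best-adamw.tar")
#   move_ids = generator.generate_legal_moves(chess.Board(), num_moves=5)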
|
StarcoderdataPython
|
113091
|
<gh_stars>0
import math
from geopy.distance import vincenty
from geopy.point import Point
from os import path
from slayer import file_utils, constants
from datetime import datetime
from isodate import parse_datetime, parse_duration, datetime_isoformat
import pytz
import pandas as pd
def lat2y(a):
return 180.0 / math.pi * math.log(
math.tan(math.pi / 4.0 + a * (math.pi / 180.0) / 2.0))
def y2lat(a):
return 180.0/math.pi*(2.0*math.atan(math.exp(a*math.pi/180.0))-math.pi/2.0)
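# Quick sanity check for the Web-Mercator helpers (values approximate):
#   lat2y(45.0)          -> ~50.5
#   y2lat(lat2y(45.0))   -> ~45.0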
def convert_lat(std_data):
new_lat = std_data[constants.lat_column].apply(lat2y)
std_data[constants.lat_column] = new_lat
return std_data
def convert_time_intervals(time_intervals):
return [[parse_datetime(time_int[0]), parse_datetime(time_int[1])]
for time_int in time_intervals]
def filter_df_time_intervals(data, time_intervals):
dfs = [data[time_int[0]:time_int[1]] for time_int in time_intervals]
return pd.concat(dfs)
def index_datetime(std_data, tz='UTC'):
std_data.index = pd.to_datetime(std_data[constants.start_date_column],
infer_datetime_format=True)
std_data.index = std_data.index.tz_localize('UTC').tz_convert(tz)
std_data.sort_index(inplace=True)
return std_data
def clap_time_intervals(time_intervals, slice_duration):
intervals = convert_time_intervals(time_intervals)
grouper = time_grouper(slice_duration)
def clap(boundary): return pd.DataFrame(index=[boundary]).groupby(grouper)
def extract(boundary): return next(iter(boundary.groups))
clapped_time_intervals = [[datetime_isoformat(extract(clap(boundary)))
for boundary in time_int]
for time_int in intervals]
return clapped_time_intervals
def time_grouper(slice_duration):
# yearly durations
if slice_duration[-1] == 'Y':
return pd.TimeGrouper('AS')
# monthly durations
elif slice_duration[-1] == 'M' and 'PT' not in slice_duration:
return pd.TimeGrouper(slice_duration.replace('P', '') + 'S')
# all other durations
else:
return pd.TimeGrouper(parse_duration(slice_duration))
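# Illustrative mappings for ISO-8601 slice durations (assuming a pandas
# version that still exposes pd.TimeGrouper):
#   time_grouper('P1Y')  -> pd.TimeGrouper('AS')                      # yearly
#   time_grouper('P1M')  -> pd.TimeGrouper('1MS')                     # monthly
#   time_grouper('PT1H') -> pd.TimeGrouper(parse_duration('PT1H'))    # hourly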
def approximated_time_intervals(tz):
source_intetval = [datetime(1970, 1, 2, 0, 0, 0),
datetime(1970, 1, 3, 0, 0, 0)]
utc_interval = [pytz.timezone(tz).localize(dt).astimezone(pytz.UTC) for dt in source_intetval]
return [datetime_isoformat(dt) for dt in utc_interval]
def fit_bbox(bottom_right_lon, bottom_right_lat, top_left_lon, top_left_lat, cellsize):
    size, (min_lon, min_lat), step = get_bbox_geometry_by_meters(constants.Bbox(bottom_right_lat=bottom_right_lat,
                                                                                bottom_right_lon=bottom_right_lon,
                                                                                top_left_lat=top_left_lat,
                                                                                top_left_lon=top_left_lon), cellsize)
new_bottom_right_lon = min_lon + step * size[0]
new_top_left_lat = y2lat(min_lat + step * size[1])
return new_bottom_right_lon, bottom_right_lat, top_left_lon, new_top_left_lat
def get_bbox_geometry_by_meters(bbox, cell_size, resolution=None):
min_lat, max_lat = lat2y(bbox.min_lat), lat2y(bbox.max_lat)
if not resolution:
width = vincenty(Point(latitude=bbox.min_lat, longitude=bbox.min_lon),
Point(latitude=bbox.min_lat, longitude=bbox.max_lon)).meters
height = vincenty(Point(latitude=bbox.min_lat, longitude=bbox.min_lon),
Point(latitude=bbox.max_lat, longitude=bbox.min_lon)).meters
x_size = math.ceil(width / cell_size)
y_size = math.ceil(height / cell_size)
size = (x_size, y_size)
else:
size = (resolution['x'], resolution['y'])
print('Volume Resolution: {}'.format(size))
step = round(abs((bbox.max_lon - bbox.min_lon) / size[0]), 5)
return size, (bbox.min_lon, min_lat), step
def get_bbox_geometry_by_degree(bbox, cell_size, resolution=None):
if not resolution:
width = bbox.max_lon - bbox.min_lon
height = bbox.max_lat - bbox.min_lat
resolution_x = math.ceil(width / cell_size)
resolution_y = math.ceil(height / cell_size)
size = (resolution_x, resolution_y)
else:
size = (resolution['x'], resolution['y'])
step = round(cell_size, 5)
return size, (bbox.min_lon, bbox.min_lat), step
def export_slice(data_slice, value_type, dataset, subset_id, timestamp):
slice_dir = file_utils.slices_dirpath(dataset, subset_id)
filepath = path.join(slice_dir, "{}.raw".format(timestamp))
data_slice.astype(value_type).tofile(filepath)
|
StarcoderdataPython
|
1728241
|
from gbmgeometry.utils.plotting.space_plot import animate_in_space, plot_in_space
from gbmgeometry import PositionInterpolator
from gbmgeometry.utils.plotting.sky_point import balrog_to_skypoints
from gbmgeometry.utils.package_utils import get_path_of_data_file
def test_space_plot(interpolator):
tmin, tmax = interpolator.minmax_time()
plot_in_space(
interpolator,
tmin,
show_detector_pointing=True,
show_moon=True,
show_sun=True,
show_stars=True,
)
def test_space_ani(interpolator):
plot_in_space(
interpolator,
10,
show_detector_pointing=True,
show_moon=True,
show_sun=True,
show_stars=True,
)
def test_point_space_plotting():
pi = PositionInterpolator.from_trigdat(get_path_of_data_file("balrog_trig.fits"))
skypoints = balrog_to_skypoints(
get_path_of_data_file("balrog.fits"),
new_nside=2 ** 5,
cmap="viridis",
as_point=True,
)
def test_ray_space_plotting():
pi = PositionInterpolator.from_trigdat(get_path_of_data_file("balrog_trig.fits"))
skypoints = balrog_to_skypoints(
get_path_of_data_file("balrog.fits"),
new_nside=2 ** 5,
cmap="viridis",
as_point=False,
)
plot_in_space(
pi,
0,
sky_points=skypoints,
show_detector_pointing=True,
show_moon=True,
earth_time="day",
show_stars=True,
)
|
StarcoderdataPython
|
3355115
|
<gh_stars>0
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorsketch utilities.
"""
import numpy as np
import tensorflow.compat.v1 as tf
# String utilities
def count_leading_whitespace(string):
return len(string) - len(string.lstrip(" "))
def shorten(string, num_lines=4):
strings = string.split("\n")
if len(strings) <= num_lines:
return string
head = strings[:num_lines - 2]
mid = " " * count_leading_whitespace(strings[num_lines - 2]) + "...,"
tail = strings[-1]
return "\n".join(head + [mid, tail])
def indent(string, spaces=4):
strings = string.split("\n")
return "\n".join([" " * spaces + string for string in strings])
# Tensor utilities
def pack(x):
if isinstance(x, tuple):
return x
else:
return (x,)
# pylint: disable=syntax-error
def shapes_to_zeros(*maybe_typed_shapes):
tensors = []
for maybe_typed_shape in maybe_typed_shapes:
if elem_isinstance(maybe_typed_shape, int):
tensors.append(tf.zeros(maybe_typed_shape))
else:
shape, dtype = maybe_typed_shape
tensors.append(tf.zeros(shape, dtype))
return tuple(tensors)
# List utilities
def elem_isinstance(lst, cls):
return all([isinstance(x, cls) for x in lst])
# Layer utilities
def compute_fan(kernel):
shape = kernel.shape
receptive_field = np.prod(kernel.shape[:-2]) # returns 1 if kernel is 2D
fan_in = int(receptive_field * shape[-2])
fan_out = int(receptive_field * shape[-1])
return fan_in, fan_out
def compute_out_dims(in_dims, kernel_size, stride,
padding, output_padding,
dilation):
"""Computes the output dimensions of convolution.
The formulas below are based on what Keras does.
Args:
in_dims: number of input dimensions.
kernel_size: size of kernel.
stride: size of stride.
padding: amount of padding on both ends of input.
output_padding: padding adjustment for disambiguating out_dims.
dilation: amount of dilation for convolution.
Returns:
The computed value of output dimensions.
"""
kernel_size = (kernel_size - 1) * dilation + 1
if output_padding is None:
if padding == "same":
out_dims = in_dims * stride
elif padding == "valid":
out_dims = in_dims * stride + max(kernel_size - stride, 0)
else:
if padding == "same":
out_dims = ((in_dims - 1) * stride + output_padding)
elif padding == "valid":
out_dims = ((in_dims - 1) * stride + kernel_size + output_padding)
return out_dims
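# Worked example for compute_out_dims (a transposed-convolution style shape):
#   in_dims=32, kernel_size=3, stride=2, padding="same",
#   output_padding=None, dilation=1
#   -> effective kernel = (3 - 1) * 1 + 1 = 3, and with "same" padding and no
#      output_padding the result is out_dims = 32 * 2 = 64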
# Tensor utilities
def assign_moving_average(target, value, momentum):
target.assign(momentum * target + (1 - momentum) * value)
# tf.function utilities
class Function(object):
"""A python function wrapper to support tf.function with resetting.
"""
def __init__(self, python_function):
self.tf_function = tf.function(python_function)
self.python_function = python_function
def reset(self):
self.tf_function = tf.function(self.python_function)
def __call__(self, *args, **kwargs):
return self.tf_function(*args, **kwargs)
def advanced_function(function):
return Function(function)
def reset_tf_function(tf_function):
return tf.function(tf_function.python_function)
|
StarcoderdataPython
|
3396288
|
from django.contrib import admin
from .models import Color
admin.site.register(Color)
|
StarcoderdataPython
|
1768417
|
<reponame>azuline/hey-bro-check-log
"""This module contains the generation functions for dicts of to-match lines
when marking up log files.
"""
import html
import re
from heybrochecklog.shared import format_pattern as fmt_ptn
def eac_track_matches(translation):
"""Generate the list of to-match lines from translations."""
source_one = {
'log4': ['1269', '1270', '1217', '1299', '1227', '1218'],
'good': ['1220', '1281'],
'crc': ['1271', '1272'],
}
source_two = {
'log3': ['1221'],
'badish': ['1330', '1283', '1210', '1211'],
'bad': ['1212', '1213', '1214', '1215', '1216', '1228'],
}
return {
**generate_match_type(translation, source_one),
**generate_match_type(translation, source_two, append='.*'),
}
def xld_track_matches():
"""Return a dictionary containing the XLD matches."""
return {
'full_line': [
['log4', 'Statistics'],
['good', '->Accurately ripped'],
['badish', '->Track not present in AccurateRip database'],
['bad', '->Rip may not be accurate'],
['bad', 'List of damaged sector positions +:'],
['badish', r'\(\d+\) \d{2}:\d{2}:\d{2}'],
['log3', r'\/.+\.(?:[Ff][Ll][Aa][Cc]|[Ww][Aa][Vv]|[Mm][Pp]3|[Aa][Aa][Cc])'],
],
'crc': [r'CRC32 hash \(test run\)', 'CRC32 hash'],
'statistics': {
'bad': [
'Read error',
r'Skipped \(treated as error\)',
'Inconsistency in error sectors',
'Damaged sector count',
],
'badish': [
r'Jitter error \(maybe fixed\)',
'Retry sector count',
r'Edge jitter error \(maybe fixed\)',
r'Atom jitter error \(maybe fixed\)',
r'Drift error \(maybe fixed\)',
r'Dropped bytes error \(maybe fixed\)',
r'Duplicated bytes error \(maybe fixed\)',
],
},
}
def eac_footer_matches(translation):
"""Matches for the EAC footer block."""
source = {
'good': ['1336', '1222', '1225'],
'badish': ['1333', '1334', '1344', '1335'],
'bad': ['1284', '1224'],
'log4 log5': ['1275'],
}
matches = generate_match_type(translation, source)
# AccurateRip stuff
source = {'good': ['1340'], 'badish': ['1339', '1341'], 'bad': ['1342', '1343']}
matches = generate_match_type(translation, source, matches=matches, prepend='.+')
if '1290' in translation:
ar_prepend = r'{} +\d+ +'.format(translation['1290'])
source = {'good': ['1277'], 'badish': ['1279'], 'bad': ['1276', '1278']}
matches = generate_match_type(
translation, source, matches=matches, prepend=ar_prepend, append='.+'
)
# Checksum stuff
if '1325' in translation:
matches['good'].append('==== {} [0-9A-F]+ ===='.format(translation['1325']))
# EAC HAS A TYPO FOR "NO ERRORS OCCURED" WTF
if translation['1222'] == 'No errors occurred':
matches['good'].append('No errors occured')
return matches
def xld_footer_matches():
"""Matches for the XLD footer block."""
return {
'good': ['No errors occurred', 'End of status report'],
'badish': ['Some inconsistencies found'],
}
def xld_ar_summary():
"""Matches for the XLD AccurateRip Summary block."""
return {
'good': [
r'Track \d+ : OK.+',
html.escape('->All tracks accurately ripped.*'),
],
'badish': [
r'Track \d+ : NG.+',
'Disc not found in AccurateRip DB',
r'->\d+ tracks? accurately ripped, \d+ tracks? not',
],
'log4 log5': ['AccurateRip Summary'],
}
def generate_match_type(translation, source, matches=None, prepend='', append=''):
"""Function to generate the match types."""
matches = {} if not matches else matches
for match_type in source.keys():
if match_type not in matches:
matches[match_type] = []
for line_id in source[match_type]:
if line_id in translation:
match = prepend + re_paren(translation[line_id]) + append
matches[match_type].append(html.escape(match))
return matches
def re_paren(line):
"""Regex the comma. Quality docstring."""
line = re.sub(r'\(', r'\(', fmt_ptn(line))
return re.sub(r'\)', r'\)', line)
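# Illustrative example (hypothetical translated line): re_paren escapes literal
# parentheses, so a translation such as 'Peak level (relative)' can be embedded
# in a regular expression without opening an unintended capture group.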
|
StarcoderdataPython
|
1668466
|
<reponame>rdelosreyes/myctapipe
import sys
import argparse
from matplotlib import colors, pyplot as plt
import numpy as np
from ctapipe.io.hessio import hessio_event_source
from pyhessio import *
from ctapipe.core import Container
from ctapipe.io.containers import RawData, CalibratedCameraData
from ctapipe import visualization, io
from astropy import units as u
from ctapipe.calib.camera.mc import *
fig = plt.figure(figsize=(16, 7))
cmaps = [plt.cm.jet, plt.cm.winter,
plt.cm.ocean, plt.cm.bone, plt.cm.gist_earth, plt.cm.hot,
plt.cm.cool, plt.cm.coolwarm]
def display_telescope(event, tel_id):
global fig
ntels = len(event.dl1.tels_with_data)
fig.clear()
plt.suptitle("EVENT {} {:.1e} TeV @({:.1f},{:.1f})deg @{:.1f} m".format(
event.dl1.event_id, get_mc_shower_energy(),
get_mc_shower_altitude(), get_mc_shower_azimuth(),
np.sqrt(pow(get_mc_event_xcore(), 2) +
pow(get_mc_event_ycore(), 2))))
print("\t draw cam {}...".format(tel_id))
x, y = event.meta.pixel_pos[tel_id]
geom = io.CameraGeometry.guess(x * u.m, y * u.m)
npads = 1
# Only create two pads if there is timing information extracted
# from the calibration
if not event.dl1.tel[tel_id].tom is None:
npads = 2
    ax = plt.subplot(1, npads, 1)
disp = visualization.CameraDisplay(geom, ax=ax,
title="CT{0}".format(tel_id))
disp.pixels.set_antialiaseds(False)
disp.autoupdate = False
disp.pixels.set_cmap('seismic')
chan = 0
signals = event.dl1.tel[tel_id].pe_charge
disp.image = signals
disp.add_colorbar()
if npads == 2:
ax = plt.subplot(1, npads, npads)
disp = visualization.CameraDisplay(geom,
ax=ax,
title="CT{0}".format(tel_id))
disp.pixels.set_antialiaseds(False)
disp.autoupdate = False
disp.pixels.set_cmap('gnuplot')
chan = 0
disp.image = event.dl1.tel[tel_id].tom
disp.add_colorbar()
if __debug__:
print("All sum = %.3f\n" % sum(event.dl1.tel[tel_id].pe_charge))
def camera_calibration(filename, parameters, disp_args, level):
"""
Parameters
----------
filename MC filename with raw data (in ADC samples)
parameters Parameters to be passed to the different calibration functions
(described in each function separately inside mc.py)
disp_args Either: per telescope per event or
all telescopes of the event (currently dissabled)
level Output information of the calibration level results
Returns
-------
A display (see function display_telescope)
"""
TAG = sys._getframe().f_code.co_name+">"
# Load dl1 container
container = Container("calibrated_hessio_container")
container.add_item("dl1", RawData())
container.meta.add_item('pixel_pos', dict())
# loop over all events, all telescopes and all channels and call
# the calc_peds function defined above to do some work:
nt = 0
for event in hessio_event_source(filename):
nt = nt+1
# Fill DL1 container headers information. Clear also telescope info.
container.dl1.run_id = event.dl0.run_id
container.dl1.event_id = event.dl0.event_id
container.dl1.tel = dict() # clear the previous telescopes
container.dl1.tels_with_data = event.dl0.tels_with_data
if __debug__:
print(TAG, container.dl1.run_id, "#%d" % nt,
container.dl1.event_id,
container.dl1.tels_with_data,
"%.3e TeV @ (%.0f,%.0f)deg @ %.3f m" %
(get_mc_shower_energy(), get_mc_shower_altitude(),
get_mc_shower_azimuth(),
np.sqrt(pow(get_mc_event_xcore(), 2) +
pow(get_mc_event_ycore(), 2))))
for telid in event.dl0.tels_with_data:
print(TAG, "Calibrating.. CT%d\n" % telid)
# Get per telescope the camera geometry
x, y = event.meta.pixel_pos[telid]
geom = io.CameraGeometry.guess(x * u.m, y * u.m)
# Get the calibration data sets (pedestals and single-pe)
ped = get_pedestal(telid)
calib = get_calibration(telid)
# Integrate pixels traces and substract pedestal
# See pixel_integration_mc function documentation in mc.py
# for the different algorithms options
int_adc_pix, peak_adc_pix = pixel_integration_mc(event,
ped, telid,
parameters)
# Convert integrated ADC counts into p.e.
# selecting also the HG/LG channel (currently hard-coded)
pe_pix = calibrate_amplitude_mc(int_adc_pix, calib,
telid, parameters)
# Including per telescope metadata in the DL1 container
if telid not in container.meta.pixel_pos:
container.meta.pixel_pos[telid] = event.meta.pixel_pos[telid]
container.dl1.tels_with_data = event.dl0.tels_with_data
container.dl1.tel[telid] = CalibratedCameraData(telid)
container.dl1.tel[telid].pe_charge = np.array(pe_pix)
container.dl1.tel[telid].tom = np.array(peak_adc_pix[0])
# FOR THE CTA USERS:
# From here you can include your code.
# It should take as input the last data level calculated here (DL1)
# or call reconstruction algorithms (reco module) to be called.
# For example: you could ask to calculate the tail cuts cleaning
# using the tailcuts_clean in reco/cleaning.py module
#
# if 'tail_cuts' in parameters:
# clean_mask = tailcuts_clean(geom,
# image=np.array(pe_pix),pedvars=1,
# picture_thresh=parameters['tail_cuts'][0],
# boundary_thresh=parameters['tail_cuts'][1])
# container.dl1.tel[telid].pe_charge = np.array(pe_pix) *
# np.array(clean_mask)
# container.dl1.tel[telid].tom = np.array(peak_adc_pix[0]) *
# np.array(clean_mask)
#
sys.stdout.flush()
# Display
if 'event' in disp_args:
ello = input("See evt. %d?<[n]/y/q> " % container.dl1.event_id)
if ello == 'y':
if 'telescope' in disp_args:
for telid in container.dl1.tels_with_data:
ello = input(
"See telescope/evt. %d?[CT%d]<[n]/y/q/e> " %
(container.dl1.event_id, telid))
if ello == 'y':
display_telescope(container, telid)
plt.pause(0.1)
elif ello == 'q':
break
elif ello == 'e':
return None
else:
continue
else:
plt.pause(0.1)
elif ello == 'q':
return None
if __name__ == '__main__':
TAG = sys._getframe().f_code.co_name+">"
# Declare and parse command line option
parser = argparse.ArgumentParser(
description='Tel_id, pixel id and number of event to compute.')
parser.add_argument('--f', dest='filename',
required=True, help='filename MC file name')
args = parser.parse_args()
plt.show(block=False)
# Function description of camera_calibration options, given here
# Integrator: samples integration algorithm (equivalent to hessioxxx
# option --integration-sheme)
# -options: full_integration,
# simple_integration,
# global_peak_integration,
# local_peak_integration,
# nb_peak_integration
# nsum: Number of samples to sum up (is reduced if exceeding available
# length). (equivalent to first number in
# hessioxxx option --integration-window)
# nskip: Number of initial samples skipped (adapted such that interval
# fits into what is available). Start the integration a number of
# samples before the peak. (equivalent to second number in
# hessioxxx option --integration-window)
# sigamp: Amplitude in ADC counts [igain] above pedestal at which a
# signal is considered as significant (separate for high gain/low gain).
# (equivalent to hessioxxx option --integration-threshold)
# clip_amp: Amplitude in p.e. above which the signal is clipped.
# (equivalent to hessioxxx option --clip_pixel_amplitude (default 0))
# lwt: Weight of the local pixel (0: peak from neighbours only,
# 1: local pixel counts as much as any neighbour).
# (option in pixel integration function in hessioxxx)
# display: optionaly you can display events (all telescopes present on it)
# or per telescope per event. By default the last one.
# The first one is currently deprecated.
# level: data level from which information is displayed.
# The next call to camera_calibration would be equivalent of producing
# DST0 MC file using:
# hessioxxx/bin/read_hess -r 4 -u --integration-scheme 4
# --integration-window 7, 3 --integration-threshold 2, 4
# --dst-level 0 <MC_prod2_filename>
calibrated_camera = camera_calibration(
args.filename,
parameters={"integrator": "nb_peak_integration",
"nsum": 7,
"nskip": 3,
"sigamp": [2, 4],
"clip_amp": 0,
"lwt": 0},
disp_args={'event', 'telescope'}, level=1)
sys.stdout.flush()
print(TAG, "Closing file...")
close_file()
|
StarcoderdataPython
|
64481
|
def fib(n):
"return nth term of Fibonacci sequence"
a, b = 0, 1
i = 0
while i<n:
a, b = b, a+b
i += 1
return b
def linear_recurrence(n, coeffs=(2, 0), init=(1, 1)):
    """return nth term of the sequence defined by the
    linear recurrence
    u(n+2) = a*u(n+1) + b*u(n)"""
    a, b = coeffs          # recurrence coefficients
    u, v = init            # initial terms u0, u1
    if n <= 0:
        return u
    i = 0
    w = v
    while i < n:
        w = a*v + b*u
        u, v = v, w
        i += 1
    return w
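# Example values (both helpers return the term reached after n update steps):
#   fib(5)                                            -> 8
#   linear_recurrence(3)                              -> 8   # u(n+2) = 2*u(n+1): powers of two
#   linear_recurrence(5, coeffs=(1, 1), init=(0, 1))  -> 8   # Fibonacci again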
|
StarcoderdataPython
|
3297968
|
<gh_stars>0
from unittest import TestCase
from surfactant_example.contributed_ui.surfactant_contributed_ui import (
SURFACTANT_PLUGIN_ID
)
from surfactant_example.contributed_ui.templates import (
ExecutionLayerTemplate, IngredientTemplate)
from surfactant_example.data.gromacs_database import GromacsDatabase
class TestExecutionLayerTemplate(TestCase):
def setUp(self):
self.gromacs_database = GromacsDatabase()
self.layer_template = ExecutionLayerTemplate(
plugin_id=SURFACTANT_PLUGIN_ID,
)
def test___init__(self):
self.assertEqual(
"force.bdss.surfactant.plugin.example.v0.factory",
self.layer_template.id
)
self.assertEqual(3, len(self.layer_template.ingredient_templates))
self.assertEqual(3, self.layer_template.n_ingredients)
self.assertEqual(
3, len(self.layer_template.surfactant_template_list)
)
self.assertEqual(
3, len(self.layer_template.secondary_surfactant_list)
)
self.assertEqual(
self.layer_template.surfactant_template_list[0],
self.layer_template.primary_surfactant_template
)
self.assertEqual(
self.layer_template.empty_surfactant_template,
self.layer_template.secondary_surfactant_template
)
self.assertEqual(
2,
len(self.layer_template.primary_surfactant_template
.ingredient.fragments)
)
self.assertIsNone(
self.layer_template.secondary_surfactant_template.ingredient
)
self.assertEqual(
2, len(self.layer_template.salt_template.ingredient.fragments)
)
self.assertEqual(
1, len(self.layer_template.solvent_template.ingredient.fragments)
)
def test_id(self):
self.layer_template.plugin_id = 'force.bdss.surfactant.plugin.v1'
new_id = "force.bdss.surfactant.plugin.v1.factory"
self.assertEqual(new_id, self.layer_template.id)
self.assertEqual(
new_id, self.layer_template.primary_surfactant_template.plugin_id
)
self.assertEqual(
new_id, self.layer_template.secondary_surfactant_template.plugin_id
)
self.assertEqual(
new_id, self.layer_template.salt_template.plugin_id
)
self.assertEqual(
new_id, self.layer_template.solvent_template.plugin_id
)
def test_secondary_surfactant_list(self):
dpc_template = self.layer_template.secondary_surfactant_list[-1]
self.layer_template.secondary_surfactant_template = (
dpc_template
)
self.assertEqual(4, self.layer_template.n_ingredients)
self.layer_template.primary_surfactant_template = (
dpc_template
)
self.assertNotIn(
dpc_template,
self.layer_template.secondary_surfactant_list
)
self.assertEqual(
self.layer_template.empty_surfactant_template,
self.layer_template.secondary_surfactant_template
)
self.assertEqual(3, self.layer_template.n_ingredients)
def test_create_database_templates(self):
templates = self.layer_template.create_database_templates()
self.assertEqual(3, len(templates))
names = ['Sodium Dodecyl Sulfate',
'Sodium Chloride',
'Water']
input_slots = [[], [], []]
output_slots = [[{'name': 'sodium_dodecyl_sulfate_ingredient'}],
[{'name': 'sodium_chloride_ingredient'}],
[{'name': 'water_ingredient'}]]
for index, template in enumerate(templates):
keys = list(template.keys())
self.assertListEqual(
['id', 'model_data'], keys
)
self.assertEqual(
"force.bdss.surfactant.plugin.example.v0.factory.database",
template['id']
)
self.assertEqual(
'Model', template['model_data']['input_mode']
)
self.assertEqual(
names[index], template['model_data']['name']
)
self.assertEqual(
input_slots[index],
template['model_data']['input_slot_info']
)
self.assertEqual(
output_slots[index],
template['model_data']['output_slot_info']
)
def test_create_formulation_template(self):
template = self.layer_template.create_formulation_template()
keys = list(template.keys())
self.assertListEqual(
['id', 'model_data'], keys
)
self.assertEqual(
"force.bdss.surfactant.plugin.example.v0.factory.formulation",
template['id']
)
self.assertEqual(
{
"n_surfactants": 1,
"input_slot_info": [
{"name": "sodium_dodecyl_sulfate_ingredient"},
{"name": "sodium_dodecyl_sulfate_conc"},
{"name": "sodium_chloride_ingredient"},
{"name": "sodium_chloride_conc"},
{"name": "water_ingredient"}
],
"output_slot_info": [{"name": "formulation"}]
},
template['model_data']
)
def test_create_simulation_template(self):
template = self.layer_template.create_simulation_template()
keys = list(template.keys())
self.assertListEqual(
['id', 'model_data'], keys
)
self.assertEqual(
"force.bdss.surfactant.plugin.example.v0.factory.simulation",
template['id']
)
self.assertEqual(
{
"name": "surfactant_experiment",
"size": 500,
"dry_run": False,
"input_slot_info": [{"name": "formulation"}],
"output_slot_info": [{"name": "results"}]
},
template['model_data']
)
def test_create_viscosity_template(self):
template = self.layer_template.create_viscosity_template()
keys = list(template.keys())
self.assertListEqual(
['id', 'model_data'], keys
)
self.assertEqual(
"force.bdss.surfactant.plugin.example.v0.factory.viscosity",
template['id']
)
self.assertEqual(
{
"input_slot_info": [{"name": "results"}],
"output_slot_info": [{"name": "viscosity"}]
},
template['model_data']
)
def test_create_micelle_template(self):
template = self.layer_template.create_micelle_template()
keys = list(template.keys())
self.assertListEqual(
['id', 'model_data'], keys
)
self.assertEqual(
"force.bdss.surfactant.plugin.example.v0.factory.micelle",
template['id']
)
self.assertEqual(
{
'method': 'atomic',
"fragment_symbols": ['SDS'],
"r_thresh": 0.98,
"input_slot_info": [{'name': 'formulation'},
{"name": "results"}],
"output_slot_info": [{"name": "micelle"}]
},
template['model_data']
)
dpc_template = self.layer_template.secondary_surfactant_list[1]
self.layer_template.secondary_surfactant_template = (
dpc_template
)
template = self.layer_template.create_micelle_template()
self.assertEqual(
{
'method': 'atomic',
"fragment_symbols": ['SDS', 'DPC'],
"r_thresh": 0.98,
"input_slot_info": [{'name': 'formulation'},
{"name": "results"}],
"output_slot_info": [{"name": "micelle"}]
},
template['model_data']
)
def test_create_cost_template(self):
template = self.layer_template.create_cost_template()
keys = list(template.keys())
self.assertListEqual(
['id', 'model_data'], keys
)
self.assertEqual(
"force.bdss.surfactant.plugin.example.v0.factory.cost",
template['id']
)
self.assertEqual(
{
"input_slot_info": [{"name": "formulation"}],
"output_slot_info": [{"name": "cost"}]
},
template['model_data']
)
def test_create_template(self):
template = self.layer_template.create_template()
factory_list = [
['database', 'database', 'database'],
['formulation'],
['simulation'],
['micelle', 'cost']
]
self.assertEqual(len(factory_list), len(template))
for factories, layer_template in zip(factory_list, template):
self.assertEqual(
len(factories), len(layer_template['data_sources']))
for index, factory in enumerate(factories):
keys = list(layer_template['data_sources'][index].keys())
self.assertListEqual(
['id', 'model_data'], keys
)
def test_create_template_secondary_surfactant(self):
ingredient_names = [
'Dodecyl Phosphocholine',
'Sodium Laureth Sulfate']
variable_names = [
'dodecyl_phosphocholine',
'sodium_laureth_sulfate']
for ingredient_name, variable_name in zip(
ingredient_names, variable_names):
ingredient = self.gromacs_database.get_ingredient(
ingredient_name)
ingredient_template = IngredientTemplate(
plugin_id=self.layer_template.id,
ingredient=ingredient)
self.layer_template.surfactant_template_list.append(
ingredient_template)
self.layer_template.secondary_surfactant_template = (
self.layer_template.surfactant_template_list[-1]
)
self.assertEqual(4, self.layer_template.n_ingredients)
template = self.layer_template.create_template()
first_layer_ds = template[0]['data_sources']
second_layer_ds = template[1]['data_sources']
self.assertEqual(4, len(first_layer_ds))
self.assertEqual(
[{"name": f"{variable_name}_ingredient"}],
first_layer_ds[1]['model_data']['output_slot_info'])
self.assertEqual(1, len(second_layer_ds))
self.assertIn(
{"name": f"{variable_name}_ingredient"},
second_layer_ds[0]['model_data']['input_slot_info'])
self.assertIn(
{"name": f"{variable_name}_conc"},
second_layer_ds[0]['model_data']['input_slot_info'])
|
StarcoderdataPython
|
4801783
|
<filename>renderer/__init__.py<gh_stars>10-100
__version__ = '0.1.3'
default_app_config = 'renderer.apps.RenderAppConfig'
|
StarcoderdataPython
|
54319
|
<filename>czsc/utils/__init__.py<gh_stars>1-10
# coding: utf-8
from .echarts_plot import kline_pro, heat_map
from .ta import KDJ, MACD, EMA, SMA
from .io import read_pkl, save_pkl, read_json, save_json
from .log import create_logger
from .word_writer import WordWriter
def x_round(x: [float, int], digit=4):
"""用去尾法截断小数
:param x: 数字
:param digit: 保留小数位数
:return:
"""
if isinstance(x, int):
return x
try:
digit_ = pow(10, digit)
x = int(x * digit_) / digit_
except:
print(f"x_round error: x = {x}")
return x
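# x_round truncates (chops the extra digits) rather than rounding, e.g.:
#   x_round(3.14159, 2) -> 3.14
#   x_round(1.9999, 3)  -> 1.999   (round() would give 2.0)
#   x_round(7)          -> 7       (ints are returned unchanged)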
|
StarcoderdataPython
|
95131
|
<filename>niftynet/layer/rand_flip.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import warnings
import numpy as np
from niftynet.layer.base_layer import RandomisedLayer
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", RuntimeWarning)
class RandomFlipLayer(RandomisedLayer):
"""
Add a random flipping layer as pre-processing.
"""
def __init__(self,
flip_axes,
flip_probability=0.5,
name='random_flip'):
"""
:param flip_axes: a list of indices over which to flip
:param flip_probability: the probability of performing the flip
(default = 0.5)
:param name:
"""
super(RandomFlipLayer, self).__init__(name=name)
self._flip_axes = flip_axes
self._flip_probability = flip_probability
self._rand_flip = None
def randomise(self, spatial_rank=3):
spatial_rank = int(np.floor(spatial_rank))
self._rand_flip = np.random.random(
size=spatial_rank) < self._flip_probability
def _apply_transformation(self, image):
assert self._rand_flip is not None, "Flip is unset -- Error!"
for axis_number, do_flip in enumerate(self._rand_flip):
if axis_number in self._flip_axes and do_flip:
image = np.flip(image, axis=axis_number)
return image
def layer_op(self, inputs, interp_orders=None, *args, **kwargs):
if inputs is None:
return inputs
if isinstance(inputs, dict) and isinstance(interp_orders, dict):
for (field, image_data) in inputs.items():
assert (all([i < 0 for i in interp_orders[field]]) or
all([i >= 0 for i in interp_orders[field]])), \
'Cannot combine interpolatable and non-interpolatable data'
if interp_orders[field][0]<0:
continue
inputs[field] = self._apply_transformation(image_data)
else:
inputs = self._apply_transformation(inputs)
return inputs
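# A minimal usage sketch (assuming, as elsewhere in NiftyNet, that the
# RandomisedLayer base class forwards calls to layer_op):
#   flip_layer = RandomFlipLayer(flip_axes=[0, 1], flip_probability=0.5)
#   flip_layer.randomise(spatial_rank=3)
#   flipped = flip_layer(np.random.rand(8, 8, 8))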
|
StarcoderdataPython
|
1795728
|
<filename>microsim/utilities.py
# Contains some useful utility functionality
import os
from urllib.request import urlopen
import requests
import tarfile
import pandas as pd
from typing import List
from tqdm import tqdm
from microsim.column_names import ColumnNames
class Optimise:
"""
Functions to optimise the memory use of pandas dataframes.
From https://medium.com/bigdatarepublic/advanced-pandas-optimize-speed-and-memory-a654b53be6c2
"""
@staticmethod
def optimize(df: pd.DataFrame, datetime_features: List[str] = []):
return Optimise._optimize_floats(Optimise._optimize_ints(Optimise._optimize_objects(df, datetime_features)))
@staticmethod
def _optimize_floats(df: pd.DataFrame) -> pd.DataFrame:
floats = df.select_dtypes(include=['float64']).columns.tolist()
df[floats] = df[floats].apply(pd.to_numeric, downcast='float')
return df
@staticmethod
def _optimize_ints(df: pd.DataFrame) -> pd.DataFrame:
ints = df.select_dtypes(include=['int64']).columns.tolist()
df[ints] = df[ints].apply(pd.to_numeric, downcast='integer')
return df
@staticmethod
def _optimize_objects(df: pd.DataFrame, datetime_features: List[str]) -> pd.DataFrame:
for col in df.select_dtypes(include=['object']):
if col not in datetime_features:
num_unique_values = len(df[col].unique())
num_total_values = len(df[col])
if float(num_unique_values) / num_total_values < 0.5:
df[col] = df[col].astype('category')
else:
df[col] = pd.to_datetime(df[col])
return df
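# A minimal usage sketch for Optimise (file and column names are hypothetical):
#   df = pd.read_csv("individuals.csv")
#   df = Optimise.optimize(df, datetime_features=["date_of_birth"])
# Numeric columns are downcast and low-cardinality object columns become
# categoricals, which usually reduces the in-memory footprint considerably.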
def check_durations_sum_to_1(individuals, activities):
total_duration = [0.0] * len(individuals) # Add up all the different activity durations
for activity in activities:
total_duration = total_duration + individuals.loc[:, f"{activity}{ColumnNames.ACTIVITY_DURATION}"]
if not (total_duration.apply(lambda x: round(x, 5)) == 1.0).all():
print("Some activity durations don't sum to 1", flush=True)
print(total_duration[total_duration != 1.0], flush=True)
raise Exception("Some activity durations don't sum to 1")
# data fetching functions
def download_data(url : str):
"""Download data utility function
Args:
url (str, optional): A url to an archive file. Defaults to "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz".
"""
response = requests.get(url, stream=True)
# specify target_path as name of tarfile downloaded by splitting url
# and retrieving last item
target_path = os.path.join(url.split('/')[-1])
# Create a progress bar
file_size = int(urlopen(url).info().get('Content-Length', -1))
pbar = tqdm(total=file_size, initial=0, unit='B', unit_scale=True, desc=url.split('/')[-1])
if response.status_code == 200:
with open(target_path, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
return target_path
def unpack_data(archive : str):
"""unpack tar data archive
Args:
archive (str): A string directory path to archive file using
"""
tar_file = tarfile.open(archive)
tar_file.extractall(".")
def data_setup(url : str = "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz"):
"""A wrapper function for downloading and unpacking Azure stored devon_data
Args:
archive (str): A string directory path to archive file using
url (str, optional): A url to an archive file. Defaults to "https://ramp0storage.blob.core.windows.net/rampdata/devon_data.tar.gz".
"""
archive_file = download_data(url = url)
unpack_data(archive = archive_file)
|
StarcoderdataPython
|
1606964
|
<reponame>line-mind/error_solver
import os
import time
import pytest
from ..data import get_file_path
from .error_solver import *
def test_repr():
path = get_file_path('wire_load.ef')
solver = ErrorSolver.from_file(path)
repr(solver)
def test_from_file():
path = get_file_path('wire_load.ef')
solver = ErrorSolver.from_file(path)
assert len(solver.get_equations()) == 4
assert len(solver.get_partials()) == 4
def test_get_equations():
path = get_file_path('wire_load.ef')
solver = ErrorSolver.from_file(path)
assert len(solver.get_equations('wind_pressure')) == 3
assert len(solver.get_equations('wind_velocity')) == 4
def test_get_partials():
path = get_file_path('wire_load.ef')
solver = ErrorSolver.from_file(path)
assert len(solver.get_partials('wind_pressure')) == 3
assert len(solver.get_partials('wind_velocity')) == 4
def test_check_determinancy1():
path = get_file_path('cylinder.ef')
solver = ErrorSolver.from_file(path)
values = {
'height': 12,
'radius': 5,
'area': 78.539816,
'volume': 942.477796
}
errors = {
'height': 0.05,
}
with pytest.raises(ValueError):
solver.solve(values, errors)
def test_check_determinancy2():
path = get_file_path('cylinder.ef')
solver = ErrorSolver.from_file(path)
values = {
'height': 12,
'radius': 5,
'area': 78.539816,
'volume': 942.477796
}
errors = {
'height': 0.05,
'radius': 0.05,
'area': 0.05,
}
with pytest.raises(ValueError):
solver.solve(values, errors)
def test_solve1():
path = get_file_path('cylinder.ef')
solver = ErrorSolver.from_file(path)
values = {
'height': 12,
'radius': 5,
'area': 78.539816,
'volume': 942.477796
}
errors = {
'height': 0.05,
'radius': 0.05
}
df = solver.solve(values, errors)
assert pytest.approx(df['value']['volume'], 0.01) == 942.477796
assert pytest.approx(df['error']['volume'], 0.01) == 22.78
assert pytest.approx(df['pct_error']['volume'], 0.01) == 2.42
assert df['is_calc']['volume'] == True
def test_solve2():
path = get_file_path('cylinder.ef')
solver = ErrorSolver.from_file(path)
values = {
'height': 12,
'radius': 5,
'area': 78.539816,
'volume': 942.477796
}
errors = {
'height': 0.05,
'radius': 0.05
}
const = {
'height': 0
}
df = solver.solve(values, errors, const)
assert pytest.approx(df['value']['volume'], 0.01) == 942.477796
assert pytest.approx(df['error']['volume'], 0.01) == 22.78
assert pytest.approx(df['pct_error']['volume'], 0.01) == 2.42
assert df['is_calc']['volume'] == True
def test_write_python():
path = get_file_path('wire_load.ef')
solver = ErrorSolver.from_file(path)
path = get_file_path('_wire_load_test_mod.py')
solver.write_python(path)
time.sleep(3)
assert os.path.exists(path)
os.remove(path)
def test_bad_values():
path = get_file_path('cylinder.ef')
solver = ErrorSolver.from_file(path)
values = {
'height': 12,
'radius': 5,
'area': 78.539816,
'volume': 900
}
errors = {
'height': 0.05,
'radius': 0.05
}
with pytest.raises(ValueError):
solver.solve(values, errors)
def test_missing_values():
path = get_file_path('cylinder.ef')
solver = ErrorSolver.from_file(path)
values = {
'height': 12,
'radius': 5,
'area': 78.539816,
}
errors = {
'height': 0.05,
'radius': 0.05
}
with pytest.raises(ValueError):
solver.solve(values, errors)
def test_restricted_symbols():
equations = [
'V = I * R'
]
values = {
'I': 1,
'R': 2,
'V': 2,
}
errors = {
'I': 0.05,
'R': 0.05,
}
solver = ErrorSolver(equations)
with pytest.raises(ValueError):
solver.solve(values, errors)
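if __name__ == '__main__':
    # Illustrative sketch mirroring test_solve1: propagate measurement errors
    # through the bundled cylinder model and print the resulting table.
    path = get_file_path('cylinder.ef')
    solver = ErrorSolver.from_file(path)
    values = {'height': 12, 'radius': 5, 'area': 78.539816, 'volume': 942.477796}
    errors = {'height': 0.05, 'radius': 0.05}
    print(solver.solve(values, errors))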
|
StarcoderdataPython
|
86686
|
import logging
import requests
import json
from itertools import chain, islice
from datetime import datetime
from google.cloud import datastore
from flask import Flask, request
from requests_oauthlib import OAuth1Session
from collections import namedtuple
app = Flask(__name__)
log = logging.getLogger('werkzeug')
Sensor = namedtuple('Sensor', ['id', 'name', 'max_timestamp'])
def get_sensors(telldus):
url = 'https://api.telldus.com/json/sensors/list'
res = telldus.get(url)
if res.status_code != 200:
raise Exception('failed', res.content)
sensors = [
get_sensor(s)
for s in json.loads(res.content)['sensor']
]
return sensors
def get_sensor(s):
sensor_id = int(s['id'])
sensor_name = s['name']
ds = datastore.Client()
    entity = ds.get(ds.key('sensors', str(sensor_id)))
    # Fall back to timestamp 0 for sensors that have no stored state yet.
    max_timestamp = entity.get('max_timestamp', 0) if entity else 0
    sensor = Sensor(sensor_id, sensor_name, max_timestamp)
log.info("found sensor %s", sensor)
return sensor
def get_sensor_values(telldus, sensor):
url = 'https://api.telldus.com/json/sensor/history'
log.info('fetching sensor values for %d from %d', sensor.id, sensor.max_timestamp)
res = telldus.get(url, params={'id': sensor.id, 'from': sensor.max_timestamp})
if res.status_code != 200:
raise Exception('failed', res.content)
events = json.loads(res.content)['history']
for event in events:
data_points = event['data']
for data_point in data_points:
data_point['ts'] = event['ts']
data_point['sensor'] = sensor.id
yield data_point
def prepare_sensor_value(data_point):
sensor = data_point['sensor']
timestamp = datetime.utcfromtimestamp(data_point['ts'])
name = data_point['name']
typ = {'temp': u'temperature'}.get(name, name)
value = data_point['value']
key = '%d-%s-%d' % (sensor, typ, data_point['ts'])
return key, {
'sensor': sensor,
'timestamp': timestamp,
'type': typ,
'value': float(value)
}
def update_sync_time(sensor, max_timestamp, last_sync):
ds = datastore.Client()
ds_key = ds.key('sensors', str(sensor.id))
entity = datastore.Entity(key=ds_key)
value = {
'last_sync': last_sync,
'max_timestamp': max_timestamp,
'name': sensor.name,
'id': sensor.id
}
entity.update(value)
ds.put(entity)
return Sensor(sensor.id, sensor.name, max_timestamp)
def store_sensor_values(sensor, rows):
ds = datastore.Client()
created_at = datetime.utcnow()
max_ts = sensor.max_timestamp
entities = []
for row in rows:
key, value = prepare_sensor_value(row)
max_ts = max(max_ts, row['ts'])
ds_key = ds.key('sensor_values', key)
entity = datastore.Entity(key=ds_key)
value['created_at'] = created_at
entity.update(value)
entities.append(entity)
for chunk in chunks(entities, size=500):
ds.put_multi(chunk)
update_sync_time(sensor, max_ts, created_at)
return len(entities)
def chunks(iterable, size=10):
iterator = iter(iterable)
for first in iterator:
yield chain([first], islice(iterator, size - 1))
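# Example (illustrative): each yielded chunk is a lazy iterator, e.g.
#   [list(c) for c in chunks(range(5), size=2)]  ->  [[0, 1], [2, 3], [4]]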
def get_config():
ds = datastore.Client()
client_key = ds.get(ds.key('settings', 'CLIENT_KEY'))['value']
client_secret = ds.get(ds.key('settings', 'CLIENT_SECRET'))['value']
resource_owner_key = ds.get(
ds.key('settings', 'RESOURCE_OWNER_KEY'))['value']
resource_owner_secret = ds.get(
ds.key('settings', 'RESOURCE_OWNER_SECRET'))['value']
return client_key, client_secret, resource_owner_key, resource_owner_secret
@app.route('/sync', methods=['GET'])
def sync():
client_key, client_secret, resource_owner_key, resource_owner_secret = get_config(
)
telldus = OAuth1Session(
client_key,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret)
sensors = get_sensors(telldus)
stored_total = 0
for sensor in sensors:
stored = store_sensor_values(sensor, get_sensor_values(telldus, sensor))
stored_total += stored
log.info('synced %d values for sensor %d', stored, sensor.id)
log.info('synced %d values in total', stored_total)
return 'synced %d sensor values' % stored_total, 200
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request.')
return 'An internal error occurred.', 500
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
|
StarcoderdataPython
|
1678807
|
<reponame>goomhow/stock-manage-xgboost
import pandas as pd
from sklearn import metrics
from sklearn.externals import joblib
from xgboost.sklearn import XGBClassifier
import os
import time
from datetime import datetime
rm_col= ['ACC_TYPE',
'ACT_FLAG',
'AVG_MON_AMT_A',
'AVG_MON_AMT_B',
'AVG_MON_AMT_C',
'BILL_OWE_AMT_A',
'BILL_OWE_AMT_C',
'BIL_FLAG',
'BROADBAND_TICKET_FLAG',
'CHARGE_FT_BEFORE_B',
'CYCLE_CHARGE_A',
'CYCLE_CHARGE_B',
'DOWN_HDAY_HH15_19_BRD_BND_FLUX_A',
'DOWN_HDAY_HH15_19_BRD_BND_FLUX_B',
'DOWN_HDAY_HH15_19_BRD_BND_FLUX_C',
'DOWN_HDAY_HH19_22_BRD_BND_FLUX_A',
'DOWN_HDAY_HH19_22_BRD_BND_FLUX_B',
'DOWN_HDAY_HH19_22_BRD_BND_FLUX_C',
'DOWN_HDAY_HH9_15_BRD_BND_FLUX_A',
'DOWN_HDAY_HH9_15_BRD_BND_FLUX_B',
'DOWN_HDAY_HH9_15_BRD_BND_FLUX_C',
'DOWN_WDAY_HH15_19_BRD_BND_FLUX_A',
'DOWN_WDAY_HH15_19_BRD_BND_FLUX_B',
'DOWN_WDAY_HH15_19_BRD_BND_FLUX_C',
'DOWN_WDAY_HH19_22_BRD_BND_FLUX_A',
'DOWN_WDAY_HH19_22_BRD_BND_FLUX_B',
'DOWN_WDAY_HH19_22_BRD_BND_FLUX_C',
'DOWN_WDAY_HH9_15_BRD_BND_FLUX_C',
'FIN_OWE_AMT_A',
'FIN_OWE_AMT_C',
'HDAY_BRD_BND_DAYS_A',
'HDAY_BRD_BND_DAYS_B',
'HDAY_HH15_19_BRD_BND_CNT_A',
'HDAY_HH15_19_BRD_BND_CNT_B',
'HDAY_HH15_19_BRD_BND_CNT_C',
'HDAY_HH15_19_BRD_BND_DUR_A',
'HDAY_HH15_19_BRD_BND_DUR_B',
'HDAY_HH15_19_BRD_BND_DUR_C',
'HDAY_HH19_22_BRD_BND_CNT_A',
'HDAY_HH19_22_BRD_BND_CNT_B',
'HDAY_HH19_22_BRD_BND_CNT_C',
'HDAY_HH19_22_BRD_BND_DUR_A',
'HDAY_HH19_22_BRD_BND_DUR_B',
'HDAY_HH19_22_BRD_BND_DUR_C',
'HDAY_HH9_15_BRD_BND_CNT_A',
'HDAY_HH9_15_BRD_BND_CNT_B',
'HDAY_HH9_15_BRD_BND_CNT_C',
'HDAY_HH9_15_BRD_BND_DUR_A',
'HDAY_HH9_15_BRD_BND_DUR_B',
'HDAY_HH9_15_BRD_BND_DUR_C',
'OWE_AMT_A',
'OWE_AMT_B',
'OWE_AMT_C',
'OWE_DUR_A',
'OWE_DUR_B',
'PAY_FLAG',
'PRTL_AMT_A',
'PRTL_AMT_B',
'PRTL_MONS',
'REF_TYPE',
'STOP_DUR',
'UNIT_CHARGE_A',
'UP_HDAY_HH15_19_BRD_BND_FLUX_A',
'UP_HDAY_HH15_19_BRD_BND_FLUX_B',
'UP_HDAY_HH15_19_BRD_BND_FLUX_C',
'UP_HDAY_HH19_22_BRD_BND_FLUX_A',
'UP_HDAY_HH19_22_BRD_BND_FLUX_B',
'UP_HDAY_HH19_22_BRD_BND_FLUX_C',
'UP_HDAY_HH9_15_BRD_BND_FLUX_A',
'UP_HDAY_HH9_15_BRD_BND_FLUX_B',
'UP_HDAY_HH9_15_BRD_BND_FLUX_C',
'UP_WDAY_HH15_19_BRD_BND_FLUX_B',
'UP_WDAY_HH15_19_BRD_BND_FLUX_C',
'UP_WDAY_HH19_22_BRD_BND_FLUX_A',
'UP_WDAY_HH19_22_BRD_BND_FLUX_B',
'UP_WDAY_HH19_22_BRD_BND_FLUX_C',
'UP_WDAY_HH9_15_BRD_BND_FLUX_C',
'USER_TYPE_ID',
'VIP_FLAG',
'WDAY_HH15_19_BRD_BND_CNT_A',
'WDAY_HH15_19_BRD_BND_CNT_B',
'WDAY_HH15_19_BRD_BND_CNT_C',
'WDAY_HH19_22_BRD_BND_CNT_A',
'WDAY_HH19_22_BRD_BND_CNT_B',
'WDAY_HH19_22_BRD_BND_CNT_C',
'WDAY_HH19_22_BRD_BND_DUR_A',
'WDAY_HH19_22_BRD_BND_DUR_B',
'WDAY_HH19_22_BRD_BND_DUR_C',
'WDAY_HH9_15_BRD_BND_CNT_A',
'WDAY_HH9_15_BRD_BND_CNT_B']
def running_time(func):
def wrapper(**param):
startTime = time.time()
x = func(**param)
endTime = time.time()
        mins = (endTime - startTime) / 60
        print("time is %d min" % mins)
return x
return wrapper
@running_time
def get_transformed_data(fname='bd_train.csv',frac=0.1):
xName = fname.split(".")[0]+'_x.csv'
if os.path.exists(xName):
data = pd.read_csv(xName, index_col='PRD_INST_ID')
else:
data = pd.read_csv(fname, index_col='PRD_INST_ID')
data = data[data.LABEL > -1]
common = [i[:-2] for i in data.columns if i.endswith('_A')]
NAME_A = [i+'_A' for i in common]
NAME_B = [i+'_B' for i in common]
NAME_C = [i+'_C' for i in common]
A = data[NAME_A].rename(columns=lambda x: x[:-2])
B = data[NAME_B].rename(columns=lambda x: x[:-2])
C = data[NAME_C].rename(columns=lambda x: x[:-2])
B_A = B - A
C_B = C - B
data[NAME_A] = B_A.rename(columns=lambda x: x+'_A')
data[NAME_B] = C_B.rename(columns=lambda x: x+'_B')
data.to_csv(xName, index=True, header=True, index_label='PRD_INST_ID')
data.sort_index(inplace=True)
d_train = data.sample(frac=frac)
X = d_train.drop('LABEL', axis=1)
y = d_train['LABEL']
return data, X, y
@running_time
def evaluate_model(model_params):
model = XGBClassifier(**model_params)
data, X_train, y_train = get_transformed_data(frac=1)
model.fit(X_train, y_train, eval_metric=metrics.f1_score)
joblib.dump(model, 'lossWarnBroadbandModel_{}.pkl'.format(format(datetime.now().strftime('%d%H%M'))))
del data
del X_train
del y_train
data, X_test, y_test = get_transformed_data(fname='bd_train2.csv', frac=1)
y_pred = model.predict(X_test)
accuracy = metrics.accuracy_score(y_test, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
print('auc:', metrics.roc_auc_score(y_test, y_pred))
train_report = metrics.classification_report(y_test, y_pred)
print(train_report)
feat_imp = pd.Series(model.get_booster().get_fscore()).sort_values(ascending=False)
print(feat_imp)
return model
def print_evaluate(model,X_test,y_test):
y_pred = model.predict(X_test)
accuracy = metrics.accuracy_score(y_test, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
print('auc:', metrics.roc_auc_score(y_test, y_pred))
train_report = metrics.classification_report(y_test, y_pred)
print(train_report)
feat_imp = pd.Series(model.get_booster().get_fscore()).sort_values(ascending=False)
print(feat_imp)
if __name__ == '__main__':
params_a = {'learning_rate': 0.01,
'n_estimators': 2000,
'max_depth': 8,
'min_child_weight': 7,
'gamma': 0.1,
'subsample': 0.9,
'colsample_bytree': 0.6,
'scale_pos_weight': 3,
'n_jobs': 42,
'objective': 'binary:logistic',
'reg_alpha': 0.2,
'reg_lambda': 1}
params_b = {'learning_rate': 0.01,
'n_estimators': 2000,
'max_depth': 10,
'min_child_weight': 8,
'gamma': 0.1,
'subsample': 0.9,
'colsample_bytree': 0.65,
'scale_pos_weight': 5,
'n_jobs': 42,
'objective': 'binary:logistic',
'reg_alpha': 0.005,
'reg_lambda': 0.005}
model_a = XGBClassifier(**params_a)
model_b = XGBClassifier(**params_b)
data, X_train, y_train = get_transformed_data(frac=1)
del data
model_a.fit(X_train, y_train)
joblib.dump(model_a, 'AlossWarnBroadbandModel_{}.pkl'.format(format(datetime.now().strftime('%d%H%M'))))
X_train.drop(columns=rm_col, inplace=True)
model_b.fit(X_train, y_train)
joblib.dump(model_b, 'BlossWarnBroadbandModel_{}.pkl'.format(format(datetime.now().strftime('%d%H%M'))))
del X_train
del y_train
data, X_test, y_test = get_transformed_data(fname='broadband_201806.csv', frac=1)
del data
print("*"*10+"A"+"*"*10)
print_evaluate(model_a, X_test, y_test)
print("*" * 10 + "B" + "*" * 10)
print_evaluate(model_b, X_test.drop(columns=rm_col), y_test)
|
StarcoderdataPython
|
1636148
|
from .base import X11BaseRecipe
class LibSMRecipe(X11BaseRecipe):
def __init__(self, *args, **kwargs):
super(LibSMRecipe, self).__init__(*args, **kwargs)
self.sha256 = '0baca8c9f5d934450a70896c4ad38d06' \
'475521255ca63b717a6510fdb6e287bd'
self.name = 'libSM'
self.version = '1.2.2'
self.depends = ['libICE', 'xorgproto', 'xtrans']
|
StarcoderdataPython
|
1662367
|
<reponame>daniilstudent/lab_6
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Problem statement: Use a dictionary with the following keys: last name, first name;
# phone number; date of birth (a list of three numbers). Write a program that:
# reads records of this structure from the keyboard into a list; keeps the records
# in alphabetical order; prints information about the people whose birthdays fall in
# the month entered from the keyboard; if there are none, prints a corresponding
# message.
import sys
if __name__ == '__main__':
    # List of records
mydicts = []
    # Prompt for a command from the terminal
while True:
command = input(">>> ").lower()
        # Perform the action matching the command
if command == 'exit':
break
        # Command for entering a new record
elif command == 'add':
            # Ask for the record data
name1 = input("Фамилия: ")
name2 = input("Имя: ")
phone = input("Номер телефона: ")
date = list(input("Дата рождения: ").split())
            # Build the dictionary
mydict = {
'name1': name1,
'name2': name2,
'phone': phone,
'date': date,
}
            # Append the dictionary to the list
mydicts.append(mydict)
            # Keep the list sorted alphabetically
if len(mydicts) > 1:
mydicts.sort(key=lambda item: item.get('name1', ''))
        # Command that prints the table
elif command == 'list':
            # Define the table layout
line = '+-{}-+-{}-+-{}-+-{}-+-{}-+'.format(
'-' * 4,
'-' * 30,
'-' * 20,
'-' * 15,
'-' * 20
)
print(line)
            # Table header
print(
'| {:^4} | {:^30} | {:^20} | {:^15} | {:^20} |'.format(
"No",
"Фамилия",
"Имя",
"Номер телефона",
"Дата рождения"
)
)
print(line)
            # Print the records into the table
for idx, mydict in enumerate(mydicts, 1):
print(
'| {:>4} | {:<30} | {:<20} | {:>15} | {:>20} |'.format(
idx,
mydict.get('name1'),
mydict.get('name2'),
mydict.get('phone'),
'.'.join(mydict.get('date'))
)
)
print(line)
elif command.startswith('select'):
            # Read the month number and check it against each date of birth
was = input('Введите число месяца: ' )
count = 0
for mydict in mydicts:
if mydict.get('date')[1] == was:
count += 1
print(
'{:>4}: {}'.format(count, mydict.get('name1', ''))
)
            # If the counter is 0, no such people were found.
if count == 0:
print("Не найдено такого месяца в датах рождения.")
elif command == 'help':
            # Print help on using the program.
print("Список команд:\n")
print("add - данные ;")
print("list - вывести список;")
print("select <месяцы> - запросить месяц на который приходится день рождения;")
print("help - отобразить справку;")
print("exit - завершить работу с программой.")
else:
print(f"Неизвестная команда {command}", file=sys.stderr)
|
StarcoderdataPython
|
3238204
|
<reponame>vidkidz/crossbridge
import overload_extendc
f = overload_extendc.Foo()
if f.test(3) != 1:
raise RuntimeError
if f.test("hello") != 2:
raise RuntimeError
if f.test(3.5,2.5) != 3:
raise RuntimeError
if f.test("hello",20) != 1020:
raise RuntimeError
if f.test("hello",20,100) != 120:
raise RuntimeError
# C default args
if f.test(f) != 30:
raise RuntimeError
if f.test(f,100) != 120:
raise RuntimeError
if f.test(f,100,200) != 300:
raise RuntimeError
|
StarcoderdataPython
|
1618522
|
import datetime
import bson
from fastapi_mongodb.helpers import AsyncTestCase
from fastapi_mongodb.models import BaseCreatedUpdatedModel, BaseDBModel
class TestBaseDBModel(AsyncTestCase):
class TestCreatedUpdatedModel(BaseDBModel, BaseCreatedUpdatedModel):
test: str
class TestModel(BaseDBModel):
test: str
def setUp(self) -> None:
self.model_class = BaseDBModel
def test_from_db_default(self):
data = {"_id": bson.ObjectId()}
result = self.model_class.from_db(data=data)
self.assertEqual(data["_id"], result.id)
def test_from_db_none(self):
data = None
result = self.model_class.from_db(data=data)
self.assertIsNone(result)
def test_from_db_no_id(self):
data = {self.faker.pystr(): self.faker.pystr()}
result = self.model_class.from_db(data=data)
self.assertEqual(self.model_class(id=None), result)
def test_to_db(self):
expected_object_id = bson.ObjectId()
object_id_mock = self.patch_obj(target=bson, attribute="ObjectId", return_value=expected_object_id)
expected_utc_now = datetime.datetime.utcnow()
datetime_mock = self.create_patch(target="fastapi_mongodb.models.datetime")
datetime_mock.datetime.utcnow.return_value = expected_utc_now
model = self.TestCreatedUpdatedModel(test=self.faker.pystr())
result = model.to_db()
object_id_mock.assert_called_once()
self.assertEqual(expected_object_id, result["_id"])
self.assertEqual(
expected_object_id.generation_time.replace(tzinfo=None),
result["created_datetime"],
)
self.assertEqual(expected_utc_now, result["updated_datetime"])
self.assertEqual(model.test, result["test"])
def test_to_db_from_db_flow(self):
expected_object_id = bson.ObjectId()
self.patch_obj(target=bson, attribute="ObjectId", return_value=expected_object_id)
to_db_data = {"test": self.faker.pystr()}
expected_result = {"_id": expected_object_id} | to_db_data
to_db_result = self.TestModel(**to_db_data).to_db()
from_db_result = self.TestModel.from_db(data=to_db_result).to_db()
self.assertEqual(expected_result, from_db_result)
def test_to_db_from_db_flow_by_alias_false(self):
expected_object_id = bson.ObjectId()
to_db_data = {"test": self.faker.pystr(), "id": expected_object_id}
expected_result = {"_id": expected_object_id} | to_db_data
to_db_result = self.TestModel(**to_db_data).to_db(by_alias=False)
from_db_result = self.TestModel.from_db(data=to_db_result).to_db(by_alias=False)
expected_result.pop("id")
self.assertEqual(expected_result, from_db_result)
|
StarcoderdataPython
|
3380928
|
<reponame>RaoniSilvestre/Exercicios-Python<gh_stars>1-10
print('\033[31m=-'*20)
print(' CALCULADOR DE P.A.')
print('=-'*20)
print('')
print('Esse programa vai mostrar os 10 primeiros\nvalores da Progressão Aritimétrica que \nvocê escolher >:D')
print('')
primeiro = int(input('Primeiro termo: '))
razao = int(input('Razão: '))
termo = primeiro
cont = 1
limite = 0
mais = 10
while mais != 0:
limite = mais + limite
while cont <= limite:
print('{} -> '.format(termo),end='')
        termo += razao
cont += 1
mais = int(input('PAUSE\nQuantos você quer mostrar a mais?'))
print('FIM DO PROGRAMA')
|
StarcoderdataPython
|
7166
|
# -*- coding: utf-8 -*-
# flake8: noqa
from flask import Flask
from flask_themes2 import Themes
import config
from util.auth import is_admin
from util.converter import RegexConverter
from util.csrf import generate_csrf_token
app = Flask(__name__.split('.')[0])
app.secret_key = config.SECRET_KEY
app.url_map.converters['regex'] = RegexConverter
app.jinja_env.globals['config'] = config
app.jinja_env.globals['csrf_token'] = generate_csrf_token
app.jinja_env.globals['is_admin'] = is_admin
Themes(app, app_identifier='yelplove')
# if debug property is present, let's use it
try:
app.debug = config.DEBUG
except AttributeError:
app.debug = False
import views
|
StarcoderdataPython
|
103782
|
# LCD via i2c driver for MicroPython (on ESP8266)
# Copyright (c) 2016 Dries007
# License: MIT
#
# Only tested with PCF8574T and a 16*2 LCD
import time
from micropython import const
_BIT0 = const(1 << 0)
_BIT1 = const(1 << 1)
_BIT2 = const(1 << 2)
_BIT3 = const(1 << 3)
_BIT4 = const(1 << 4)
_BIT5 = const(1 << 5)
_BIT6 = const(1 << 6)
_BIT7 = const(1 << 7)
class LCD:
def __init__(self, i2c, address=0x3F, check=True, bit_rs=0, bit_rw=1, bit_enable=2, bit_led=3, bit_data=4):
self.i2c = i2c
self.address = address
self.bit_rs = 1 << bit_rs
self.bit_rw = 1 << bit_rw
self.bit_enable = 1 << bit_enable
self.bit_led = 1 << bit_led
self.current_led = 1 << bit_led
self.shift_data = bit_data
if check and address not in i2c.scan():
raise Exception('LCD init failed: No device on address %x' % address)
def led(self, value=None):
old = self.current_led == self.bit_led
if value is not None:
self.current_led = self.bit_led if value else 0
self.i2c.writeto(self.address, bytes((self.current_led,)))
return old
def clear(self):
self.write_byte(0b1, rs=0)
time.sleep(.01)
def home(self):
self.write_byte(0b10, rs=0)
time.sleep(.01)
def pos(self, col, row=0):
self.write_byte(_BIT7 | (col & 0b111111) | ((row & 1) << 6), rs=0)
def custom_char(self, char, data):
self.write_byte(_BIT6 | ((7 & char) << 3), rs=0)
self.write(data, rs=1)
def display_control(self, enabled=True, cursor=True, blink=True):
byte = 0b00001000
if enabled: byte |= _BIT2
if cursor: byte |= _BIT1
if blink: byte |= _BIT0
self.write_byte(byte, rs=0)
def write_nibble(self, data, rs):
self.i2c.writeto(self.address, bytes((
self.current_led | rs | (data << self.shift_data),
self.current_led | rs | self.bit_enable | (data << self.shift_data),
self.current_led | rs | (data << self.shift_data)
)))
time.sleep(.0001)
def write_byte(self, data, rs):
self.write(bytes((data, )), rs=rs)
def write(self, data, rs):
for byte in data:
self.write_nibble((byte >> 4) & 0x0F, rs=rs)
self.write_nibble(byte & 0x0F, rs=rs)
def print(self, text):
self.clear()
lines = text.split(b'\n', 2)
self.write(lines[0], self.bit_rs)
if len(lines) == 2:
self.pos(0, 1)
self.write(lines[1], self.bit_rs)
def init(self):
time.sleep(.005)
self.write_byte(0b00110011, rs=0) # Force display in 8 bit mode
time.sleep(.005)
self.write_nibble(0b0010, rs=0) # Set 4 bit mode
time.sleep(.005)
self.write_byte(0b00101000, rs=0) # Function set: 4 Bit, 2 Lines, Font 5*8
time.sleep(.005)
self.display_control(True, True, True) # Display control: Display on, cursor on, blink on
time.sleep(.005)
self.clear()
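# Illustrative usage sketch (assumptions: an ESP8266 with SDA on GPIO4 and SCL
# on GPIO5, and the backpack answering at 0x3F -- adjust to your wiring):
if __name__ == '__main__':
    from machine import I2C, Pin
    i2c = I2C(scl=Pin(5), sda=Pin(4), freq=100000)
    lcd = LCD(i2c, address=0x3F)
    lcd.init()
    lcd.led(True)
    lcd.print(b'Hello\nWorld')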
|
StarcoderdataPython
|
161278
|
import numpy as np
import cv2
import tensorflow as tf
from scipy import ndimage
import sys
import os
import math
def getBestShift(img):
"""
params - image to get shifts of
returns - finds the best shifts to do on the image and returns the x and y coordinates of shifts
"""
cy,cx = ndimage.measurements.center_of_mass(img)
print cy,cx
rows,cols = img.shape
shiftx = np.round(cols/2.0-cx).astype(int)
shifty = np.round(rows/2.0-cy).astype(int)
return shiftx,shifty
def shift(img,sx,sy):
"""
params - image , xshift and yshift
returns - shifts the image by x and y shift
"""
rows,cols = img.shape
M = np.float32([[1,0,sx],[0,1,sy]])
shifted = cv2.warpAffine(img,M,(cols,rows))
return shifted
def angle_cos(p0, p1, p2):
d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
def find_squares(img):
img = cv2.GaussianBlur(img, (5, 5), 0)
squares = []
for gray in cv2.split(img):
for thrs in xrange(0, 255, 26):
if thrs == 0:
bin = cv2.Canny(gray, 0, 50, apertureSize=5)
bin = cv2.dilate(bin, None)
else:
retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
cnt_len = cv2.arcLength(cnt, True)
cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt) and cv2.contourArea(cnt) < 100000:
cnt = cnt.reshape(-1, 2)
max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
if max_cos < 0.1:
squares.append(cnt)
return squares
def learn_image(rect):
"""
    These are the TensorFlow placeholders and variables for the MNIST softmax network.
"""
checkpoint_dir = "cps/"
learnt = False
x = tf.placeholder("float", [None, 784])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x,W) + b)
y_ = tf.placeholder("float", [None,10])
saver = tf.train.Saver()
sess = tf.Session()
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print 'No checkpoint found'
exit(1)
"""
This is the original colored image and gray pre-processed image.
"""
color_complete = rect
gray_complete = rect
_, gray_complete = cv2.threshold(255-gray_complete, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
"""
This is the digitized_image array filled with -1's
"""
digit_image = -np.ones(gray_complete.shape)
height, width = gray_complete.shape
"""
crop into several images
"""
for cropped_width in range(100, 300, 20):
for cropped_height in range(100, 300, 20):
for shift_x in range(0, width-cropped_width, cropped_width/4):
for shift_y in range(0, height-cropped_height, cropped_height/4):
gray = gray_complete[shift_y:shift_y+cropped_height,shift_x:shift_x + cropped_width]
"""
                    This checks if the image is almost empty, i.e. it contains fewer than 20 non-zero pixels
"""
if np.count_nonzero(gray) <= 20:
continue
"""
                    This checks whether we are cutting through a digit somewhere, i.e. whether the crop border still contains white pixels
"""
if (np.sum(gray[0]) != 0) or (np.sum(gray[:,0]) != 0) or (np.sum(gray[-1]) != 0) or (np.sum(gray[:,-1]) != 0):
continue
"""
Saving the top-left and bottom-right positions of each image to draw the rectangles later
"""
top_left = np.array([shift_y, shift_x])
bottom_right = np.array([shift_y+cropped_height, shift_x + cropped_width])
"""
This removes rows and columns from image which are completely black. This helps to crop the images
which contain the digits.
"""
while np.sum(gray[0]) == 0:
top_left[0] += 1
gray = gray[1:]
while np.sum(gray[:,0]) == 0:
top_left[1] += 1
gray = np.delete(gray,0,1)
while np.sum(gray[-1]) == 0:
bottom_right[0] -= 1
gray = gray[:-1]
while np.sum(gray[:,-1]) == 0:
bottom_right[1] -= 1
gray = np.delete(gray,-1,1)
actual_w_h = bottom_right-top_left
"""
                    This checks if more than 20% of the currently selected rectangle was already
                    assigned a digit; if so, we have already found that digit.
"""
rectangle = digit_image[top_left[0]:bottom_right[0],top_left[1]:bottom_right[1]]
if (np.count_nonzero(rectangle+1) >
0.2*actual_w_h[0]*actual_w_h[1]):
continue
"""
Converts the image to 28x28 array to feed into our neural network by applying padding
"""
rows,cols = gray.shape
compl_dif = abs(rows-cols)
half_Sm = compl_dif/2
half_Big = half_Sm if half_Sm*2 == compl_dif else half_Sm+1
if rows > cols:
gray = np.lib.pad(gray,((0,0),(half_Sm,half_Big)),'constant')
else:
gray = np.lib.pad(gray,((half_Sm,half_Big),(0,0)),'constant')
gray = cv2.resize(gray, (20, 20))
gray = np.lib.pad(gray,((4,4),(4,4)),'constant')
"""
This gets the best shifting x and y and shifts the each image in such a way that
the digit is in the center of the image
"""
shiftx,shifty = getBestShift(gray)
shifted = shift(gray,shiftx,shifty)
gray = shifted
"""
                    This flattens and scales the image array to values between 0 and 1 for the neural network
and makes a prediction.
"""
flatten = gray.flatten() / 255.0
prediction = [tf.reduce_max(y),tf.argmax(y,1)[0]]
pred = sess.run(prediction, feed_dict={x: [flatten]})
print pred
if pred:
learnt = True
"""
                        This draws a rectangle on each digit and writes the prediction probability and the prediction
value
"""
cv2.rectangle(color_complete,tuple(top_left[::-1]),tuple(bottom_right[::-1]),color=(0,255,0),
thickness=5)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(color_complete,str(pred[1]),(top_left[1],bottom_right[0]+50),font,fontScale=1.4,
color=(0,255,0),thickness=4)
cv2.putText(color_complete,format(pred[0]*100,".1f")+"%",(top_left[1]+30,bottom_right[0]+60),
font,fontScale=0.8,color=(0,255,0),thickness=2)
"""
    Finally, we return the digitized image (TODO - combine this image with the webcam feed using cv2 overlay methods)
"""
sess.close()
tf.reset_default_graph()
return color_complete,learnt
learnt = False
img = cv2.VideoCapture(0)
while True:
"""
    A little pre-processing of the webcam feed
"""
ret,frame = img.read()
grey = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
#cv2.equalizeHist(gray,gray)
"""
detects the different squares in the webcam feed
"""
"""
This is a dirty hack to compute the rectangle to select the square detected and save the subsquent image
TODO - implement it properly (hint - Eucledian Distance(squares array) )
"""
try:
squares = find_squares(grey)
cv2.drawContours(frame, squares, -1, (0,255,0), 3)
cropped_width = math.sqrt( ((squares[0][3][0]-squares[0][0][0])**2) + ((squares[0][3][1]-squares[0][0][1])**2) )
cropped_height = math.sqrt( ((squares[0][1][0]-squares[0][0][0])**2) + ((squares[0][1][1]-squares[0][0][1])**2) )
rect = grey[squares[0][0][1]:squares[0][0][1]+cropped_width,squares[0][0][0]:squares[0][0][0]+cropped_height]
grey[squares[0][0][1]:squares[0][0][1]+cropped_width,squares[0][0][0]:squares[0][0][0]+cropped_height] = rect
cv2.imshow("Pre-feed",frame)
digitized_image = rect
if not learnt:
digitized_image,learnt = learn_image(rect)
#frame[squares[0][0][1]:squares[0][0][1]+cropped_width,squares[0][0][0]:squares[0][0][0]+cropped_height] = digitized_image
except:
e = sys.exc_info()
print e
cv2.imshow("Webcam Feed",digitized_image)
"""
press q to exit
"""
if cv2.waitKey(1) & 0xFF == ord('q'):
break
img.release()
cv2.destroyAllWindows()
|
StarcoderdataPython
|
3213398
|
<filename>keybaseclient/raw_api.py
import base64
import binascii
import hashlib
import hmac
import requests
import scrypt
class InvalidRequestException(Exception):
"""Exception containing information about failed request."""
def __init__(self, message, status=None):
"""Instantiate exception with message and (optional) status object.
Arguments:
message -- error message
status -- keybase.io status object (default None)
"""
super(InvalidRequestException, self).__init__(message)
self.status = status
def _make_request(method, url, params):
"""Send and process an API call to keybase.io.
Arguments:
method -- requests method to use for the call
url -- full URL to call
params -- request parameters to send with the call
Returns:
If successful, full response object
If failed, InvalidRequestException with an error message and potentially
the keybase.io status object
"""
response = method(url, params=params)
if response.status_code != 200:
raise InvalidRequestException(response.text)
response_json = response.json()
if response_json['status']['code'] != 0:
raise InvalidRequestException(response_json['status']['desc'],
response_json['status'])
return response
def get_salt(username):
"""Retrieve salt, token, and session for user with provided username.
Arguments:
username -- username for the desired user
Returns:
If successful, tuple with salt, csrf token and login session
If failed, InvalidRequestException
"""
salt_obj = _make_request(requests.get,
'https://keybase.io/_/api/1.0/getsalt.json',
params={'email_or_username': username}).json()
salt = salt_obj['salt']
csrf_token = salt_obj['csrf_token']
login_session = salt_obj['login_session']
return salt, csrf_token, login_session
def _generate_hmac_pwh(password, salt, login_session):
"""Generate password hash consisting of the password, salt, and session.
Arguments:
password -- password to use as hash key
salt -- hex encoded salt to use as hash key
login_session -- base64 encoded session to hash
Returns:
Hashed login session
"""
pwh = scrypt.hash(password, binascii.unhexlify(salt),
1 << 15, 8, 1, 224)[192:224]
hmac_pwh = hmac.new(pwh, base64.b64decode(login_session),
hashlib.sha512).hexdigest()
return hmac_pwh
def login(username, password):
"""Login user with the given username and password.
Arguments:
username -- username for the user to login
password -- password for the user to login
Returns:
If successful, tuple containing session and user object
If failed, InvalidRequestException
"""
salt, csrf_token, login_session = get_salt(username)
hmac_pwh = _generate_hmac_pwh(password, salt, login_session)
login_obj = _make_request(requests.post,
'https://keybase.io/_/api/1.0/login.json',
params={'email_or_username': username,
'csrf_token': csrf_token,
'hmac_pwh': hmac_pwh,
'login_session': login_session}).json()
return login_obj['session'], login_obj['me']
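if __name__ == '__main__':
    # Minimal interactive sketch of the login flow (prompts are illustrative only).
    import getpass
    username = input('keybase username: ')
    password = getpass.getpass('password: ')
    try:
        session, me = login(username, password)
        print('login succeeded; session token starts with', session[:8])
    except InvalidRequestException as exc:
        print('login failed:', exc)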
|
StarcoderdataPython
|
4803943
|
<gh_stars>0
# Copyright 2021 <NAME> <<EMAIL>>. All Rights Reserved.
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representation of the CAD model in Simple Edge configuration.
BRepNet: A topological message passing system for solid models.
https://arxiv.org/pdf/2104.00706.pdf
"""
from typing import List, Tuple
import numpy as np
def simple_edge(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `simple edge` configuration."""
del coedge_to_next
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def assymetric(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `assymetric` configuration."""
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def assymetric_plus(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `assymetric_plus` configuration."""
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def assymetric_plus_plus(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `assymetric++` configuration."""
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
_ne(coedge_to_next, coedge_to_node, coedge_to_edge, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def winged_edge(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `winged edge` configuration."""
coedge_to_prev = np.zeros_like(coedge_to_next)
for (from_ix, to_ix) in enumerate(coedge_to_next):
coedge_to_prev[to_ix] = from_ix
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
_ne(coedge_to_next, coedge_to_node, coedge_to_edge, edges)
_pe(coedge_to_prev, coedge_to_node, coedge_to_edge, edges)
_mne(coedge_to_next, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
_mpe(coedge_to_prev, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
_p(coedge_to_prev, coedge_to_node, edges)
_mn(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mp(coedge_to_prev, coedge_to_node, coedge_to_mate, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def winged_edge_plus(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `winged edge+` configuration."""
coedge_to_prev = np.zeros_like(coedge_to_next)
for (from_ix, to_ix) in enumerate(coedge_to_next):
coedge_to_prev[to_ix] = from_ix
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
_ne(coedge_to_next, coedge_to_node, coedge_to_edge, edges)
_pe(coedge_to_prev, coedge_to_node, coedge_to_edge, edges)
_mne(coedge_to_next, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
    _mpe(coedge_to_prev, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
_nm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
_p(coedge_to_prev, coedge_to_node, edges)
    _pm(coedge_to_mate, coedge_to_node, coedge_to_prev, edges)
_mn(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mnm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
_mp(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mpm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def winged_edge_plus_plus(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `winged edge++` configuration."""
coedge_to_prev = np.zeros_like(coedge_to_next)
for (from_ix, to_ix) in enumerate(coedge_to_next):
coedge_to_prev[to_ix] = from_ix
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
_ne(coedge_to_next, coedge_to_node, coedge_to_edge, edges)
_pe(coedge_to_prev, coedge_to_node, coedge_to_edge, edges)
_mne(coedge_to_next, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
_mpe(coedge_to_prev, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
_nmne(coedge_to_node, coedge_to_next, coedge_to_mate, coedge_to_edge,
edges)
_pmpe(coedge_to_node, coedge_to_prev, coedge_to_mate, coedge_to_edge,
edges)
_mpmpe(coedge_to_node, coedge_to_prev, coedge_to_mate, coedge_to_edge,
edges)
_mnmne(coedge_to_node, coedge_to_next, coedge_to_mate, coedge_to_edge,
edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
_nm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
_p(coedge_to_prev, coedge_to_node, edges)
_pm(coedge_to_mate, coedge_to_node, coedge_to_prev, edges)
_mn(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mnm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
_mp(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mpm(coedge_to_mate, coedge_to_node, coedge_to_next, edges)
_nmn(coedge_to_next, coedge_to_mate, coedge_to_node, edges)
_pmp(coedge_to_prev, coedge_to_mate, coedge_to_node, edges)
_mpmp(coedge_to_prev, coedge_to_mate, coedge_to_node, edges)
_mnmn(coedge_to_next, coedge_to_mate, coedge_to_node, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
def _create_graph(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
edges: List[Tuple[int, int]],
):
"""Create the graph."""
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
n_node = faces_num + edges_num + coedges_num
senders = []
receivers = []
for (f, t) in edges:
senders.append(f)
receivers.append(t)
# don't add self-loops more than once
if f != t:
senders.append(t)
receivers.append(f)
assert len(senders) == len(receivers)
n_edge = len(senders)
nodes = np.concatenate(
(np.pad(face_features,
((0, 0),
(0, edge_features.shape[1] + coedge_features.shape[1]))),
np.pad(edge_features,
((0, 0), (face_features.shape[1], coedge_features.shape[1]))),
np.pad(coedge_features,
((0, 0),
(face_features.shape[1] + edge_features.shape[1], 0)))))
return {
"n_node": np.array([n_node], dtype=np.int32),
"n_edge": np.array([n_edge], dtype=np.int32),
"nodes": nodes,
"senders": np.array(senders, dtype=np.int32),
"receivers": np.array(receivers, dtype=np.int32),
}
def _f(
coedge_to_face: np.ndarray,
coedge_to_node: np.ndarray,
face_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""F.
Creates an edge between coedge and corresponding face.
"""
for coedge_ix, face_ix in enumerate(coedge_to_face):
edges.append((coedge_to_node[coedge_ix], face_to_node[face_ix]))
def _mf(
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_face: np.ndarray,
face_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MF.
Creates an edge between coedge and face of the mate of the coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix],
face_to_node[coedge_to_face[coedge_to_ix]]))
def _e(
coedge_to_edge: np.ndarray,
coedge_to_node: np.ndarray,
edge_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""E.
Creates an edge between coedge and corresponding edge.
"""
for coedge_ix, edge_ix in enumerate(coedge_to_edge):
edges.append((coedge_to_node[coedge_ix], edge_to_node[edge_ix]))
def _ne(
coedge_to_next: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""NE.
Creates an edge between coedge and edge of the next coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_next):
edges.append(
(coedge_to_node[coedge_from_ix], coedge_to_edge[coedge_to_ix]))
def _pe(
coedge_to_prev: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""PE.
Creates an edge between coedge and previous edge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_prev):
edges.append(
(coedge_to_node[coedge_from_ix], coedge_to_edge[coedge_to_ix]))
def _mne(
coedge_to_next: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MN.
Creates an edge between coedge and edge of the mate next coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix],
coedge_to_edge[coedge_to_next[coedge_to_ix]]))
def _mpe(
coedge_to_prev: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MP.
Creates an edge between coedge and edge of the mate previous coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix],
coedge_to_edge[coedge_to_prev[coedge_to_ix]]))
def _nmne(
coedge_to_node: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""NMNE.
Creates an edge between coedge and edge of next mate next coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_next):
edges.append(
(coedge_to_node[coedge_from_ix],
coedge_to_edge[coedge_to_next[coedge_to_mate[coedge_to_ix]]]))
def _pmpe(
coedge_to_node: np.ndarray,
coedge_to_prev: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""PMPE.
Creates an edge between coedge and edge of previous mate previous coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_prev):
edges.append(
(coedge_to_node[coedge_from_ix],
coedge_to_edge[coedge_to_prev[coedge_to_mate[coedge_to_ix]]]))
def _mpmpe(
coedge_to_node: np.ndarray,
coedge_to_prev: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""PMPE.
Creates an edge between coedge and edge of previous mate previous coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix], coedge_to_edge[
coedge_to_prev[coedge_to_mate[coedge_to_prev[coedge_to_ix]]]]))
def _mnmne(
coedge_to_node: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_edge: np.ndarray,
edges: List[Tuple[int, int]],
):
"""PMPE.
Creates an edge between coedge and edge of previous mate previous coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix], coedge_to_edge[
coedge_to_next[coedge_to_mate[coedge_to_next[coedge_to_ix]]]]))
def _i(
coedges_num: int,
coedge_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""I.
Creates self-loop for coedge.
"""
for coedge_ix in range(coedges_num):
edges.append((coedge_to_node[coedge_ix], coedge_to_node[coedge_ix]))
def _m(
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""M.
Creates an edge between coedge and corresponding mate coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append(
(coedge_to_node[coedge_from_ix], coedge_to_node[coedge_to_ix]))
def _n(
coedge_to_next: np.ndarray,
coedge_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""N.
Creates an edge between coedge and next coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_next):
edges.append(
(coedge_to_node[coedge_from_ix], coedge_to_node[coedge_to_ix]))
def _p(
coedge_to_prev: np.ndarray,
coedge_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""P.
Creates an edge between coedge and previous coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_prev):
edges.append(
(coedge_to_node[coedge_from_ix], coedge_to_node[coedge_to_ix]))
def _mn(
coedge_to_next: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_mate: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MN.
Creates an edge between coedge and coedge of the mate next coedge.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix],
coedge_to_node[coedge_to_next[coedge_to_ix]]))
def _mp(
coedge_to_prev: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_mate: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MP.
Creates an edge between coedge and coedge of the previous mate.
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix],
coedge_to_node[coedge_to_prev[coedge_to_ix]]))
def _nm(
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_next: np.ndarray,
edges: List[Tuple[int, int]],
):
"""NM.
Creates an edge between coedge and next coedge of the mate
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix],
coedge_to_node[coedge_to_next[coedge_to_ix]]))
def _pm(
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_prev: np.ndarray,
edges: List[Tuple[int, int]],
):
"""PM.
Creates an edge between coedge and previous coedge of the mate
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix],
coedge_to_node[coedge_to_prev[coedge_to_ix]]))
def _mnm(
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_next: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MNM.
Creates an edge between coedge and coedge of mate of next mate
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append(
(coedge_to_node[coedge_from_ix],
coedge_to_node[coedge_to_mate[coedge_to_next[coedge_to_ix]]]))
def _mpm(
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
coedge_to_prev: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MPM.
Creates an edge between coedge and coedge of mate of previous mate
"""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append(
(coedge_to_node[coedge_from_ix],
coedge_to_node[coedge_to_mate[coedge_to_prev[coedge_to_ix]]]))
def _nmn(
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""NMN."""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_next):
edges.append(
(coedge_to_node[coedge_from_ix],
coedge_to_node[coedge_to_next[coedge_to_mate[coedge_to_ix]]]))
def _pmp(
coedge_to_prev: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""PMP."""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_prev):
edges.append(
(coedge_to_node[coedge_from_ix],
coedge_to_node[coedge_to_prev[coedge_to_mate[coedge_to_ix]]]))
def _mpmp(
coedge_to_prev: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MPMP."""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix], coedge_to_node[
coedge_to_prev[coedge_to_mate[coedge_to_prev[coedge_to_ix]]]]))
def _mnmn(
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_node: np.ndarray,
edges: List[Tuple[int, int]],
):
"""MNMN."""
for coedge_from_ix, coedge_to_ix in enumerate(coedge_to_mate):
edges.append((coedge_to_node[coedge_from_ix], coedge_to_node[
coedge_to_next[coedge_to_mate[coedge_to_next[coedge_to_ix]]]]))
|
StarcoderdataPython
|
88893
|
# Copyright (c) 2014, Vienna University of Technology (TU Wien), Department
# of Geodesy and Geoinformation (GEO).
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Vienna University of Technology - Department of
# Geodesy and Geoinformation nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Author: <NAME> <EMAIL>
# Creation date: 2014-08-04
"""
Description of module.
"""
import pandas as pd
import numpy as np
from poets.timedate.dekad import get_dekad_period
def calc_CDI(data, refparam=None, lags=[0, 10]):
"""
Calculates a weighted average over all columns of a pandas DataFrame.
Parameters
----------
data : pandas.DataFrame
Pandas DataFrame containing data to be averaged.
refparam : str, optional
Reference parameter. If not set, parameters will be weighted
equally.
lags : list of int, optional
Time periods to shift parameter against refparam, defaults to [0, 10].
Returns
-------
df : pandas DataFrame
Return the average of data
"""
cols = data.keys()
dat = np.array(data[cols])
dat = np.ma.masked_invalid(dat)
weights = calc_weights(data, refparam, lags)
if refparam is None:
avg = np.ma.average(dat, axis=1)
else:
avg = np.ma.average(dat, axis=1, weights=weights)
df = pd.DataFrame(avg, columns=['CDI'], index=data.index)
return df
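# Illustrative call (column names are assumptions): for a DataFrame holding
# drought indicator time series, a CDI weighted towards soil moisture could be
# computed as
#   cdi = calc_CDI(df[['NDVI', 'SWI', 'LST']], refparam='SWI', lags=[0, 10])
# which returns a single-column DataFrame named 'CDI' on the same index.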
def calc_weights(data, refparam, lags=[0, 10], exclude=None):
"""
Calculates the weights of parameters for weighted averaging. Weights
are calculated using correlation and time shift of each parameter
against the reference parameter. Parameters must be direct proportional
to reference parameter!
Parameters
----------
data : pandas.DataFrame
DataFrame containing data in columns.
refparam : str
Reference parameter.
lags : list of int, optional
Time periods to shift parameter against refparam,
defaults to [0, 10].
exclude : string, optional
Variable which should not be used for calculation of the weights.
Returns
-------
sorted_weights : list of int
Weights associated with the parameters in data.
"""
params = data.keys()
maxlag = {}
maxcorr = {}
weights = {}
sorted_weights = []
correlations = calc_correlation(data, refparam, lags, exclude)
for param in params:
if exclude is not None and exclude in param:
continue
maxlag[param] = correlations[param]['lag']
maxcorr[param] = correlations[param]['corr']
for key in maxlag.keys():
weights[key] = (float(maxlag[key])) / sum(maxlag.values()) * 100
for key in maxcorr.keys():
weights[key] = ((weights[key] +
(float(maxcorr[key]) / sum(maxcorr.values())) *
100) / 2)
for param in params:
if exclude is not None and exclude in param:
continue
sorted_weights.append(weights[param])
return sorted_weights
def calc_correlation(data, refparam, lags=[0, 10], exclude=None):
"""
Calculates the correlations between parameters and a reference
parameter given as columns in a DataFrame.
Parameters
----------
data : pandas.DataFrame
DataFrame containing data in columns.
refparam : str
Reference parameter.
lags : list of int, optional
Time periods to shift parameter against refparam,
defaults to [0, 10].
exclude : string, optional
Variable which should not be used for calculation of the correlation.
Returns
-------
correlation : dict
Dictionary containing correlations and max time lags.
"""
correlation = {}
for param in data.keys():
if exclude is not None and exclude in param:
continue
correlation[param] = {'corr': None, 'lag': None}
for i in range(lags[0], lags[1]):
i += abs(lags[0]) + 1
corr = data[param].corr(data[refparam].shift(periods=i),
method='pearson')
if correlation[param]['corr'] is None:
correlation[param]['corr'] = abs(corr)
correlation[param]['lag'] = i
if abs(corr) > abs(correlation[param]['corr']):
correlation[param]['corr'] = abs(corr)
correlation[param]['lag'] = i
if abs(corr) == abs(correlation[param]['corr']):
if abs(i) < abs(correlation[param]['lag']):
correlation[param]['corr'] = abs(corr)
correlation[param]['lag'] = i
return correlation
def calc_DI(data, inverse=False, interest_period=[6, 12, 24], scaled=False,
scale_zero=False, modf_all=False):
"""
Calculates a Drought Index based on an algorithm developed by
<NAME>.
Parameters
----------
data : pandas.DataFrame
Input data as Pandas DataFrame, must come with column names.
inverse : bool
        Inverts the input time series; set True if the time series is
        inversely proportional to the expected output, e.g. Temperature with output
Temperature Drought Index.
interest_period : list of int, optional
interest periods used to calculate drought index,
defaults to [6, 12, 24]
scaled : boolean, optional
If True values will be scaled between 0 and 1.
scale_zero : boolean, optional
If True values will be shifted around zero, defaults to False.
modf_all : boolean, optional
If True values will be modified, independent of their min.
"""
ts_date = data.index
variables = data.keys()
data['period'] = get_dekad_period(ts_date)
for var in variables:
if inverse is True:
data[var] = ((data[var].max() + 1) - data[var])
if modf_all is True:
data['modf'] = data[var] + 1
del data[var]
elif data[var].min() == 0:
data['modf'] = data[var] + 1
del data[var]
else:
data['modf'] = data[var]
del data[var]
data['modf_avg'] = (data.groupby('period').modf
.transform(lambda x: x.mean()))
# Excess
# Dekads below long term average. If the statement is true the
# program return 1
data['exc'] = np.choose((data['modf_avg'] / data['modf']) >= 1,
[0, 1])
# Run length
# Maximum number of successive dekads below long term average
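        # e.g. a rolling window with exc = [1, 1, 0, 1] contains the runs
        # '11' and '1', so the run length recorded for that window is 2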
for ip in interest_period:
data['rlen'] = pd.rolling_apply(data['exc'], ip,
(lambda x:
len(max((''.join(str(j)
for j in map(int,
x)))
.split('0')))),
ip)
# get modified run length
max_rlen = data['rlen'].max()
data['rlen'] = (max_rlen + 1) - data['rlen']
            # average run lengths for each period over all years
            rlen_avg = (data.groupby('period').rlen
                        .transform(lambda x: x.mean()))
data['form'] = data['rlen'] / rlen_avg
# sumip matrix
# calculates sum of the values for each interest period
data['sumip'] = pd.rolling_apply(data['modf'], ip,
lambda x: np.nansum(x),
round(ip * 0.6))
# average values for each interest period over all years
sumip_avg = (data.groupby('period')['sumip']
.transform(lambda x: x.mean()))
data['nrl'] = data['sumip'] / sumip_avg
# calculating PDI/TDI
data['val'] = data['nrl'] * np.sqrt(data['form'])
# scaled index
dkey = var + '_DI_' + str(ip)
if scaled:
data[dkey] = ((data['val'] - data['val'].min()) /
(data['val'].max() - data['val'].min()))
else:
data[dkey] = data['val']
if scale_zero:
data[dkey] = data[dkey] - data[dkey].mean()
del (data['val'], data['nrl'], data['sumip'], data['rlen'],
data['form'])
# deletes not further relevant columns
del data['modf'], data['modf_avg'], data['exc']
del data['period']
return data
if __name__ == "__main__":
pass
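    # Minimal usage sketch (illustrative only, not part of the original
    # module): the column names 'sm' and 'ndvi' and the 10-daily index below
    # are made-up placeholders to show how calc_CDI is meant to be called.
    idx = pd.date_range('2010-01-01', periods=36, freq='10D')
    demo = pd.DataFrame({'sm': np.random.rand(36),
                         'ndvi': np.random.rand(36)}, index=idx)
    cdi = calc_CDI(demo.copy(), refparam='sm', lags=[0, 5])
    print(cdi.head())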
|
StarcoderdataPython
|
1765536
|
# Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _check_for_expected_result(name, schema):
expected_result = None
if "results" in schema:
if name in schema["results"]:
expected_result = schema["results"][name]
return expected_result
def generator_type(*args):
def wrapper(func):
func.types = args
return func
return wrapper
def simple_generator(fn):
"""
Decorator for simple generators that return one value
"""
def wrapped(self, schema):
result = fn(self, schema)
if result is not None:
expected_result = _check_for_expected_result(fn.__name__, schema)
return (fn.__name__, result, expected_result)
return
return wrapped
class BasicGeneratorSet(object):
_instance = None
schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"http-method": {
"enum": ["GET", "PUT", "HEAD",
"POST", "PATCH", "DELETE", 'COPY']
},
"admin_client": {"type": "boolean"},
"url": {"type": "string"},
"default_result_code": {"type": "integer"},
"json-schema": {},
"resources": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"name": {"type": "string"},
"expected_result": {"type": "integer"}
}
}
]
}
},
"results": {
"type": "object",
"properties": {}
}
},
"required": ["name", "http-method", "url"],
"additionalProperties": False,
}
def __init__(self):
self.types_dict = {}
for m in dir(self):
            if callable(getattr(self, m)) and '__' not in m:
method = getattr(self, m)
if hasattr(method, "types"):
for type in method.types:
if type not in self.types_dict:
self.types_dict[type] = []
self.types_dict[type].append(method)
def validate_schema(self, schema):
if "json-schema" in schema:
jsonschema.Draft4Validator.check_schema(schema['json-schema'])
jsonschema.validate(schema, self.schema)
def generate(self, schema):
"""
        Generate a JSON dictionary based on a schema.
Only one value is mis-generated for each dictionary created.
Any generator must return a list of tuples or a single tuple.
The values of this tuple are:
result[0]: Name of the test
result[1]: json schema for the test
result[2]: expected result of the test (can be None)
"""
LOG.debug("generate_invalid: %s" % schema)
schema_type = schema["type"]
if isinstance(schema_type, list):
if "integer" in schema_type:
schema_type = "integer"
else:
raise Exception("non-integer list types not supported")
result = []
if schema_type not in self.types_dict:
raise Exception("generator (%s) doesn't support type: %s"
% (self.__class__.__name__, schema_type))
for generator in self.types_dict[schema_type]:
ret = generator(schema)
if ret is not None:
if isinstance(ret, list):
result.extend(ret)
elif isinstance(ret, tuple):
result.append(ret)
else:
raise Exception("generator (%s) returns invalid result: %s"
% (generator, ret))
LOG.debug("result: %s" % result)
return result
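# Illustrative sketch (not part of the original module): a minimal subclass
# showing how @generator_type and @simple_generator are combined. The method
# name and the invalid value it returns are made up for demonstration.
class ExampleGeneratorSet(BasicGeneratorSet):

    @generator_type("string")
    @simple_generator
    def gen_int_instead_of_string(self, schema):
        # deliberately return an integer where the schema expects a string
        return 4096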
|
StarcoderdataPython
|
1677402
|
# Test Conan package
# <NAME>, Odant 2019 - 2020
from conans import ConanFile, CMake
class PackageTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
requires = "ninja/1.9.0"
def imports(self):
self.copy("*.pdb", dst="bin", src="bin")
self.copy("*.dll", dst="bin", src="bin")
self.copy("*.so*", dst="bin", src="lib")
def build(self):
cmake = CMake(self, generator="Ninja", msbuild_verbosity='normal')
cmake.verbose = True
cmake.configure()
cmake.build()
self.cmake_is_multi_configuration = cmake.is_multi_configuration
def test(self):
if self.cmake_is_multi_configuration:
self.run("ctest --verbose --build-config %s" % self.settings.build_type)
else:
self.run("ctest --verbose")
|
StarcoderdataPython
|
3327434
|
from .attributes import *
from .image import svg_content
from .primitives import *
|
StarcoderdataPython
|
4805574
|
# -*- coding: utf-8 -*-
# Author:D4Vinci
# Don't touch my code, it's art :D
from __future__ import print_function
import sys, argparse
try: # Instead of using sys to detect python version
input = raw_input
except NameError:
pass
def my_map(fuck,asses):
#Because map behaves differently in python 2 and 3, I decided to write my own fuckin version :3
lols = []
for ass in asses:
lols.append( fuck(ass) ) # LOL
return lols
# As python 3 has no long function anymore, I will do mine
def ip2long(ip, p=3, long_ip=0): # With simple math I could replace the old shitty function with this loop
for part in my_map( int,ip.split(".") ):
long_ip += pow(256,p)*part
p -=1
return [ str(long_ip) ]
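# e.g. ip2long("192.168.1.1") returns ["3232235777"]
# (192*256**3 + 168*256**2 + 1*256 + 1)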
def ip2octal(ip):
return [ '.'.join([ format(int(x), '04o') for x in ip.split('.') ]) ]
def ip2hex(ip, length=4):
# With length I will decided which parts to convert
ip_parts = my_map(int,ip.split("."))
return ".".join( my_map( hex,ip_parts[:length] ) + my_map(str,ip_parts[length:]) )
def hex_alike(ip):
result,n = [],6
for x in range(1,5):
result.append( ip2hex(ip,x) )
result.append( "0x"+"0"*8+ip2hex(ip).split(".")[0][2:] )
for p in ip2hex(ip).split(".")[1:]:
result[4] = ".".join([ result[4], "0x"+"0"*n+p[2:] ])
n -= 2
return result + [ip2hex( ip2long(ip)[0] ).replace("L","")]
def ip_urlencoded(ip):
return [ ( "%2E".join([ "%3"+i for i in ip.split(".") ]) ).replace(":","%3A") ]
def ip_as_url(ip):
urls = ["http://howsecureismypassword.net@","http://google.com@accounts@","https://www.facebook.com+settings&tab=privacy@"]
return [ u+ip for u in urls]
def main():
parser = argparse.ArgumentParser(prog='Cuteit.py')
parser.add_argument("ip", help="IP you want to convert")
parser.add_argument("--disable-coloring", action="store_true", help="Disable colored printing")
args = parser.parse_args()
G,B,R,W,M,C,end = '\033[92m','\033[94m','\033[91m','\x1b[37m','\x1b[35m','\x1b[36m','\033[0m'
Bold,underline = "\033[1m","\033[4m"
if args.disable_coloring:
G = B = R = W = M = C = Bold = underline = ''
heart = "<3" # Fuck you windows!
print(end+G+Bold+"Cuteit IP obfuscator made with "+heart+" By Karim 'D4Vinci' Shoair", file=sys.stderr)
ip = args.ip
if ip.count(".")!=3:
print(end+R+Bold+"Sorry, we convert ips only not urls!"+end, file=sys.stderr)
exit(0)
# My own shit regex :D
for shit in ["http://", "https://", "\\", "/"]:
while shit in ip:
ip = ip.replace(shit,"")
ip = ip.split(":")[0]
formats = {"IP to Long":ip2long, "IP to HEX":hex_alike, "IP to Octal":ip2octal, "IP to urlencoded IP":ip_urlencoded}
for form in formats:
print(end+M+Bold+"- Converting "+form+end, file=sys.stderr)
for n,thing in enumerate( formats[form](ip) ):
print(end+G+"\t* Using "+end+R+"http://"+thing+end+G+" form", file=sys.stderr)
for i,shape in enumerate(ip_as_url(thing)):
print(end+W+"\t\t["+str(i)+"] "+end+G+shape, file=sys.stderr)
sys.stdout.flush() # So it prints line by line not hanging
print("", file=sys.stderr)
sys.stdout.flush()
class lib:
def __init__(self, ip):
self.ip = ip
self.hex = ip2hex(self.ip)
self.long = ip2long(self.ip)[0]
self.oct = ip2octal(self.ip)[0]
self.urlencoded = ip_urlencoded(self.ip)[0]
self.hex_parts = hex_alike(self.ip)[:-1]
self.in_urls = lambda ip : ip_as_url(ip)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3383339
|
import numpy as np
from typing import List, Tuple
from mph import GradedMatrix, groebner_bases, presentation_FIrep
def choose_graded_subbasis(matrix: List[List[Tuple[int, int]]], column_grades: List[List[int]], row_grades: List[List[int]]):
dense_matrix = np.zeros(shape=(len(row_grades), len(column_grades)), dtype=np.int32)
for i in range(len(matrix)):
for entry in matrix[i]:
dense_matrix[entry[1], i] = entry[0]
transform = np.random.choice([0, 1], size=(dense_matrix.shape[1], np.random.randint(1, dense_matrix.shape[1]-1) if dense_matrix.shape[1]>2 else 1))
column_grades_ret = []
for i in range(transform.shape[1]):
grade = [0]*len(column_grades[0])
for j in range(transform.shape[0]):
if transform[j, i] != 0:
grade = [max(grade[k], column_grades[j][k]) for k in range(len(grade))]
column_grades_ret.append(grade)
return np.matmul(dense_matrix, transform) % 2, column_grades_ret
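# Illustrative note: when one transform column combines generators with grades
# [1, 4] and [3, 2], the new column is assigned the coordinatewise max [3, 4].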
def generate_rivet_output(high_matrix, column_grades_h, low_matrix, column_grades_l):
with open("input_file", "w") as f:
out = "firep\nparameter 1\nparameter 2\n" + str(high_matrix.shape[1]) +" " + str(low_matrix.shape[1]) + " " + str(low_matrix.shape[0])+"\n"
for column_index in range(high_matrix.shape[1]):
out += str(column_grades_h[column_index][0]) +" "+str(column_grades_h[column_index][1])+" ; "
for row_index in range(high_matrix.shape[0]):
if high_matrix[row_index, column_index] != 0:
out += str(row_index)+" "
out += "\n"
for column_index in range(low_matrix.shape[1]):
out += str(column_grades_l[column_index][0]) +" "+str(column_grades_l[column_index][1])+" ; "
for row_index in range(low_matrix.shape[0]):
if low_matrix[row_index, column_index] != 0:
out += str(row_index)+" "
out += "\n"
f.write(out)
def rivet_benchmark(m: int, n: int, n_parameters: int=2, density: float=0.5, grade_range:int=1000, log_level:str="silent"):
# Create a random matrix as the low_matrix in the FIrep
low_matrix = np.random.choice([0, 1], size=(m, n), p=[density, 1-density])
# Choose random grades for the columns
column_grades_l = [ np.random.choice(grade_range, size=(n_parameters,)) for _ in range(n) ]
# Compute the kernel of this graded matrix
output = groebner_bases(low_matrix, column_grades_l, log_level=log_level)
if len(output[1].column_grades) == 0:
return 0
# Choose a random subbasis of the kernel as the high_matrix in the FIrep
high_matrix, column_grades_h = choose_graded_subbasis(output[1].matrix, output[1].column_grades, output[1].row_grades)
generate_rivet_output(high_matrix, column_grades_h, low_matrix, column_grades_l)
ret = presentation_FIrep(high_matrix, column_grades_h, low_matrix, column_grades_l, log_level=log_level)
def random_FIrep_presentation(m: int, n: int, n_parameters: int=3, density: float=0.5, grade_range:int=100):
# Create a random matrix as the low_matrix in the FIrep
low_matrix = np.random.choice([0, 1], size=(m, n), p=[density, 1-density])
# Choose random grades for the columns
column_grades_l = [ np.random.choice(grade_range, size=(n_parameters,)) for _ in range(n) ]
# Compute the kernel of this graded matrix
output = groebner_bases(low_matrix, column_grades_l)
if len(output[1].column_grades) == 0:
return 0
# Choose a random subbasis of the kernel as the high_matrix in the FIrep
high_matrix, column_grades_h = choose_graded_subbasis(output[1].matrix, output[1].column_grades, output[1].row_grades)
return presentation_FIrep(high_matrix, column_grades_h, low_matrix, column_grades_l)
def random_map_gbs(m:int, n:int, n_parameters: int=3, density:float=0.5, grade_range:int=100):
random_matrix = np.random.choice([0, 1], size=(m, n), p=[density, 1-density])
# Choose random grades for the columns
column_grades = [ np.random.choice(grade_range, size=(n_parameters,)) for _ in range(n) ]
# Compute Groebner bases for the image and kernel of this graded matrix
image_gb, kernel_gb = groebner_bases(random_matrix, column_grades)
sparse_random_matrix = [[] for _ in range(random_matrix.shape[1])]
non_zero_entries = np.nonzero(random_matrix)
for entry in list(zip(non_zero_entries[1], non_zero_entries[0])):
sparse_random_matrix[entry[0]].append((random_matrix[entry[1], entry[0]], entry[1]))
return GradedMatrix(sparse_random_matrix, column_grades, row_grades = [ [ 0 for _ in range(len(column_grades[0])) ] for _ in range(random_matrix.shape[0])]), image_gb, kernel_gb
|
StarcoderdataPython
|
1665093
|
<gh_stars>0
# Generated by Django 2.1.7 on 2019-04-01 00:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Permission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('resource', models.CharField(max_length=255, verbose_name='Resource')),
('action', models.CharField(max_length=255, verbose_name='Action')),
],
),
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=255, verbose_name='Label')),
('permissions', models.ManyToManyField(to='users.Permission')),
],
),
migrations.AddField(
model_name='profile',
name='roles',
field=models.ManyToManyField(to='users.Role'),
),
]
|
StarcoderdataPython
|
3214920
|
import pandas as pd
import numpy as np
import os
master_data = pd.DataFrame()
for file in os.listdir('../clean/'):
if(file == 'macro' or file == 'all_clean_data.csv'):
print("Not a file to be appended.")
continue
print("Appending",file)
data = pd.read_csv('../clean/{}'.format(file),encoding='utf-8',index_col=False, dtype=str)
data['SOURCE_FILE']=file
master_data = master_data.append(data, ignore_index=True)
print("Master data now has",len(master_data),"rows")
print("Writing master to csv (this may take a while)")
master_data.to_csv('../clean/all_clean_data.csv',encoding='utf-8', index=False)
|
StarcoderdataPython
|
1717233
|
import os
import glob
import time
import sys
import datetime
from influxdb import InfluxDBClient
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
host = ""
port = 8086
user = ""
password = ""
dbname = ""
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
client = InfluxDBClient(host, port, user, password, dbname)
measurement = "pool_temperature" # the name of the measurement you'd like to use
location = "poolside"
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
iso = time.ctime()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
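    # the second line of w1_slave ends in e.g. 't=23187', i.e. 23.187 degrees C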
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
data = [
{
"measurement": measurement,
"tags": {
"location": location,
},
"fields": {
"temperature" : temp_c
}
}
]
client.write_points(data)
return temp_c
read_temp()
|
StarcoderdataPython
|
68806
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 IBM Corp
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/collector/dispatcher/database.py
"""
from oslo.config import cfg
from datetime import datetime
from ceilometer.collector.dispatcher import database
from ceilometer.publisher import rpc
from ceilometer.tests import base as tests_base
from ceilometer.storage import base
class TestDispatcherDB(tests_base.TestCase):
def setUp(self):
super(TestDispatcherDB, self).setUp()
self.dispatcher = database.DatabaseDispatcher(cfg.CONF)
self.ctx = None
def test_valid_message(self):
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
}
msg['message_signature'] = rpc.compute_signature(
msg,
cfg.CONF.publisher_rpc.metering_secret,
)
self.dispatcher.storage_conn = self.mox.CreateMock(base.Connection)
self.dispatcher.storage_conn.record_metering_data(msg)
self.mox.ReplayAll()
self.dispatcher.record_metering_data(self.ctx, msg)
self.mox.VerifyAll()
def test_invalid_message(self):
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
}
msg['message_signature'] = 'invalid-signature'
class ErrorConnection:
called = False
def record_metering_data(self, data):
self.called = True
self.dispatcher.storage_conn = ErrorConnection()
self.dispatcher.record_metering_data(self.ctx, msg)
assert not self.dispatcher.storage_conn.called, \
'Should not have called the storage connection'
def test_timestamp_conversion(self):
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
'timestamp': '2012-07-02T13:53:40Z',
}
msg['message_signature'] = rpc.compute_signature(
msg,
cfg.CONF.publisher_rpc.metering_secret,
)
expected = {}
expected.update(msg)
expected['timestamp'] = datetime(2012, 7, 2, 13, 53, 40)
self.dispatcher.storage_conn = self.mox.CreateMock(base.Connection)
self.dispatcher.storage_conn.record_metering_data(expected)
self.mox.ReplayAll()
self.dispatcher.record_metering_data(self.ctx, msg)
def test_timestamp_tzinfo_conversion(self):
msg = {'counter_name': 'test',
'resource_id': self.id(),
'counter_volume': 1,
'timestamp': '2012-09-30T15:31:50.262-08:00',
}
msg['message_signature'] = rpc.compute_signature(
msg,
cfg.CONF.publisher_rpc.metering_secret,
)
expected = {}
expected.update(msg)
expected['timestamp'] = datetime(2012, 9, 30, 23, 31, 50, 262000)
self.dispatcher.storage_conn = self.mox.CreateMock(base.Connection)
self.dispatcher.storage_conn.record_metering_data(expected)
self.mox.ReplayAll()
self.dispatcher.record_metering_data(self.ctx, msg)
|
StarcoderdataPython
|
1678794
|
from subprocess import check_call
import os
import shutil as sh
from glob import glob
import nbformat as nbf
from nbclean import NotebookCleaner
from tqdm import tqdm
import numpy as np
SITE_ROOT = os.path.expanduser('~/github/forks/python/teaching/dsep/jupyterhub-for-education-template')
SITE_NAVIGATION = os.path.join(SITE_ROOT, '_data', 'navigation.yml')
TEMPLATE_PATH = os.path.expanduser('~/github/forks/python/teaching/dsep/jupyterhub-for-education-template/assets/templates/jekyllmd.tpl')
TEXTBOOK_FOLDER_NAME = 'textbook'
NOTEBOOKS_FOLDER_NAME = 'notebooks'
TEXTBOOK_FOLDER = os.path.join(SITE_ROOT, TEXTBOOK_FOLDER_NAME)
NOTEBOOKS_FOLDER = os.path.join(SITE_ROOT, NOTEBOOKS_FOLDER_NAME)
IMAGES_FOLDER = os.path.join(SITE_ROOT, 'images')
MARKDOWN_FILE = os.path.join(SITE_ROOT, 'SUMMARY.md')
def _markdown_to_files(path_markdown, indent=2):
"""Takes a markdown file containing chapters/sub-headings and
converts it to a file structure we can use to build a side bar."""
with open(path_markdown, 'r') as ff:
lines = ff.readlines()
files = []
for line in lines:
if line.strip().startswith('* '):
title = _between_symbols(line, '[', ']')
link = _between_symbols(line, '(', ')')
spaces = len(line) - len(line.lstrip(' '))
level = spaces / indent
files.append((title, link, level))
return files
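# Example (illustrative): a SUMMARY.md line such as
#   "  * [Intro](notebooks/01/intro.ipynb)"
# yields the tuple ("Intro", "notebooks/01/intro.ipynb", 1.0) with indent=2.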
def _between_symbols(string, c1, c2):
"""Will return empty string if nothing is between c1 and c2."""
for char in [c1, c2]:
if char not in string:
            raise ValueError("Couldn't find character {} in string {}".format(
                char, string))
return string[string.index(c1)+1:string.index(c2)]
def _clean_notebook(notebook):
cleaner = NotebookCleaner(notebook)
cleaner.remove_cells(empty=True)
cleaner.remove_cells(search_text="# HIDDEN")
cleaner.clear('stderr')
cleaner.save(notebook)
return notebook
if __name__ == '__main__':
# --- Collect the files we'll convert over ---
files = _markdown_to_files(MARKDOWN_FILE)
for ix_file, (title, link, level) in tqdm(list(enumerate(files))):
if len(link) == 0:
continue
if not os.path.exists(link):
raise ValueError("Could not find file {}".format(link))
# Collecting and renaming files/folders
filename = os.path.basename(link)
new_folder = os.path.dirname(link).replace(NOTEBOOKS_FOLDER_NAME, TEXTBOOK_FOLDER_NAME)
new_file_path = os.path.join(new_folder, filename.replace('.ipynb', '.md'))
# Collect previous/next md file for pagination
if ix_file == 0:
prev_file_link = ''
prev_file_title = ''
else:
prev_file_title, prev_file_link, _ = files[ix_file-1]
prev_file_link = prev_file_link.replace(NOTEBOOKS_FOLDER_NAME, TEXTBOOK_FOLDER_NAME).replace('.ipynb', '')
if ix_file == len(files) - 1:
next_file_link = ''
next_file_title = ''
else:
next_file_title, next_file_link, _ = files[ix_file+1]
next_file_link = next_file_link.replace(NOTEBOOKS_FOLDER_NAME, TEXTBOOK_FOLDER_NAME).replace('.ipynb', '')
if not os.path.isdir(new_folder):
os.makedirs(new_folder)
# Create a temporary version of the notebook we can modify
tmp_notebook = link + '_TMP'
sh.copy2(link, tmp_notebook)
# Clean up the file before converting
_clean_notebook(tmp_notebook)
# Run nbconvert moving it to the output folder
build_call = '--FilesWriter.build_directory={}'.format(new_folder)
images_call = '--NbConvertApp.output_files_dir={}'.format(
os.path.join(IMAGES_FOLDER, new_folder))
check_call(['jupyter', 'nbconvert', '--log-level="CRITICAL"',
'--to', 'markdown', '--template', TEMPLATE_PATH,
images_call, build_call, tmp_notebook])
# Images: replace relative image paths to baseurl paths
IMG_STRINGS = [ii*'../' + IMAGES_FOLDER for ii in range(4)]
with open(new_file_path, 'r') as ff:
lines = ff.readlines()
for ii, line in enumerate(lines):
for IMG_STRING in IMG_STRINGS:
line = line.replace(IMG_STRING, '{{ site.baseurl }}/images')
lines[ii] = line
# Front-matter YAML
yaml = []
yaml += ['---']
yaml += ['layout: textbook']
yaml += ['interact_link: {}'.format(link.lstrip('./'))]
yaml += ['previous:']
yaml += [' url: {}'.format(prev_file_link.lstrip('.'))]
yaml += [' title: {}'.format(prev_file_title)]
yaml += ['next:']
yaml += [' url: {}'.format(next_file_link.lstrip('.'))]
yaml += [' title: {}'.format(next_file_title)]
yaml += ['sidebar:']
yaml += [' nav: sidebar-textbook']
yaml += ['---']
yaml = [ii + '\n' for ii in yaml]
lines = yaml + lines
# Add an extra slash to the inline math before `#` since Jekyll strips it
inline_replace_chars = ['#']
for ii, line in enumerate(lines):
dollars = np.where(['$' == char for char in line])[0]
# Make sure we have at least two dollar signs and they
# Aren't right next to each other
            if len(dollars) > 2 and all(d > 1 for d in (dollars[1:] - dollars[:-1])):
for char in inline_replace_chars:
lines[ii] = line.replace('\\#', '\\\\#')
# Write the result
with open(new_file_path, 'w') as ff:
ff.writelines(lines)
os.remove(tmp_notebook)
# Generate sidebar
sidebar_text = ['sidebar-textbook:']
sp = ' '
chapter_ix = 1
for ix_file, (title, link, level) in tqdm(list(enumerate(files))):
if level > 0 and len(link) == 0:
continue
if level == 0:
title = '{}. {}'.format(chapter_ix, title)
chapter_ix += 1
new_link = link.replace(NOTEBOOKS_FOLDER_NAME, TEXTBOOK_FOLDER_NAME).replace('.ipynb', '').strip('.')
space = ' ' if level == 0 else ' '
level = int(level)
sidebar_text.append(space + '- title: {}'.format(title))
sidebar_text.append(space + ' class: level_{}'.format(level))
if len(link) > 0:
sidebar_text.append(space + ' url: {}'.format(new_link))
if ix_file != (len(files) - 1) and level < files[ix_file + 1][-1]:
sidebar_text.append(space + ' children:')
sidebar_text = [ii + '\n' for ii in sidebar_text]
with open(SITE_NAVIGATION, 'r') as ff:
lines = ff.readlines()
text_start = np.where(['# --- Textbook sidebar ---' in line for line in lines])[0][0]
lines = lines[:text_start+1]
lines += sidebar_text
with open(SITE_NAVIGATION, 'w') as ff:
ff.writelines(lines)
print('Done!')
|
StarcoderdataPython
|
1727303
|
from django import forms
from categories.models import Category
from django.conf import settings
class CategoryForm(forms.ModelForm):
cat_type = forms.ChoiceField(choices=settings.TR_TYPES, required=False, label='Category type')
name = forms.CharField(required=False, label='Name')
class Meta:
model = Category
exclude = ('owner', )
|
StarcoderdataPython
|
3378829
|
<reponame>maximilianschaller/genforce
# python3.7
"""Defines loss functions."""
import os
import torch
import numpy as np
import torch.nn.functional as F
import sys
sys.path.append(os.getcwd())
from fourier import fourier_dissimilarity
__all__ = ['FourierRegularizedLogisticGANLoss']
apply_loss_scaling = lambda x: x * torch.exp(x * np.log(2.0))
undo_loss_scaling = lambda x: x * torch.exp(-x * np.log(2.0))
class LogisticGANLoss(object):
"""Contains the class to compute logistic GAN loss."""
def __init__(self, runner, d_loss_kwargs=None, g_loss_kwargs=None):
"""Initializes with models and arguments for computing losses."""
self.d_loss_kwargs = d_loss_kwargs or dict()
self.g_loss_kwargs = g_loss_kwargs or dict()
self.r1_gamma = self.d_loss_kwargs.get('r1_gamma', 10.0)
self.r2_gamma = self.d_loss_kwargs.get('r2_gamma', 0.0)
runner.running_stats.add(
f'g_loss', log_format='.3f', log_strategy='AVERAGE')
runner.running_stats.add(
f'd_loss', log_format='.3f', log_strategy='AVERAGE')
if self.r1_gamma != 0:
runner.running_stats.add(
f'real_grad_penalty', log_format='.3f', log_strategy='AVERAGE')
if self.r2_gamma != 0:
runner.running_stats.add(
f'fake_grad_penalty', log_format='.3f', log_strategy='AVERAGE')
@staticmethod
def preprocess_image(images, lod=0, **_unused_kwargs):
"""Pre-process images."""
if lod != int(lod):
downsampled_images = F.avg_pool2d(
images, kernel_size=2, stride=2, padding=0)
upsampled_images = F.interpolate(
downsampled_images, scale_factor=2, mode='nearest')
alpha = lod - int(lod)
images = images * (1 - alpha) + upsampled_images * alpha
if int(lod) == 0:
return images
return F.interpolate(
images, scale_factor=(2 ** int(lod)), mode='nearest')
@staticmethod
def compute_grad_penalty(images, scores):
"""Computes gradient penalty."""
image_grad = torch.autograd.grad(
outputs=scores.sum(),
inputs=images,
create_graph=True,
retain_graph=True)[0].view(images.shape[0], -1)
penalty = image_grad.pow(2).sum(dim=1).mean()
return penalty
def d_loss(self, runner, data):
"""Computes loss for discriminator."""
G = runner.models['generator']
D = runner.models['discriminator']
reals = self.preprocess_image(data['image'], lod=runner.lod)
reals.requires_grad = True
labels = data.get('label', None)
latents = torch.randn(reals.shape[0], runner.z_space_dim).cuda()
latents.requires_grad = True
# TODO: Use random labels.
fakes = G(latents, label=labels, **runner.G_kwargs_train)['image']
real_scores = D(reals, label=labels, **runner.D_kwargs_train)
fake_scores = D(fakes, label=labels, **runner.D_kwargs_train)
d_loss = F.softplus(fake_scores).mean()
d_loss += F.softplus(-real_scores).mean()
runner.running_stats.update({'d_loss': d_loss.item()})
real_grad_penalty = torch.zeros_like(d_loss)
fake_grad_penalty = torch.zeros_like(d_loss)
if self.r1_gamma:
real_grad_penalty = self.compute_grad_penalty(reals, real_scores)
runner.running_stats.update(
{'real_grad_penalty': real_grad_penalty.item()})
if self.r2_gamma:
fake_grad_penalty = self.compute_grad_penalty(fakes, fake_scores)
runner.running_stats.update(
{'fake_grad_penalty': fake_grad_penalty.item()})
return (d_loss +
real_grad_penalty * (self.r1_gamma * 0.5) +
fake_grad_penalty * (self.r2_gamma * 0.5))
def g_loss(self, runner, data): # pylint: disable=no-self-use
"""Computes loss for generator."""
# TODO: Use random labels.
G = runner.models['generator']
D = runner.models['discriminator']
batch_size = data['image'].shape[0]
labels = data.get('label', None)
latents = torch.randn(batch_size, runner.z_space_dim).cuda()
fakes = G(latents, label=labels, **runner.G_kwargs_train)['image']
fake_scores = D(fakes, label=labels, **runner.D_kwargs_train)
g_loss = F.softplus(-fake_scores).mean()
runner.running_stats.update({'g_loss': g_loss.item()})
return g_loss
class FourierRegularizedLogisticGANLoss(LogisticGANLoss):
def __init__(self, runner, d_loss_kwargs=None, g_loss_kwargs=None):
super(FourierRegularizedLogisticGANLoss, self).__init__(runner, d_loss_kwargs, g_loss_kwargs)
self.adv = self.g_loss_kwargs['adv']
self.lamb = self.g_loss_kwargs['lamb']
self.metric = self.g_loss_kwargs['metric']
self.threshold = self.g_loss_kwargs['threshold']
def g_loss(self, runner, data):
"""Computes loss for generator."""
# TODO: Use random labels.
G = runner.models['generator']
D = runner.models['discriminator']
labels = data.get('label', None)
data['image'] = data['image'] / 255. * 2. - 1.
#latents = runner.inverter.invert(data['image'])
latents = data['latent']
G.net.train()
fakes = G.net.module.synthesis(latents)
fake_scores = D(fakes, label=labels, **runner.D_kwargs_train)
g_loss = F.softplus(-fake_scores).mean()
runner.running_stats.update({'g_loss': g_loss.item()})
fourier_loss = fourier_dissimilarity(fakes, data['image'], self.metric, self.threshold)
fourier_loss = torch.mean(fourier_loss)
runner.running_stats.update({'fourier_loss': fourier_loss.item()})
total_loss = (self.adv * g_loss) + (self.lamb * fourier_loss)
runner.running_stats.update({'total_loss': total_loss.item()})
return total_loss
|
StarcoderdataPython
|
1629073
|
# Generated by Django 3.2.6 on 2021-09-05 20:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('product', '0003_auto_20210905_1139'),
]
operations = [
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('product_name', models.CharField(max_length=100)),
('product_price', models.DecimalField(decimal_places=2, default=1, max_digits=12)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.product')),
],
),
]
|
StarcoderdataPython
|
3372965
|
import unittest
from app.utilities.factory import Factory
class TestClassA(object):
pass
class TestClassB(object):
pass
class TestFactory(unittest.TestCase):
def test_register(self):
factory = Factory()
factory.register("test", TestClassA)
self.assertIsInstance(factory.create("test"), TestClassA)
def test_register_all(self):
factory = Factory()
classes = {
"test-a": TestClassA,
"test-b": TestClassB
}
factory.register_all(classes)
self.assertIsInstance(factory.create("test-a"), TestClassA)
self.assertIsInstance(factory.create("test-b"), TestClassB)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3225550
|
<gh_stars>10-100
##########################################################
## Define variables
##########################################################
num_of_stages_inv = 383
num_of_stages_nand4 = 127
num_of_stages_NOR3 = 127
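# Note (added for clarity): each ring's total stage count, control gate
# included, is odd, which keeps the feedback loop inverting so the ring
# oscillates.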
##########################################################
## hvt NOR3
##########################################################
# Create a new file
f = open("RO_nor4_hvt.v","w")
# write timescale and RO top module statement
f.write("/////////////////// Please synthesize this RO with hvt library /////////////////// \n/////////////////// This RO contains 127 stages /////////////////// \n\n")
f.write("`timescale 1ps / 100fs\n\nmodule RO_nor4_hvt(\ninput wire i_Enable,\ninput wire i_Sel,\noutput wire o_RO_out\n);\n\n")
# declare input and output wires, set dont_touch for synthesis
# f.write("wire i_Enable;\n(* dont_touch = 'true' *) wire i_Sel;\n(* dont_touch = 'true' *) wire o_RO_out;\n\n")
# declare connection wires between logic cells
f.write("/////////////////// Define wires between nand gates ///////////////////\n")
f.write("wire ["+str(num_of_stages_NOR3-2)+":0] w_nr_Conn;\n\n")
# Assign the first control logic
f.write("NAND4X2MA10TH U0(\n.A(i_Enable),\n.B(i_Sel),\n.C(w_nr_Conn[" + str(num_of_stages_NOR3-2) + "]),\n.D(w_nr_Conn[" + str(num_of_stages_NOR3-2) + "]),\n.Y(o_RO_out)\n);\n\n")
# insert the 2nd stage
f.write("NOR3_hvt U1(\n")
f.write(".i_nr_1(o_RO_out),\n.i_nr_2(o_RO_out),\n.i_nr_3(o_RO_out),\n.out(w_nr_Conn[0])\n")
f.write(");\n\n")
# insert modules
for x in range(num_of_stages_NOR3-2):
f.write("NOR3_hvt U" + str(x+2) + "(\n")
f.write(".i_nr_1(w_nr_Conn[" + str(x) + "]),\n.i_nr_2(w_nr_Conn[" + str(x) + "]),\n.i_nr_3(w_nr_Conn[" + str(x) + "]),\n.out(w_nr_Conn[" + str(x+1) + "])\n")
f.write(");\n\n")
# endmodule
f.write("endmodule\n\n")
# NOR3 cell statement
f.write("module NOR3_hvt(\ninput wire i_nr_1,\ninput wire i_nr_2,\ninput wire i_nr_3,\noutput wire out);\n\n")
f.write("NOR3X0P5MA10TH U0(\n")
f.write(".A(i_nr_1),\n.B(i_nr_2),\n.C(i_nr_3),\n.Y(out)\n")
f.write(");\n\n")
f.write("endmodule \n\n")
# close file
f.close()
##########################################################
## LVT NOR3
##########################################################
# Create a new file
f = open("RO_nor4_lvt.v","w")
# write timescale and RO top module statement
f.write("/////////////////// Please synthesize this RO with lvt library /////////////////// \n/////////////////// This RO contains 127 stages /////////////////// \n\n")
f.write("`timescale 1ps / 100fs\n\nmodule RO_nor4_lvt(\ninput wire i_Enable,\ninput wire i_Sel,\noutput wire o_RO_out\n);\n\n")
# declare input and output wires, set dont_touch for synthesis
# f.write("wire i_Enable;\n(* dont_touch = 'true' *) wire i_Sel;\n(* dont_touch = 'true' *) wire o_RO_out;\n\n")
# declare connection wires between logic cells
f.write("/////////////////// Define wires between nand gates ///////////////////\n")
f.write("wire ["+str(num_of_stages_NOR3-2)+":0] w_nr_Conn;\n\n")
# Assign the first control logic
f.write("NAND4X2MA10TL U0(\n.A(i_Enable),\n.B(i_Sel),\n.C(w_nr_Conn[" + str(num_of_stages_NOR3-2) + "]),\n.D(w_nr_Conn[" + str(num_of_stages_NOR3-2) + "]),\n.Y(o_RO_out)\n);\n\n")
# insert the 2nd stage
f.write("NOR3_lvt U1(\n")
f.write(".i_nr_1(o_RO_out),\n.i_nr_2(o_RO_out),\n.i_nr_3(o_RO_out),\n.out(w_nr_Conn[0])\n")
f.write(");\n\n")
# insert modules
for x in range(num_of_stages_NOR3-2):
f.write("NOR3_lvt U" + str(x+2) + "(\n")
f.write(".i_nr_1(w_nr_Conn[" + str(x) + "]),\n.i_nr_2(w_nr_Conn[" + str(x) + "]),\n.i_nr_3(w_nr_Conn[" + str(x) + "]),\n.out(w_nr_Conn[" + str(x+1) + "])\n")
f.write(");\n\n")
# endmodule
f.write("endmodule\n\n")
# NOR3 cell statement
f.write("module NOR3_lvt(\ninput wire i_nr_1,\ninput wire i_nr_2,\ninput wire i_nr_3,\noutput wire out);\n\n")
f.write("NOR3X0P5MA10TL U0(\n")
f.write(".A(i_nr_1),\n.B(i_nr_2),\n.C(i_nr_3),\n.Y(out)\n")
f.write(");\n\n")
f.write("endmodule \n\n")
# close file
f.close()
##########################################################
## RVT NOR3
##########################################################
# Create a new file
f = open("RO_nor4_rvt.v","w")
# write timescale and RO top module statement
f.write("/////////////////// Please synthesize this RO with rvt library /////////////////// \n/////////////////// This RO contains 127 stages /////////////////// \n\n")
f.write("`timescale 1ps / 100fs\n\nmodule RO_nor4_rvt(\ninput wire i_Enable,\ninput wire i_Sel,\noutput wire o_RO_out\n);\n\n")
# declare input and output wires, set dont_touch for synthesis
# f.write("wire i_Enable;\n(* dont_touch = 'true' *) wire i_Sel;\n(* dont_touch = 'true' *) wire o_RO_out;\n\n")
# declare connection wires between logic cells
f.write("/////////////////// Define wires between nand gates ///////////////////\n")
f.write("wire ["+str(num_of_stages_NOR3-2)+":0] w_nr_Conn;\n\n")
# Assign the first control logic
f.write("NAND4X2MA10TR U0(\n.A(i_Enable),\n.B(i_Sel),\n.C(w_nr_Conn[" + str(num_of_stages_NOR3-2) + "]),\n.D(w_nr_Conn[" + str(num_of_stages_NOR3-2) + "]),\n.Y(o_RO_out)\n);\n\n")
# insert the 2nd stage
f.write("NOR3_rvt U1(\n")
f.write(".i_nr_1(o_RO_out),\n.i_nr_2(o_RO_out),\n.i_nr_3(o_RO_out),\n.out(w_nr_Conn[0])\n")
f.write(");\n\n")
# insert modules
for x in range(num_of_stages_NOR3-2):
f.write("NOR3_rvt U" + str(x+2) + "(\n")
f.write(".i_nr_1(w_nr_Conn[" + str(x) + "]),\n.i_nr_2(w_nr_Conn[" + str(x) + "]),\n.i_nr_3(w_nr_Conn[" + str(x) + "]),\n.out(w_nr_Conn[" + str(x+1) + "])\n")
f.write(");\n\n")
# endmodule
f.write("endmodule\n\n")
# NOR3 cell statement
f.write("module NOR3_rvt(\ninput wire i_nr_1,\ninput wire i_nr_2,\ninput wire i_nr_3,\noutput wire out);\n\n")
f.write("NOR3X0P5MA10TR U0(\n")
f.write(".A(i_nr_1),\n.B(i_nr_2),\n.C(i_nr_3),\n.Y(out)\n")
f.write(");\n\n")
f.write("endmodule \n\n")
# close file
f.close()
##########################################################
## SLVT INV
##########################################################
# Create a new file
f = open("RO_inv_slvt.v","w")
# write timescale and RO top module statement
f.write("/////////////////// Please synthesize this RO with lvt library /////////////////// \n/////////////////// This RO contains 383 stages /////////////////// \n\n")
f.write("`timescale 1ps / 100fs\n\nmodule RO_inv_slvt(\ninput wire i_Enable,\ninput wire i_Sel,\noutput wire o_RO_out\n);\n\n")
# declare input and output wires, set dont_touch for synthesis
# f.write("wire i_Enable;\n(* dont_touch = 'true' *) wire i_Sel;\n(* dont_touch = 'true' *) wire o_RO_out;\n\n")
# declare connection wires between logic cells
f.write("/////////////////// Define wires between inverters ///////////////////\n")
f.write("wire ["+str(num_of_stages_inv-1)+":0] w_inv_Conn;\n\n")
# Assign the first control logic
#f.write("assign w_inv_Conn[0] = ~( i_Enable & i_Sel & o_RO_out );\n\n")
f.write("NAND3_X2R_A9PP84TSL_C14 U0(\n.A(i_Enable),\n.B(i_Sel),\n.C(o_RO_out),\n.Y(w_inv_Conn[0])\n);\n\n")
# insert modules
for x in range(num_of_stages_inv-2):
f.write("inv_slvt U" + str(x+1) + "(\n")
f.write(".in(w_inv_Conn[" + str(x) + "]),\n.out(w_inv_Conn[" + str(x+1) + "])\n")
f.write(");\n\n")
# insert the last module
x = num_of_stages_inv-1
f.write("inv_slvt U" + str(x) + "(\n")
f.write(".in(w_inv_Conn[" + str(x-1) + "]),\n.out(o_RO_out)\n")
f.write(");\n\n")
# endmodule
f.write("endmodule\n\n")
# inverter cell statement
f.write("module inv_slvt(\ninput wire in,\noutput wire out\n); \n\n")
# f.write("wire in;\n(* dont_touch = 'true' *) wire out; \n\n")
f.write("INVP_X0P5N_A9PP84TSL_C14 U0(\n")
f.write(".A(in),\n.Y(out)\n")
f.write(");\n\n")
f.write("endmodule \n\n")
# close file
f.close()
##########################################################
## LVT INV
##########################################################
# Create a new file
f = open("RO_inv_lvt.v","w")
# write timescale and RO top module statement
f.write("/////////////////// Please synthesize this RO with slvt library /////////////////// \n/////////////////// This RO contains 383 stages /////////////////// \n\n")
f.write("`timescale 1ps / 100fs\n\nmodule RO_inv_lvt(\ninput wire i_Enable,\ninput wire i_Sel,\noutput wire o_RO_out\n);\n\n")
# declare input and output wires, set dont_touch for synthesis
# f.write("wire i_Enable;\n(* dont_touch = 'true' *) wire i_Sel;\n(* dont_touch = 'true' *) wire o_RO_out;\n\n")
# declare connection wires between logic cells
f.write("/////////////////// Define wires between inverters ///////////////////\n")
f.write("wire ["+str(num_of_stages_inv-1)+":0] w_inv_Conn;\n\n")
# Assign the first control logic
#f.write("assign w_inv_Conn[0] = ~( i_Enable & i_Sel & o_RO_out );\n\n")
f.write("NAND3_X2R_A9PP84TL_C14 U0(\n.A(i_Enable),\n.B(i_Sel),\n.C(o_RO_out),\n.Y(w_inv_Conn[0])\n);\n\n")
# insert modules
for x in range(num_of_stages_inv-2):
f.write("inv_lvt U" + str(x+1) + "(\n")
f.write(".in(w_inv_Conn[" + str(x) + "]),\n.out(w_inv_Conn[" + str(x+1) + "])\n")
f.write(");\n\n")
# insert the last module
x = num_of_stages_inv-1
f.write("inv_lvt U" + str(x) + "(\n")
f.write(".in(w_inv_Conn[" + str(x-1) + "]),\n.out(o_RO_out)\n")
f.write(");\n\n")
# endmodule
f.write("endmodule\n\n")
# inverter cell statement
f.write("module inv_lvt(\ninput wire in,\noutput wire out\n); \n\n")
# f.write("wire in;\n(* dont_touch = 'true' *) wire out; \n\n")
f.write("INVP_X0P5N_A9PP84TL_C14 U0(\n")
f.write(".A(in),\n.Y(out)\n")
f.write(");\n\n")
f.write("endmodule \n\n")
# close file
f.close()
##########################################################
## RVT INV
##########################################################
# Create a new file
f = open("RO_inv_rvt.v","w")
# write timescale and RO top module statement
f.write("/////////////////// Please synthesize this RO with rvt library /////////////////// \n/////////////////// This RO contains 383 stages /////////////////// \n\n")
f.write("`timescale 1ps / 100fs\n\nmodule RO_inv_rvt(\ninput wire i_Enable,\ninput wire i_Sel,\noutput wire o_RO_out\n);\n\n")
# declare input and output wires, set dont_touch for synthesis
# f.write("wire i_Enable;\n(* dont_touch = 'true' *) wire i_Sel;\n(* dont_touch = 'true' *) wire o_RO_out;\n\n")
# declare connection wires between logic cells
f.write("/////////////////// Define wires between inverters ///////////////////\n")
f.write("wire ["+str(num_of_stages_inv-1)+":0] w_inv_Conn;\n\n")
#f.write("assign w_inv_Conn[0] = ~( i_Enable & i_Sel & o_RO_out );\n\n")
f.write("NAND3_X2R_A9PP84TR_C14 U0(\n.A(i_Enable),\n.B(i_Sel),\n.C(o_RO_out),\n.Y(w_inv_Conn[0])\n);\n\n")
# insert modules
for x in range(num_of_stages_inv-2):
f.write("inv_rvt U" + str(x+1) + "(\n")
f.write(".in(w_inv_Conn[" + str(x) + "]),\n.out(w_inv_Conn[" + str(x+1) + "])\n")
f.write(");\n\n")
# insert the last module
x = num_of_stages_inv-1
f.write("inv_rvt U" + str(x) + "(\n")
f.write(".in(w_inv_Conn[" + str(x-1) + "]),\n.out(o_RO_out)\n")
f.write(");\n\n")
# endmodule
f.write("endmodule\n\n")
# inverter cell statement
f.write("module inv_rvt(\ninput wire in,\noutput wire out\n); \n\n")
# f.write("wire in;\n(* dont_touch = 'true' *) wire out; \n\n")
f.write("INVP_X0P5N_A9PP84TR_C14 U0(\n")
f.write(".A(in),\n.Y(out)\n")
f.write(");\n\n")
f.write("endmodule \n\n")
# close file
f.close()
##########################################################
## SLVT NAND4
##########################################################
# Create a new file
f = open("RO_nand4_slvt.v","w")
# write timescale and RO top module statement
f.write("/////////////////// Please synthesize this RO with slvt library /////////////////// \n/////////////////// This RO contains 127 stages /////////////////// \n\n")
f.write("`timescale 1ps / 100fs\n\nmodule RO_nand4_slvt(\ninput wire i_Enable,\ninput wire i_Sel,\noutput wire o_RO_out\n);\n\n")
# declare input and output wires, set dont_touch for synthesis
# f.write("wire i_Enable;\n(* dont_touch = 'true' *) wire i_Sel;\n(* dont_touch = 'true' *) wire o_RO_out;\n\n")
# declare connection wires between logic cells
f.write("/////////////////// Define wires between nand gates ///////////////////\n")
f.write("wire ["+str(num_of_stages_nand4-1)+":0] w_nd_Conn;\n\n")
# Assign the first control logic
f.write("nand4_slvt U0(\n.i_nd_1(i_Enable),\n.i_nd_2(i_Sel),\n.i_nd_3(o_RO_out),\n.i_nd_4(o_RO_out),\n.out(w_nd_Conn[0])\n);\n\n")
# insert modules
for x in range(num_of_stages_nand4-2):
f.write("nand4_slvt U" + str(x+1) + "(\n")
f.write(".i_nd_1(w_nd_Conn[" + str(x) + "]),\n.i_nd_2(w_nd_Conn[" + str(x) + "]),\n.i_nd_3(w_nd_Conn[" + str(x) + "]),\n.i_nd_4(w_nd_Conn[" + str(x) + "]),\n.out(w_nd_Conn[" + str(x+1) + "])\n")
f.write(");\n\n")
# insert last module
x = num_of_stages_nand4-2
f.write("nand4_slvt U" + str(x+1) + "(\n")
f.write(".i_nd_1(w_nd_Conn[" + str(x) + "]),\n.i_nd_2(w_nd_Conn[" + str(x) + "]),\n.i_nd_3(w_nd_Conn[" + str(x) + "]),\n.i_nd_4(w_nd_Conn[" + str(x) + "]),\n.out(o_RO_out)\n")
f.write(");\n\n")
# endmodule
f.write("endmodule\n\n")
# nand4 cell statement
f.write("module nand4_slvt(\ninput wire i_nd_1,\ninput wire i_nd_2,\ninput wire i_nd_3,\ninput wire i_nd_4,\noutput wire out);\n\n")
# f.write("wire i_nd_1;\n(* dont_touch = 'true' *) wire i_nd_2;\n(* dont_touch = 'true' *) wire i_nd_3;\n(* dont_touch = 'true' *) wire i_nd_4;\n(* dont_touch = 'true' *) wire o_nd; \n\n")
f.write("NAND4_X2R_A9PP84TSL_C14 U0(\n")
f.write(".A(i_nd_1),\n.B(i_nd_2),\n.C(i_nd_3),\n.D(i_nd_4),\n.Y(out)\n")
f.write(");\n\n")
f.write("endmodule \n\n")
# close file
f.close()
##########################################################
## LVT NAND4
##########################################################
# Create a new file
f = open("RO_nand4_lvt.v","w")
# write timescale and RO top module statement
f.write("/////////////////// Please synthesize this RO with lvt library /////////////////// \n/////////////////// This RO contains 127 stages /////////////////// \n\n")
f.write("`timescale 1ps / 100fs\n\nmodule RO_nand4_lvt(\ninput wire i_Enable,\ninput i_Sel,\noutput wire o_RO_out\n);\n\n")
# declare input and output wires, set dont_touch for synthesis
# f.write("wire i_Enable;\n(* dont_touch = 'true' *) wire i_Sel;\n(* dont_touch = 'true' *) wire o_RO_out;\n\n")
# declare connection wires between logic cells
f.write("/////////////////// Define wires between nand gates ///////////////////\n")
f.write("wire ["+str(num_of_stages_nand4-1)+":0] w_nd_Conn;\n\n")
# Assign the first control logic
f.write("nand4_lvt U0(\n.i_nd_1(i_Enable),\n.i_nd_2(i_Sel),\n.i_nd_3(o_RO_out),\n.i_nd_4(o_RO_out),\n.out(w_nd_Conn[0])\n);\n\n")
# insert modules
for x in range(num_of_stages_nand4-2):
f.write("nand4_lvt U" + str(x+1) + "(\n")
f.write(".i_nd_1(w_nd_Conn[" + str(x) + "]),\n.i_nd_2(w_nd_Conn[" + str(x) + "]),\n.i_nd_3(w_nd_Conn[" + str(x) + "]),\n.i_nd_4(w_nd_Conn[" + str(x) + "]),\n.out(w_nd_Conn[" + str(x+1) + "])\n")
f.write(");\n\n")
# insert last module
x = num_of_stages_nand4-2
f.write("nand4_lvt U" + str(x+1) + "(\n")
f.write(".i_nd_1(w_nd_Conn[" + str(x) + "]),\n.i_nd_2(w_nd_Conn[" + str(x) + "]),\n.i_nd_3(w_nd_Conn[" + str(x) + "]),\n.i_nd_4(w_nd_Conn[" + str(x) + "]),\n.out(o_RO_out)\n")
f.write(");\n\n")
# endmodule
f.write("endmodule\n\n")
# nand4 cell statement
f.write("module nand4_lvt(\ninput wire i_nd_1,\ninput wire i_nd_2,\ninput wire i_nd_3,\ninput wire i_nd_4,\noutput wire out);\n\n")
# f.write("wire i_nd_1;\n(* dont_touch = 'true' *) wire i_nd_2;\n(* dont_touch = 'true' *) wire i_nd_3;\n(* dont_touch = 'true' *) wire i_nd_4;\n(* dont_touch = 'true' *) wire o_nd; \n\n")
f.write("NAND4_X2R_A9PP84TL_C14 U0(\n")
f.write(".A(i_nd_1),\n.B(i_nd_2),\n.C(i_nd_3),\n.D(i_nd_4),\n.Y(out)\n")
f.write(");\n\n")
f.write("endmodule \n\n")
# close file
f.close()
##########################################################
## RVT NAND4
##########################################################
# Create a new file
f = open("RO_nand4_rvt.v","w")
# write timescale and RO top module statement
f.write("/////////////////// Please synthesize this RO with rvt library /////////////////// \n/////////////////// This RO contains 127 stages /////////////////// \n\n")
f.write("`timescale 1ps / 100fs\n\nmodule RO_nand4_rvt(\ninput wire i_Enable,\ninput wire i_Sel,\noutput wire o_RO_out\n);\n\n")
# declare input and output wires, set dont_touch for synthesis
# f.write("wire i_Enable;\n(* dont_touch = 'true' *) wire i_Sel;\n(* dont_touch = 'true' *) wire o_RO_out;\n\n")
# declare connection wires between logic cells
f.write("/////////////////// Define wires between nand gates ///////////////////\n")
f.write("wire ["+str(num_of_stages_nand4-1)+":0] w_nd_Conn;\n\n")
# Assign the first control logic
f.write("nand4_rvt U0(\n.i_nd_1(i_Enable),\n.i_nd_2(i_Sel),\n.i_nd_3(o_RO_out),\n.i_nd_4(o_RO_out),\n.out(w_nd_Conn[0])\n);\n\n")
# insert modules
for x in range(num_of_stages_nand4-2):
f.write("nand4_rvt U" + str(x+1) + "(\n")
f.write(".i_nd_1(w_nd_Conn[" + str(x) + "]),\n.i_nd_2(w_nd_Conn[" + str(x) + "]),\n.i_nd_3(w_nd_Conn[" + str(x) + "]),\n.i_nd_4(w_nd_Conn[" + str(x) + "]),\n.out(w_nd_Conn[" + str(x+1) + "])\n")
f.write(");\n\n")
# insert last module
x = num_of_stages_nand4-2
f.write("nand4_rvt U" + str(x+1) + "(\n")
f.write(".i_nd_1(w_nd_Conn[" + str(x) + "]),\n.i_nd_2(w_nd_Conn[" + str(x) + "]),\n.i_nd_3(w_nd_Conn[" + str(x) + "]),\n.i_nd_4(w_nd_Conn[" + str(x) + "]),\n.out(o_RO_out)\n")
f.write(");\n\n")
# endmodule
f.write("endmodule\n\n")
# nand4 cell statement
f.write("module nand4_rvt(\ninput wire i_nd_1,\ninput wire i_nd_2,\ninput wire i_nd_3,\ninput wire i_nd_4,\noutput wire out);\n\n")
# f.write("wire i_nd_1;\n(* dont_touch = 'true' *) wire i_nd_2;\n(* dont_touch = 'true' *) wire i_nd_3;\n(* dont_touch = 'true' *) wire i_nd_4;\n(* dont_touch = 'true' *) wire o_nd; \n\n")
f.write("NAND4_X2R_A9PP84TR_C14 U0(\n")
f.write(".A(i_nd_1),\n.B(i_nd_2),\n.C(i_nd_3),\n.D(i_nd_4),\n.Y(out)\n")
f.write(");\n\n")
f.write("endmodule \n\n")
# close file
f.close()
|
StarcoderdataPython
|
3399361
|
<reponame>cschmidat/cleartext
from .encoder_decoder import EncoderDecoder
|
StarcoderdataPython
|
1721812
|
<gh_stars>0
#!/Users/Jonman/anaconda/bin/python3
import asyncio as snc
@snc.coroutine
def handle_echo( reader, writer):
data = yield from reader.read( 100)
msg = data.decode()
peer = writer.get_extra_info( 'peername' )
print( "Received {} from {}".format( msg, peer))
print( "Send: {}".format( msg))
writer.write( data)
yield from writer.drain()
print( "Close the client socket")
writer.close()
loop = snc.get_event_loop()
coro = snc.start_server( handle_echo, '127.0.0.1', 4545, loop=loop)
srvr = loop.run_until_complete( coro)
print('Serving on {}'.format(srvr.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
srvr.close()
loop.run_until_complete(srvr.wait_closed())
loop.close()
|
StarcoderdataPython
|
3274139
|
import csv
import json
import time
#turns the csv into a list of lists [[x, y, z,], [a, b, c]
exampleFile = open('repoffinput.csv')
exampleReader = csv.reader(exampleFile)
exampleData = list(exampleReader)
#variable to hold the data we care about
cleanData = []
#removes header row
del exampleData[0]
#pull the data you care about into a new list. Each entry is a list in the list
for row in exampleData:
#this is the list of the elements for the individual entry
subList = []
#this goes through an plucks out the data that matters
#breaks up the string value, turns it into numbers, and adds to list
a,b = row[0].split(", ")
locList = [float(a), float(b)]
subList.append(locList)
#clean up name and add
if 'The' in row[4]:
fullName = 'The ' + row[5]
else:
fullName = row[5]
subList.append(fullName)
#chinese name
subList.append(row[6])
#psu_en
subList.append(row[7])
#origin_loc
subList.append(row[9])
#sec
subList.append(row[10])
#regloc
subList.append(row[12])
#regdate
subList.append(row[13])
#actarea
subList.append(row[14])
#website_en
subList.append(row[15])
#website_ch
subList.append(row[16])
#once that list for the entry is created, it is added to cleanData
cleanData.append(subList)
#list to hold the list of dictionaries
jsonList = []
#creates the json
for row in cleanData:
#this is the dict for the entry
subDict = {}
subDict['geometry'] = {'type': 'Point', 'coordinates': row[0]}
subDict['type'] = 'Feature'
#this section only adds websites if they exist to the properties entry
propertiesSubDict = {'orgname_en': row[1], 'orgname_ch': row[2], 'psu_en': row[3], 'originloc': row[4], 'sector': row[5], 'regloc': row[6], 'regdate': row[7], 'actarea': row[8]}
if '.' in row[9]:
propertiesSubDict['website_en'] = row[9]
if '.' in row[10]:
propertiesSubDict['website_ch'] = row[10]
subDict['properties'] = propertiesSubDict
#adds the now completed subDict to jsonList
jsonList.append(subDict)
#writes the json
with open('jsonholder.json', 'w') as outfile:
json.dump(jsonList, outfile)
#This is the first part of the input html page
top_half = open('map_top.html', 'r')
#This re-inports the json as a text file because . . . that's how it works
middle = open('jsonholder.json', 'r')
#This is the last part of the input html page
bottom_half = open('map_bottom.html', 'r')
#sets up the regular output
output_page = open('index_map.html', 'w')
##sets up the archive output
timestamp = time.strftime("%Y%m%d")
output_page_archive_filename = "index_map" + timestamp + ".html"
archive_page = open(output_page_archive_filename, 'w')
#writes the html to the new page
for item in top_half:
output_page.write(item)
archive_page.write(item)
#writes the json
for item in middle:
output_page.write(item)
archive_page.write(item)
#writes the end
for item in bottom_half:
output_page.write(item)
archive_page.write(item)
top_half.close()
middle.close()
bottom_half.close()
output_page.close()
archive_page.close()
|
StarcoderdataPython
|
1777699
|
<reponame>ethansaxenian/RosettaDecode
line = my_file.readline() # returns a line from the file
lines = my_file.readlines() # returns a list of the rest of the lines from the file
|
StarcoderdataPython
|
3209444
|
<filename>gc.py<gh_stars>0
#!/usr/bin/env python3
# Write a program that computes the GC% of a DNA sequence
# Format the output for 2 decimal places
# Use all three formatting methods
dna = 'ACAGAGCCAGCAGATATACAGCAGATACTAT' # feel free to change
gc = 0
for i in range(0, len(dna)):
if dna[i] == 'G' or dna[i] == 'C': gc += 1
else: gc += 0
gc_percent = gc / len(dna)
# formating options
print(round(gc_percent, 2))
print('%.2f' % gc_percent)
print('{:.2f}'.format(gc_percent))
print(f'{gc_percent:.2f}')
# print(f'{gc_percent}) I dont know
"""
0.42
0.42
0.42
"""
|
StarcoderdataPython
|
1619295
|
<filename>pystachio/container.py<gh_stars>10-100
import copy
from collections import Iterable, Mapping, Sequence
from inspect import isclass
from .base import Object
from .naming import Namable, frozendict
from .typing import Type, TypeCheck, TypeFactory, TypeMetaclass
class ListFactory(TypeFactory):
PROVIDES = 'List'
@staticmethod
def create(type_dict, *type_parameters):
"""
Construct a List containing type 'klazz'.
"""
assert len(type_parameters) == 1
klazz = TypeFactory.new(type_dict, *type_parameters[0])
assert isclass(klazz)
assert issubclass(klazz, Object)
return TypeMetaclass('%sList' % klazz.__name__, (ListContainer,), {'TYPE': klazz, 'TYPE_PARAMETERS': (klazz.serialize_type(),)})
class ListContainer(Object, Namable, Type):
"""
The List container type. This is the base class for all user-generated
List types. It won't function as-is, since it requires cls.TYPE to be
set to the contained type. If you want a concrete List type, see the
List() function.
"""
__slots__ = ('_values',)
def __init__(self, vals):
self._values = self._coerce_values(copy.copy(vals))
super(ListContainer, self).__init__()
def get(self):
return tuple(v.get() for v in self._values)
def dup(self):
return self.__class__(self._values)
def __hash__(self):
return hash(self.get())
def __repr__(self):
si, _ = self.interpolate()
return '%s(%s)' % (self.__class__.__name__,
', '.join(str(v) for v in si._values))
def __iter__(self):
si, _ = self.interpolate()
return iter(si._values)
def __getitem__(self, index_or_slice):
si, _ = self.interpolate()
return si._values[index_or_slice]
def __contains__(self, item):
si, _ = self.interpolate()
if isinstance(item, self.TYPE):
return item in si._values
else:
return item in si.get()
def __eq__(self, other):
if not isinstance(other, ListContainer): return False
if self.TYPE.serialize_type() != other.TYPE.serialize_type(): return False
si, _ = self.interpolate()
oi, _ = other.interpolate()
return si._values == oi._values
@staticmethod
def isiterable(values):
return isinstance(values, Sequence) and not isinstance(values, str)
def _coerce_values(self, values):
if not ListContainer.isiterable(values):
raise ValueError("ListContainer expects an iterable, got %s" % repr(values))
def coerced(value):
return value if isinstance(value, self.TYPE) else self.TYPE(value)
return tuple([coerced(v) for v in values])
def check(self):
assert ListContainer.isiterable(self._values)
scopes = self.scopes()
for element in self._values:
assert isinstance(element, self.TYPE)
typecheck = element.in_scope(*scopes).check()
if not typecheck.ok():
return TypeCheck.failure("Element in %s failed check: %s" % (self.__class__.__name__,
typecheck.message()))
return TypeCheck.success()
def interpolate(self):
unbound = set()
interpolated = []
scopes = self.scopes()
for element in self._values:
einterp, eunbound = element.in_scope(*scopes).interpolate()
interpolated.append(einterp)
unbound.update(eunbound)
return self.__class__(interpolated), list(unbound)
def find(self, ref):
if not ref.is_index():
raise Namable.NamingError(self, ref)
try:
intvalue = int(ref.action().value)
except ValueError:
raise Namable.NamingError(self, ref)
if len(self._values) <= intvalue:
raise Namable.NotFound(self, ref)
else:
namable = self._values[intvalue]
if ref.rest().is_empty():
return namable.in_scope(*self.scopes())
else:
if not isinstance(namable, Namable):
raise Namable.Unnamable(namable)
else:
return namable.in_scope(*self.scopes()).find(ref.rest())
@classmethod
def type_factory(cls):
return 'List'
@classmethod
def type_parameters(cls):
return cls.TYPE_PARAMETERS
List = TypeFactory.wrapper(ListFactory)
class MapFactory(TypeFactory):
PROVIDES = 'Map'
@staticmethod
def create(type_dict, *type_parameters):
assert len(type_parameters) == 2, 'Type parameters: %s' % repr(type_parameters)
key_klazz, value_klazz = type_parameters
key_klazz, value_klazz = (TypeFactory.new(type_dict, *key_klazz),
TypeFactory.new(type_dict, *value_klazz))
assert isclass(key_klazz) and isclass(value_klazz)
assert issubclass(key_klazz, Object) and issubclass(value_klazz, Object)
return TypeMetaclass('%s%sMap' % (key_klazz.__name__, value_klazz.__name__), (MapContainer,),
{'KEYTYPE': key_klazz, 'VALUETYPE': value_klazz, 'TYPE_PARAMETERS': (key_klazz.serialize_type(), value_klazz.serialize_type())})
# TODO(wickman) Technically it's possible to do the following:
#
# >>> my_map = Map(Boolean,Integer)((True,2), (False,3), (False, 2))
# >>> my_map
# BooleanIntegerMap(True => 2, False => 3, False => 2)
# >>> my_map.get()
# frozendict({False: 2, True: 2})
# >>> my_map[True]
# Integer(2)
# >>> my_map.get()[True]
# 2
# we should filter tuples for uniqueness.
class MapContainer(Object, Namable, Type):
"""
The Map container type. This is the base class for all user-generated
Map types. It won't function as-is, since it requires cls.KEYTYPE and
cls.VALUETYPE to be set to the appropriate types. If you want a
concrete Map type, see the Map() function.
__init__(dict) => translates to list of tuples & sanity checks
__init__(tuple) => sanity checks
"""
__slots__ = ('_map',)
def __init__(self, *args):
"""
Construct a map.
Input:
sequence of tuples _or_ a dictionary
"""
if len(args) == 1 and isinstance(args[0], Mapping):
self._map = self._coerce_map(copy.copy(args[0]))
elif all(isinstance(arg, Iterable) and len(arg) == 2 for arg in args):
self._map = self._coerce_tuple(args)
else:
raise ValueError("Unexpected input to MapContainer: %s" % repr(args))
super(MapContainer, self).__init__()
def get(self):
return frozendict((k.get(), v.get()) for (k, v) in self._map)
def _coerce_wrapper(self, key, value):
coerced_key = key if isinstance(key, self.KEYTYPE) else self.KEYTYPE(key)
coerced_value = value if isinstance(value, self.VALUETYPE) else self.VALUETYPE(value)
return (coerced_key, coerced_value)
def _coerce_map(self, input_map):
return tuple(self._coerce_wrapper(key, value) for key, value in input_map.items())
def _coerce_tuple(self, input_tuple):
return tuple(self._coerce_wrapper(key, value) for key, value in input_tuple)
def __hash__(self):
return hash(self.get())
def __iter__(self):
si, _ = self.interpolate()
return (t[0] for t in si._map)
def __getitem__(self, key):
if not isinstance(key, self.KEYTYPE):
try:
key = self.KEYTYPE(key)
except ValueError:
raise KeyError("%s is not coercable to %s" % self.KEYTYPE.__name__)
# TODO(wickman) The performance of this should be improved.
si, _ = self.interpolate()
for tup in si._map:
if key == tup[0]:
return tup[1]
raise KeyError("%s not found" % key)
def __contains__(self, item):
try:
self[item]
return True
except KeyError:
return False
def dup(self):
return self.__class__(*self._map)
def __repr__(self):
si, _ = self.interpolate()
return '%s(%s)' % (self.__class__.__name__,
', '.join('%s => %s' % (key, val) for key, val in si._map))
def __eq__(self, other):
if not isinstance(other, MapContainer): return False
if self.KEYTYPE.serialize_type() != other.KEYTYPE.serialize_type(): return False
if self.VALUETYPE.serialize_type() != other.VALUETYPE.serialize_type(): return False
si, _ = self.interpolate()
oi, _ = other.interpolate()
return si._map == oi._map
def check(self):
assert isinstance(self._map, tuple)
scopes = self.scopes()
for key, value in self._map:
assert isinstance(key, self.KEYTYPE)
assert isinstance(value, self.VALUETYPE)
keycheck = key.in_scope(*scopes).check()
valuecheck = value.in_scope(*scopes).check()
if not keycheck.ok():
return TypeCheck.failure("%s key %s failed check: %s" % (self.__class__.__name__,
key, keycheck.message()))
if not valuecheck.ok():
return TypeCheck.failure("%s[%s] value %s failed check: %s" % (self.__class__.__name__,
key, value, valuecheck.message()))
return TypeCheck.success()
def interpolate(self):
unbound = set()
interpolated = []
scopes = self.scopes()
for key, value in self._map:
kinterp, kunbound = key.in_scope(*scopes).interpolate()
vinterp, vunbound = value.in_scope(*scopes).interpolate()
unbound.update(kunbound)
unbound.update(vunbound)
interpolated.append((kinterp, vinterp))
return self.__class__(*interpolated), list(unbound)
def find(self, ref):
if not ref.is_index():
raise Namable.NamingError(self, ref)
kvalue = self.KEYTYPE(ref.action().value)
scopes = self.scopes()
for key, namable in self._map:
if kvalue == key:
if ref.rest().is_empty():
return namable.in_scope(*scopes)
else:
if not isinstance(namable, Namable):
raise Namable.Unnamable(namable)
else:
return namable.in_scope(*scopes).find(ref.rest())
raise Namable.NotFound(self, ref)
@classmethod
def type_factory(cls):
return 'Map'
@classmethod
def type_parameters(cls):
return cls.TYPE_PARAMETERS
Map = TypeFactory.wrapper(MapFactory)
|
StarcoderdataPython
|
137155
|
## <NAME>
# By <NAME>
# Noice, that (by Senku)
# adds some fun commands
import os
import discord
from typing import Optional
from discord.ext import commands
from discord import File
#from PIL import Image, ImageSequence
import asyncio
import json
import random
os.chdir('.')
token = 'the token'
def wrapper(ctx, emoji):
def check(reaction, user):
        return user == ctx.author and str(reaction.emoji) == emoji
return check
def get_prefix(client,message):
with open("prefixe.json", "r") as f:
prefixe = json.load(f)
return prefixe[str(message.guild.id)]
bot = commands.Bot(command_prefix = get_prefix)
bot.remove_command('help')
@bot.event
async def on_ready():
await bot.change_presence(status=discord.Status.idle, activity=discord.Activity(type=discord.ActivityType.watching, name='la maintenance '))
print("Bot connecté")
@bot.command()
async def load(ctx,extension):
bot.load_extension(f'cogs.{extension}')
@bot.command()
async def unload(ctx,extension):
bot.unload_extension(f'cogs.{extension}')
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
@bot.event
async def on_guild_join(guild):
with open("prefixe.json", "r") as f:
prefixe = json.load(f)
prefixe[str(guild.id)] = "!!"
with open("prefixe.json", "w") as f:
json.dump(prefixe,f, indent=4)
@bot.event
async def on_guild_remove(guild):
with open('prefixe.json', 'r') as f:
prefixe = json.load(f)
prefixe[str(guild.id)] = '!!'
with open('prefixe.json', 'w') as f:
json.dump(prefixe, f, indent=4)
@bot.command()
async def prefix(ctx, prefix):
if ctx.message.author.guild_permissions.administrator:
with open("prefixe.json", "r") as f:
prefixe = json.load(f)
prefixe[str(ctx.guild.id)] = prefix
await ctx.send("Le préfixe a été changé pour " + prefix)
with open("prefixe.json", "w") as f:
json.dump(prefixe,f, indent=4)
else:
await ctx.send("**:x: Tu n'as pas les droits d'administrateur pour changer le prefixe du bot dans ce serveur.**")
@bot.command() #SAY
async def say(ctx, *, arg: commands.clean_content):
"""
Fonction pour faire parler le bot en reproduisant ce qui est tapé précédemment
:param ctx: le contexte de la commande.
:param *, arg: Le texte a renvoyé avec espaces.
"""
await ctx.message.delete()
await ctx.send(ctx.author.mention + "\n" + arg)
#COMMANDES DE MODERATIONS #COMMANDES DE MODERATIONS #COMMANDES DE MODERATIONS #COMMANDES DE MODERATIONS #COMMANDES DE MODERATIONS #COMMANDES DE MODERATIONS #COMMANDES DE MODERATIONS
@bot.command() #KICK
async def kick(ctx, user : discord.User):
"""
Fonction pour Kick du serveur des utilisateurs
:param ctx: Le contexte de la commande.
:param user: L'utilisateur qui va etre kick.
"""
if ctx.message.author.guild_permissions.administrator:
await ctx.guild.kick(user)
await ctx.send(f"{user} vient d'être kick du serveur !")
else:
await ctx.send("Tu n'as pas les droits d'administrateurs pour kick cet utilisateur du serveur !")
@bot.command() #BAN
async def ban(ctx, user:discord.User):
"""
Fonction pour bannir du serveur des utilisateurs
:param ctx: Le contexte de la commande.
:param user: L'utilisateur qui va etre ban.
"""
    if ctx.message.author.guild_permissions.administrator:
        await ctx.guild.ban(user)
        await ctx.send(f"**{user} vient d'être banni ! Il a sûrement fait quelque chose de mal c'est triste :pensive:**")
else :
await ctx.send("Tu n'as pas les droits d'administrateurs pour bannir cet utilisateur !")
@bot.command()
async def unban(ctx, user):
"""
Fonction pour débannir du serveur des utilisateurs
:param ctx: Le contexte de la commande.
    :param user: L'utilisateur qui va etre débanni.
    """
    userName, userId = user.split("#")
    bannedUsers = await ctx.guild.bans()
    for i in bannedUsers:
        if i.user.name == userName and i.user.discriminator == userId:
            await ctx.guild.unban(i.user)
            await ctx.send(f"{user} a été unban.")
            return
    #Ici on sait que l'utilisateur n'a pas été trouvé
    await ctx.send(f"L'utilisateur {user} n'est pas dans la liste des bans")
@bot.command() #CLEAR
async def clear(ctx, nombre : int):
"""
Fonction pour supprimer les derniers messages d'un channel
:param ctx: Le contexte de la commande.
:param nombre: Le nombre de messages qui vont etre supprimés.
"""
messages = await ctx.channel.history(limit = nombre + 1).flatten()
for message in messages:
await message.delete()
# Commandes de mute ajoutées par Runger, modifiez e à votre guise :)
# RUNGER COPYRIGHT DON'T COPY THIS CODE OR I'LL TAPER YOU VREMANT TRE FOR (from Senku)
@bot.command()
async def createMutedRole(ctx):
mutedRole = await ctx.guild.create_role(name = "Muted",
permissions = discord.Permissions(
send_messages = False,
speak = False),
reason = "Création du role Muted pour mute des gens. :tripledab:")
for channel in ctx.guild.channels:
await channel.set_permissions(mutedRole, send_messages = False, speak = False)
return mutedRole
async def getMutedRole(ctx):
roles = ctx.guild.roles
for role in roles:
if role.name == "Muted":
return role
return await createMutedRole(ctx)
@bot.command()
@commands.has_permissions(manage_channels = True)
async def mute(ctx, member : discord.Member, *, reason = "Aucune raison n'a été renseignée."):
mutedRole = await getMutedRole(ctx)
await member.add_roles(mutedRole, reason = reason)
embed = discord.Embed(title = "**MUTE**", description = f"{member.mention} vient d'être mute.")
embed.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
await ctx.send(embed = embed)
@bot.command()
@commands.has_permissions(manage_channels = True)
async def unmute(ctx, member : discord.Member, *, reason = "Aucune raison n'a été renseigné"):
mutedRole = await getMutedRole(ctx)
await member.remove_roles(mutedRole, reason = reason)
embed = discord.Embed(title = "**UNMUTE**", description = f"{member.mention} vient d'être unmute.")
embed.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
await ctx.send(embed = embed)
#COMMANDES SHOP ET ITEM #COMMANDES SHOP ET ITEM #COMMANDES SHOP ET ITEM #COMMANDES SHOP ET ITEM #COMMANDES SHOP ET ITEM #COMMANDES SHOP ET ITEM
@bot.command()
async def open_account(ctx):
"""
Fonction pour créer un compte et l'enregistré dans un json (sauf si déjà fait)
:param ctx: Le contexte de la commande.
"""
users = await get_bank_data()
user = ctx.author
if str(user.id) in users:
await ctx.send("Votre compte a déjà été créé !")
return False
else:
users[str(user.id)] = {}
users[str(user.id)]["monnaie"] = 0
users[str(user.id)]["banque"] = 0
with open("mainbank.json", "w") as f:
json.dump(users,f)
await ctx.send("Bravo votre compte vient d'être enregistré avec succès !")
return True
async def opened_account(ctx):
"""
Fonction presque identique a celle au dessus mais réutilisé pour la verification uniquement
:param ctx: Le contexte de la commande.
"""
users = await get_bank_data()
user = ctx.author
if str(user.id) in users:
return False
else:
users[str(user.id)] = {}
users[str(user.id)]["monnaie"] = 0
users[str(user.id)]["banque"] = 0
with open("mainbank.json", "w") as f:
json.dump(users,f)
return True
@bot.command()
async def money(ctx, Utilisateur: Optional[discord.User]):
"""
Fonction permettant d'afficher l'argent d'un utilisateur dans son portemonnaie et son compte en banque
:param ctx: Le contexte de la commande.
:param Utilisateur: Afficher la money de l'utilisateur qui a été ping.
"""
users = await get_bank_data()
if Utilisateur is None:
user = ctx.author
else:
user = Utilisateur
if str(user.id) in users:
porte_monnaie = users[str(user.id)]["monnaie"]
banque_argent = users[str(user.id)]["banque"]
em = discord.Embed(title = f"Argent de {user.name}", color = discord.Color.green())
em.set_thumbnail(url = user.avatar_url)
em.add_field(name = "Porte Monnaie", value = porte_monnaie)
em.add_field(name = "Compte en Banque", value = banque_argent)
await ctx.send(embed = em)
with open("mainbank.json", "w") as f:
json.dump(users,f)
return True
else:
pass
return False
async def get_bank_data():
"""
Fonction pour chercher dans les données d'un utilisateur (utilisé pour la verification seulement)
"""
with open("mainbank.json", "r") as f:
users = json.load(f)
return users
@bot.command()
async def add_money(ctx, money : int, utilisateur: discord.User):
"""
Fonction pour ajouter de l'argent sur ton compte et l'enregistré dans un json
:param ctx: Le contexte de la commande.
"""
users = await get_bank_data()
user = utilisateur
if str(user.id) in users and ctx.message.author.guild_permissions.administrator:
if money<0:
await ctx.send("**Euh... Pourquoi tu utilises la commande ``add`` pour enlever de l'argent ? ... utilise plutot la commande ``!remove_money`` pour ca**")
elif money==0:
await ctx.send("**Hmmmmm qu'est ce que je suis censé répondre à ça ... Beh ... ducoup rien ne se passe. Bon j'y vais moi ...**")
elif user==ctx.author:
await ctx.send("**Tu serais pas en train d'essayer de toucher à ton compte. Ca m'a tout l'air d'etre de la triche ...**")
else:
users[str(user.id)]["monnaie"] = users[str(user.id)]["monnaie"] + money
with open("mainbank.json", "w") as f:
json.dump(users,f)
await ctx.send(f"**Here comes the MONEY !!! Money money money money money\nTu viens de rajouter {money} dans le compte de {user.mention} Félicitation à toi !**")
return True
elif ctx.message.author.guild_permissions.administrator==False:
await ctx.send("Tu n'as pas les droits d'administrateurs pour gérer ça ... dommage.")
else:
await ctx.send("Cet utilisateur n'a pas de compte on dirait ... dites à cette personne de taper la commande open_account pour ouvrir un compte.")
pass
return False
@bot.command()
async def remove_money(ctx, money : int, utilisateur: discord.User):
"""
Fonction pour ajouter de l'argent sur ton compte et l'enregistré dans un json
:param ctx: Le contexte de la commande.
"""
users = await get_bank_data()
user = utilisateur
if str(user.id) in users and ctx.message.author.guild_permissions.administrator:
if money<0:
await ctx.send("**Euh... Pourquoi tu utilises la commande remove pour donner de l'argent ? ... utilise plutot la commande ``!add_money`` pour ca**")
elif user==ctx.author:
await ctx.send("**Tu serais pas en train d'essayer de toucher à ton compte. Ca m'a tout l'air d'être de la triche ... et quoi me regarde pas comme ça !**")
elif money==0:
await ctx.send("**Hmmmmm qu'est ce que je suis censé répondre à ça ... Beh ... ducoup rien ne se passe. Bon j'y vais moi ...**")
elif money>users[str(user.id)]["monnaie"]:
await ctx.send("**Tu ne peux pas enlever plus d'argent que ce que cette personne a déjà sinon ça va aller dans les négatifs **")
else:
users[str(user.id)]["monnaie"] = users[str(user.id)]["monnaie"] - money
with open("mainbank.json", "w") as f:
json.dump(users,f)
await ctx.send(f"**Tu viens d'enlever {money} du compte de {user.mention}. **")
return True
elif ctx.message.author.guild_permissions.administrator==False:
await ctx.send("Tu n'as pas les droits d'administrateurs pour gérer ça ... dommage.")
else:
await ctx.send("Cet utilisateur n'a pas de compte on dirait ... dites à cette personne de taper la commande open_account pour ouvrir un compte.")
pass
return False
async def get_stats():
"""
Fonction pour chercher dans les données d'un utilisateur (utilisé pour la verification seulement)
"""
with open("stats.json", "r") as f:
users = json.load(f)
return users
#TEST IMAGE
#TEST IMAGE
#TEST IMAGE
#TEST IMAGE
#TEST IMAGE
@bot.command()
async def testimage(ctx):
transparent_foreground = Image.open('Steella.png')
animated_gif = Image.open('Versuscreen.gif')
frames = []
for frame in ImageSequence.Iterator(animated_gif):
frame = frame.copy()
frame.paste(transparent_foreground, mask=transparent_foreground)
frames.append(frame)
frames[0].save('output.gif', save_all=True, append_images=frames[1:])
await ctx.send(file=File('./output.gif'))
#COMMANDES DE FIGHT
#COMMANDES DE FIGHT
#COMMANDES DE FIGHT
#COMMANDES DE FIGHT
#COMMANDES DE FIGHT
@bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def fight (ctx, adversaire: discord.User):
"""
Fonction pour lancer un combat entre deux utilisateurs (commande très complexe)
:param ctx: Le contexte de la commande.
:param adversaire: L'utilisateur que vous allez affronter.
"""
await ctx.send("**C'est l'heure du combat !!! Choisissez le mode de combat (1, 2 ou 3):**\n\n**:one: Match Amicale :**\nAucun des personnages ne mourront le combat se termine lorsqu'il reste 5% des PV à l'un d'entre eux.\n\n**:two: DEATH MATCH :**\nCombattez pour votre vie JUSQU'A LA MORT !!!!! (Reset du personnage qui mourra lors du combat)\n\n**:three: Role Play :**\nUne option qui n'utilisera aucune fonctionnalité du bot a vous de jouez et de définir le gagnant à la fin du combat (vous êtes seuls juges du combat a vous deux de définir le perdant (soyez Fair Play ;) )")
user = ctx.author.id
channel = ctx.message.channel
# Début de l'aventure et réponse du joueur
try:
msg = await bot.wait_for('message', check=lambda message: message.author.id == user and message.channel == channel, timeout=20)
reponse = msg.content
if "1" == reponse :
message = await ctx.send(f"**Le combat opposant {ctx.author.mention} et {adversaire.mention}**")
elif "2" == reponse :
await ctx.send(f"**Un grand combat commence aujourd'hui, une ambiance mortelle se crée aux alentours et une brise glaciale se fait sentir, un match à mort est prêt à debuter le lieu s'emplit peu à peu d'une aura meurtrière qui ne cesse de grandir mais qui gagnera ce combat .....**\n\n**Le combat opposera donc {ctx.author.mention} à {adversaire.mention} une page de l'histoire est en train de s'écrire aujourd'hui !**")
elif "3" == reponse :
await ctx.send("**Vous avez choisi le mode Role Play alors à vous de jouer maintenant !!!**")
else:
await ctx.send("**:x: Réponse incorrect réessaye**")
except asyncio.TimeoutError:
await ctx.send("**Tu mets pas mal de temps ..., reviens une fois que tu te seras décidé ^^ !**")
return False
async def get_fiche():
"""
Fonction pour chercher dans les données d'un utilisateur (utilisé pour la verification seulement)
"""
with open("fiche.json", "r") as f:
users = json.load(f)
return users
@bot.command()
async def ping(ctx):
await ctx.send(f':ping_pong: **Pong : {round(bot.latency * 1000)} ms**')
@bot.command()
async def fiche(ctx):
await ctx.send(f"**Bienvenue dans le créateur de fiche {ctx.author.mention} n'est-ce pas excitant !!!\nC'est ici que tu vas pouvoir donner vie à ton personnage mais réflechis bien car les changements seront définitifs sauf si vous effectuez le reset de votre personnage en reprenant l'aventure de ZERO.\nSi tu es prêt ALLONS-Y !!!**")
users = await get_fiche()
await ctx.send("**Pour commencer quel sera le prénom de ton personnage ?**")
def check(m):
return m.author == ctx.author and m.channel == ctx.channel
try:
msg = await bot.wait_for('message', check = check, timeout=None)
except asyncio.TimeoutError:
await ctx.send("**Tu as mis un peu de trop de temps à répondre revient plus tard**")
return
prenomperso = msg.content #await ctx.send(msg.attachments[0].url)
users[str(ctx.author.id)] = {}
users[str(ctx.author.id)]["prenom"] = prenomperso
await ctx.send("**Quel sera son nom de famille ?**")
def check(m):
return m.author == ctx.author and m.channel == ctx.channel
try:
msg = await bot.wait_for('message', check = check, timeout=None)
except asyncio.TimeoutError:
await ctx.send("**Tu as mis un peu de trop de temps à répondre revient plus tard**")
return
nomperso = msg.content #await ctx.send(msg.attachments[0].url)
users[str(ctx.author.id)]["nom"] = nomperso
await ctx.send("**Pour mieux voir les choses donne moi une image du visage de ton personnage (de face si possible).**")
def check(m):
return m.author == ctx.author and m.channel == ctx.channel
try:
msg = await bot.wait_for('message', check = check, timeout=None)
except asyncio.TimeoutError:
await ctx.send("**Tu as mis un peu de trop de temps à répondre revient plus tard**")
return
    if not msg.content and msg.attachments:
users[str(ctx.author.id)]["visage"] = msg.attachments[0].url
elif msg.content.startswith('https://media.discordapp.net/attachments/'):
users[str(ctx.author.id)]["visage"] = msg.content
else:
await ctx.send("**:x: Réessaye ce n'est pas une image ou un url valide**")
with open("fiche.json", "w") as f:
json.dump(users,f)
#EMBED
#EMBED
#EMBED
#EMBED
#EMBED
#EMBED
@bot.command()
async def em(ctx):
await ctx.send("Dans quel salon voulez vous que j'envoie l'embed ? (Spécifiez le avec #)")
def check(m):
return m.author == ctx.author and m.channel == ctx.channel
try:
msg1 = await bot.wait_for('message', check = check, timeout=30.0)
channel_converter = discord.ext.commands.TextChannelConverter()
try:
channel = await channel_converter.convert(ctx, msg1.content)
except commands.BadArgument:
return await ctx.send(embed=discord.Embed(color=discord.Color.red(), description = "Ce salon n'existe pas ! Veuillez retenter la commande !"))
except asyncio.TimeoutError:
await ctx.send("Trop lent ! Veuillez retentez la commande !")
if not channel.permissions_for(ctx.guild.me).send_messages or not channel.permissions_for(ctx.guild.me).add_reactions:
return await ctx.send(embed=discord.Embed(color=discord.Color.red(), description = f"Je n'ai pas le droit d'envoyer de message sur le salon programmé : {channel}"))
await ctx.send(f"Très bien ! L'embed sera envoyé dans le salon : {channel.mention} !\nQuel sera le **titre** de l'embed?")
def checkMessage(message):
return message.author == ctx.message.author and ctx.message.channel == message.channel
try:
titre = await bot.wait_for("message", timeout = 60, check = checkMessage)
except:
await ctx.send(":x: Veuillez retentez la commande.")
return
message = await ctx.send(f"Voici le titre : **{titre.content}**\nQuel est sa **description** ?")
def checkMessage(message):
return message.author == ctx.message.author and ctx.message.channel == message.channel
try:
recette = await bot.wait_for("message", timeout = 60, check = checkMessage)
except:
await ctx.send(":x: Veuillez retentez la commande.")
return
await ctx.send("Voulez-vous prévoir une autre description ? (Tapez O ou N)")
message = await bot.wait_for('message', check=lambda message: message.author == ctx.author and message.channel == ctx.channel)
answer = message.content
while answer != str('O') and answer != str('N'):
message = await bot.wait_for('message', check=lambda message: message.author == ctx.author and message.channel == ctx.channel)
answer = message.content
if answer.startswith('O'):
message = await ctx.send(f"Veuillez tapez le titre de la seconde description !")
def checkMessage(message):
return message.author == ctx.message.author and ctx.message.channel == message.channel
try:
field = await bot.wait_for("message", timeout = 60, check = checkMessage)
except:
await ctx.send(":x: Veuillez retentez la commande.")
return
message = await ctx.send(f"{field.content} sera donc son titre ! Quelle sera sa description ?")
def checkMessage(message):
return message.author == ctx.message.author and ctx.message.channel == message.channel
try:
fieldes = await bot.wait_for("message", timeout = 60, check = checkMessage)
except:
await ctx.send(":x: Veuillez retentez la commande.")
return
message = await ctx.send(f"Très bien ! Voici donc sa description : **{fieldes.content}**\nQuel est son **pied de page** ?")
def checkMessage(message):
return message.author == ctx.message.author and ctx.message.channel == message.channel
try:
footer = await bot.wait_for("message", timeout = 60, check = checkMessage)
except:
await ctx.send(":x: Veuillez retentez la commande.")
return
await ctx.send("L'embed a été envoyé au salon programmé !", delete_after = 2)
embed = discord.Embed(color=discord.Color.red(), title=f"{titre.content}", description=f"{recette.content}")
embed.add_field(name= f"{field.content}", value=f"{fieldes.content}")
embed.set_footer(text=f"{footer.content}")
await channel.send(embed=embed)
if answer.startswith('N'):
message = await ctx.send(f"Très bien ! Voici donc sa description : **{titre.content}**\nQuel est son **pied de page** ?")
def checkMessage(message):
return message.author == ctx.message.author and ctx.message.channel == message.channel
try:
footer = await bot.wait_for("message", timeout = 60, check = checkMessage)
except:
await ctx.send(":x: Veuillez retentez la commande.")
return
await ctx.send("L'embed a été envoyé au salon programmé !", delete_after = 2)
embed = discord.Embed(color=discord.Color.red(), title=f"{titre.content}", description=f"{recette.content}")
embed.set_footer(text=f"{footer.content}")
await channel.send(embed=embed)
@bot.command()
async def pp(ctx):
await ctx.send(ctx.author.avatar_url)
bot.run(token)
|
StarcoderdataPython
|
1736835
|
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import LabelEncoder
import Dataset
import text_normalization
from pickle import dump, load
from sklearn.model_selection import train_test_split
def loadTrainValData(batchsize=16, num_worker=2, pretraine_path="bert-base-uncased"):
data = pd.read_csv('data/training_data.csv', delimiter=',')
Train_data, Dev_data = train_test_split(data, test_size=0.2, stratify=data[['sarcasm', 'sentiment']], random_state=42, shuffle=True)
Dev_data.to_csv('data/dev_set.csv')
Train_data['tweet'] = Train_data['tweet'].apply(lambda x: text_normalization.clean(x))
Dev_data['tweet'] = Dev_data['tweet'].apply(lambda x: text_normalization.clean(x))
print(f'Training data size {Train_data.shape}')
print(f'Validation data size {Dev_data.shape}')
DF_train = Dataset.TrainDataset(Train_data, pretraine_path)
DF_dev = Dataset.TrainDataset(Dev_data, pretraine_path)
DF_train_loader = DataLoader(dataset=DF_train, batch_size=batchsize, shuffle=True,
num_workers=num_worker)
DF_dev_loader = DataLoader(dataset=DF_dev, batch_size=batchsize, shuffle=False,
num_workers=num_worker)
return DF_train_loader, DF_dev_loader
def loadTestData(batchsize=16, num_worker=2, pretraine_path="bert-base-uncased"):
Test_data = pd.read_csv('data/test_set.csv', delimiter=',')
print(f'Test data size {Test_data.shape}')
Test_data['tweet'] = Test_data['tweet'].apply(lambda x: text_normalization.clean(x))
DF_test = Dataset.TestDataset(Test_data, pretraine_path)
DF_test_loader = DataLoader(dataset=DF_test, batch_size=batchsize, shuffle=False,
num_workers=num_worker)
return DF_test_loader
def loadTrainValData_v2(batchsize=16, num_worker=2, pretraine_path="bert-base-uncased"):
Train_data = pd.read_csv('data/ArSarcasm_train.csv', delimiter=',')
Train_data['tweet'] = Train_data['tweet'].apply(lambda x: text_normalization.clean(x))
Dev_data = pd.read_csv('data/ArSarcasm_test.csv', delimiter=',')
Dev_data['tweet'] = Dev_data['tweet'].apply(lambda x: text_normalization.clean(x))
print(f'Training data size {Train_data.shape}')
print(f'Validation data size {Dev_data.shape}')
DF_train = Dataset.TrainDataset(Train_data, pretraine_path)
DF_dev = Dataset.TrainDataset(Dev_data, pretraine_path)
DF_train_loader = DataLoader(dataset=DF_train, batch_size=batchsize, shuffle=True,
num_workers=num_worker)
DF_dev_loader = DataLoader(dataset=DF_dev, batch_size=batchsize, shuffle=False,
num_workers=num_worker)
return DF_train_loader, DF_dev_loader
|
StarcoderdataPython
|
3240076
|
<reponame>HrushikeshShukla/multilingual_chatbot
#!/usr/bin/python3 # This is client.py file
import socket
# create a socket object
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# get local machine name
host = socket.gethostname()
port = 9999
# connection to hostname on the port.
s.connect((host, port))
# Receive no more than 2048 bytes
ms = s.recv(2048)
msg=ms.decode('utf-8')
if msg== '11':
print('connected')
##selecting the language
print("***Welcome to multilinugal chatbot.***")
print("***बहुभाषिक चॅटबॉटमध्ये आपले स्वागत आहे.***")
print("\n \nPlease Select language:\nकृपया भाषा निवडा:")
indx=int(input("\n मराठीसाठी 1 दाबा,Press 2 for english: "))
if indx==1:
print("Starting marathi version.\n मराठी आवृत्ती सुरू करीत आहे.")
msg1='1'
s.send(msg1.encode("utf-8"))
while msg1 != '0010':
msg1=input('आपण :')
if msg1=='00':
print('disconnect initiated')
s.send(msg1.encode("utf-8"))
continue
s.send(msg1.encode("utf-8"))
ms=s.recv(8192)
msg=ms.decode('utf-8')
print('रोबोट :'+msg)
else:
print("Starting english version.")
msg1='2'
s.send(msg1.encode("utf-8"))
while msg1 != '0010':
msg1=input('You :')
if msg1=='00':
print('disconnect initiated')
s.send(msg1.encode("utf-8"))
continue
s.send(msg1.encode("utf-8"))
ms=s.recv(8192)
msg=ms.decode('utf-8')
print('bot :'+msg)
s.close()
#Now run this server.py in the background and then run the above client.py to see the result.
|
StarcoderdataPython
|
1604382
|
<filename>Miniprojekt och Studio/Mini3/mini3Flash.py
import numpy as np
import matplotlib.pyplot as plt
from VLE import *
P = 1520 # mmHg
T = 87 # C
Xf = 0.35
tol = 0.001
Tb_m = 64.7
Tb_e = 77.1
A1 = 7.87863
B1 = 1473.11
C1 = 230.0
A2 = 7.09803
B2 = 1238.71
C2 = 217.0
ABC1 = [A1, B1, C1]
ABC2 = [A2, B2, C2]
Lambda12 = 0.62551
Lambda21 = 0.49384
Xsum = []
#X1 = []
#X2 = []
X = list(np.linspace(0,99,100))
#print(X)
for L in X:
xsum, x1, x2 = flash2(100, L, Xf, P, T, ABC1, ABC2)
Xsum.append(xsum)
#X1.append(x1)
#X2.append(x2)
# print(Xsum)
plt.plot(X, Xsum)
plt.show()
|
StarcoderdataPython
|
3263097
|
<filename>scripts/x_model_gen.py
"""
This is a small script for generating the initial Go model from the
olca-schema yaml files. To run this script you need to have PyYAML
installed:
pip install pyyaml
You also have to configure the YAML_DIR in this script to point to
the directory where the YAML files are located:
# clone the olca-schema repository to some folder
cd <folder>
git clone https://github.com/GreenDelta/olca-schema.git
# <folder>/olca-schema/yaml is the path for the YAML_DIR
After this you can run this script. It will print the generated structs
to the console:
python x_model_gen.py > [.. path to generated file].go
"""
YAML_DIR = 'C:/Users/Besitzer/Downloads/olca-schema/yaml'
import yaml
from os import listdir
def print_class(class_model):
name = class_model['name']
print('// %s http://greendelta.github.io/olca-schema/html/%s.html' % (name, name))
t = 'type %s struct {\n' % name
if 'superClass' in class_model:
t += '\t%s\n' % class_model['superClass']
if 'properties' in class_model:
for prop in class_model['properties']:
t += '\t' + convert_property(prop) + '\n'
t += '}\n'
print(t)
print_constructor(class_model)
def convert_property(prop):
name = prop['name']
t = name[0].upper() + name[1:]
type = prop['type']
if type == 'integer':
t += ' int' + (' `json:"%s"`' % name)
elif type == 'double':
t += ' float64' + (' `json:"%s"`' % name)
elif type == 'boolean':
t += ' bool' + (' `json:"%s"`' % name)
elif type == 'date' or type == 'dateTime':
t += ' string' + (' `json:"%s,omitempty"`' % name)
elif type == 'List[string]':
t += ' []string' + (' `json:"%s,omitempty"`' % name)
elif type.startswith('List['):
sub = type[5:(len(type)-1)]
t += ' []' + sub + (' `json:"%s,omitempty"`' % name)
else:
t += ' ' + type + (' `json:"%s,omitempty"`' % name)
return t
def print_constructor(class_model):
if 'superClass' not in class_model:
return
name = class_model['name']
s = class_model['superClass']
if s != 'RootEntity' and s != 'CategorizedEntity':
return
t = '// New%s initializes a new %s with the given id and name\n' % (name, name)
v = name[0].lower()
t += 'func New%s(id, name string) *%s {\n' % (name, name)
t += '\t%s := %s{}\n' % (v, name)
t += '\t%s.Context = ContextURL\n' % v
t += '\t%s.Type = "%s"\n' % (v, name)
t += '\t%s.ID = id\n' % v
t += '\t%s.Name = name\n' % v
t += '\treturn &%s\n' % v
t += '}\n'
print(t)
if __name__ == '__main__':
    print('package schema\n')
    for f in listdir(YAML_DIR):
        path = YAML_DIR + '/' + f
        with open(path, 'r', encoding='utf-8') as stream:
            model = yaml.safe_load(stream)
            if 'class' in model:
                print_class(model['class'])
|
StarcoderdataPython
|
100070
|
<reponame>rackerlabs/daemonx
# Copyright (c) 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# This file includes code taken from or based on code from:
# https://github.com/openstack/swift
# and
# http://stackoverflow.com/questions/12676393/creating-python-2-7-daemon-with-pep-3143
#
# When the code was taken from swift (and then possibly modified), it's marked
# by the comment "from swift".
from __future__ import with_statement
from ConfigParser import ConfigParser
import errno
import grp
import logging
import logging.handlers
from optparse import OptionParser
import pwd
import os
from random import random
import signal
import socket
import sys
import time
from daemonx.utils import Timeout, TimeoutError
# from swift
class LoggerFileObject(object):
"""
Used to capture stderr/stdout.
"""
def __init__(self, logger):
self.logger = logger
def write(self, value):
value = value.strip()
if value:
if 'Connection reset by peer' in value:
self.logger.error('STDOUT: Connection reset by peer')
else:
self.logger.error('STDOUT: %s', value)
def writelines(self, values):
self.logger.error('STDOUT: %s', '#012'.join(values))
def close(self):
pass
def flush(self):
pass
def __iter__(self):
return self
def next(self):
raise IOError(errno.EBADF, 'Bad file descriptor')
def read(self, size=-1):
raise IOError(errno.EBADF, 'Bad file descriptor')
def readline(self, size=-1):
raise IOError(errno.EBADF, 'Bad file descriptor')
def tell(self):
return 0
def xreadlines(self):
return self
def check_pid(env):
if env['pid']:
# check to see if there is a process with that pid
try:
# there is a process with this pid
os.kill(env['pid'], 0)
print 'Daemon appears to be already running'
sys.exit()
except OSError, e:
# there is not a process with this pid
if not e.errno == errno.ESRCH:
raise
env['pid'] = None
# from swift
def drop_privileges(user):
"""
Sets the userid/groupid of the current process, get session leader, etc.
:param user: User name to change privileges to
"""
if os.geteuid() == 0:
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
os.setgroups(groups)
user = pwd.getpwnam(user)
os.setgid(user[3])
os.setuid(user[2])
os.environ['HOME'] = user[5]
try:
os.setsid()
except OSError:
pass
os.chdir('/') # in case you need to rmdir on where you started the daemon
os.umask(0o22) # ensure files are created with the correct privileges
def get_check_progress_time(conf):
interval = get_interval(conf)
check_progress_time = int(conf.get('check_progress_time', 0))
if check_progress_time:
return max(interval * 1.1, check_progress_time)
return 0
def get_command_line(command_line, dargs_parser, args_parser):
"""
Parses the command line.
Command line should be of the form:
[common daemon args] command [unique daemon args].
Returns common daemon args, command, and unique daemon args.
"""
dargs = dargs_parser.parse_args(command_line)
command = dargs[1][0]
if command not in Daemon.commands:
raise ValueError('Invalid daemon command')
args = args_parser.parse_args(dargs[1][1:])
return dargs, command, args
def get_daemon(env):
return env['daemon_class'](
env['global_conf'], env['conf_section'], env['pid_file_path'],
env['dargs'], env['args'])
def get_interval(conf):
return int(conf.get('interval', 300))
def kill_child_process(pid):
start_time = time.time()
while time.time() - start_time < 5:
try:
ret = os.waitpid(pid, os.WNOHANG)
except OSError, e:
if str(e).find('No such process') == 0:
raise
else:
return
if ret != (0, 0):
break
time.sleep(1)
if ret == (0, 0):
try:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
except OSError, e:
if str(e).find('No such process') == 0:
raise
def kill_children(*args):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
os.killpg(0, signal.SIGTERM)
sys.exit()
def kill_process(pid):
try:
start_time = time.time()
while time.time() - start_time < 5:
os.kill(pid, signal.SIGTERM)
time.sleep(1)
except OSError, e:
if str(e).find('No such process') > 0:
return
try:
start_time = time.time()
while time.time() - start_time < 5:
os.kill(pid, signal.SIGKILL)
time.sleep(1)
except OSError, e:
if str(e).find('No such process') > 0:
return
raise RuntimeError('Unable to kill process %d' % pid)
def get_pid(env):
"""
    Reads and returns the daemon's pid from pid file on disk.
Returns None on failure.
"""
try:
with open(env['pid_file_path'], 'r') as fd:
pid = int(fd.read().strip())
except IOError:
pid = None
return pid
def get_project_from_conf_path(conf_path):
if not os.path.isfile(conf_path):
raise ValueError('File path expected')
conf_file = os.path.basename(conf_path)
if not conf_file.endswith('.conf'):
raise ValueError('Conf file should end with .conf')
return conf_file[:-len('.conf')]
# from swift
def list_from_csv(comma_separated_str):
"""
Splits the str given and returns a properly stripped list of the comma
separated values.
"""
if comma_separated_str:
return [v.strip() for v in comma_separated_str.split(',') if v.strip()]
return []
def parse_run_name():
"""
Returns the parts of the run name of the daemon.
The run name should be of the form: project-daemon
This is used to determine the config location/section name.
"""
command = os.path.split(sys.argv[0])[1]
parts = command.split('-')
if len(parts) != 2:
raise ValueError()
return parts
# from swift
def read_config(conf_path):
"""
Reads a config and returns its sections/values.
"""
c = ConfigParser()
if not c.read(conf_path):
print "Unable to read config from %s" % conf_path
sys.exit(1)
conf = {}
for s in c.sections():
conf.update({s: dict(c.items(s))})
conf['__file__'] = conf_path
return conf
def run_worker(env, run_once=False):
daemon = get_daemon(env)
daemon.daemonize()
if run_once:
try:
daemon.run_once()
finally:
env['cls'].delete_pid_file(env)
else:
daemon.run()
sys.exit()
class Daemon(object):
"""
A class for building daemons.
It takes care of things common to all daemons.
"""
commands = 'restart run_once run_once_debug start status stop'.split()
handler4logger = {}
def __init__(self, global_conf, conf_section, pid_file_path, dargs, args):
self.global_conf = global_conf
self.conf_section = conf_section
self.pid_file_path = pid_file_path
self.conf = self.global_conf[conf_section]
self.dargs = dargs
self.args = args
self.logger = self.get_logger(self.conf)
self.interval = get_interval(self.conf)
self.check_progress_time = get_check_progress_time(self.conf)
self.last_progress = None
# from swift
def capture_stdio(self):
"""
Log unhandled exceptions, close stdio, capture stdout and stderr.
"""
# log uncaught exceptions
sys.excepthook = lambda * exc_info: \
self.logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
# FUTURE: make the capture optional?
sys.stdout = LoggerFileObject(self.logger)
sys.stderr = LoggerFileObject(self.logger)
def daemonize(self):
"""
Daemonizes the current process.
"""
self.capture_stdio()
@classmethod
def delete_pid_file(cls, env):
if os.path.exists(env['pid_file_path']):
os.remove(env['pid_file_path'])
@classmethod
def get_args_parser(cls):
"""
Override to parse options unique to your daemon.
Returns an OptionParser.
"""
return OptionParser()
@classmethod
def get_dargs_parser(cls):
"""
Returns an OptionParser for options common to all daemons.
Returns an OptionParser.
"""
# FUTURE: add things that can be overridden on command line
parser = OptionParser()
parser.add_option(
"--eventlet_patch", action="store_false", dest="eventlet_patch",
default=False, help="add eventlet patch")
parser.disable_interspersed_args()
return parser
# from swift
@classmethod
def get_logger(cls, conf):
"""
Returns a logger configured from the conf.
"""
if not conf:
conf = {}
name = conf.get('log_name', 'daemonx')
log_route = conf.get('log_route', name)
logger = logging.getLogger(log_route)
logger.propagate = False
# get_logger will only ever add one SysLog Handler to a logger
if logger in cls.handler4logger:
logger.removeHandler(cls.handler4logger[logger])
# facility for this logger will be set by last call wins
facility = getattr(
logging.handlers.SysLogHandler,
conf.get('log_facility', 'LOG_LOCAL0'),
logging.handlers.SysLogHandler.LOG_LOCAL0)
udp_host = conf.get('log_udp_host')
if udp_host:
udp_port = int(
conf.get('log_udp_port',
logging.handlers.SYSLOG_UDP_PORT))
handler = logging.handlers.SysLogHandler(
address=(udp_host, udp_port), facility=facility)
else:
log_address = conf.get('log_address', '/dev/log')
try:
handler = logging.handlers.SysLogHandler(
address=log_address, facility=facility)
except socket.error, e:
# Either /dev/log isn't a UNIX socket or it does not exist
# at all
if e.errno not in [errno.ENOTSOCK, errno.ENOENT]:
raise e
handler = logging.handlers.SysLogHandler(facility=facility)
logger.addHandler(handler)
cls.handler4logger[logger] = handler
# set the level for the logger
logger.setLevel(
getattr(
logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
return logger
@classmethod
def made_progress(cls, env):
if not env['check_progress_time']:
return True
try:
stat = os.stat(env['pid_file_path'])
return time.time() - stat.st_mtime < env['check_progress_time']
except OSError:
return True
@classmethod
def restart(cls, env):
"""
Restarts the daemon.
"""
if env['pid']:
cls.stop(env)
env['pid'] = None
cls.start(env)
def run(self):
"""
Runs the daemon.
It calls run_forever().
"""
self.run_forever()
@classmethod
def run_command(
cls, conf_path, conf_section, command_line, project=None,
daemon_name=None):
"""
Sends the command specified on the command line to the daemon.
"""
env = {
'cls': cls,
'conf_path': conf_path,
'conf_section': conf_section,
}
# read config
env['global_conf'] = read_config(conf_path)
env['conf'] = env['global_conf'][conf_section]
# get project/daemon name
if not (project and daemon_name):
project = get_project_from_conf_path(env['conf_path'])
daemon_name = env['conf_section']
env['project'] = project
env['daemon_name'] = daemon_name
# get/import class from config
import_target, class_name = \
env['conf']['class'].rsplit('.', 1)
module = __import__(import_target, fromlist=[import_target])
env['daemon_class'] = getattr(module, class_name)
# parse command line, get command to run on daemon
dargs_parser = cls.get_dargs_parser()
args_parser = env['daemon_class'].get_args_parser()
env['dargs'], env['command'], env['args'] = get_command_line(
command_line, dargs_parser, args_parser)
# check command
if env['command'] not in cls.commands:
raise ValueError('Invalid command')
# get user
env['user'] = env['conf']['user']
# get pid file path, pid
env['pid_file_path'] = '/var/run/%s/%s.pid' % \
(env['project'], env['daemon_name'])
# create /var/run/project directory if it doesn't exist
run_dir = '/var/run/%s' % env['project']
if not os.path.exists(run_dir):
os.mkdir(run_dir, 0755)
user = pwd.getpwnam(env['user'])
os.chown(run_dir, user[2], user[3])
env['pid'] = get_pid(env)
# get progress check related values
env['interval'] = get_interval(env['conf'])
env['check_progress_time'] = get_check_progress_time(env['conf'])
if env['check_progress_time']:
env['progress_sleep_time'] = \
max(1, int(.1 * env['check_progress_time']))
else:
if env['command'] == 'run_once':
env['progress_sleep_time'] = 5
else:
env['progress_sleep_time'] = int(.1 * env['interval'])
# drop privs
drop_privileges(env['user'])
# run command
if env['command'] == 'run_once':
method = getattr(env['daemon_class'], 'start')
method(env, run_once=True)
elif env['command'] == 'run_once_debug':
method = getattr(env['daemon_class'], 'start_debug')
method(env)
else:
method = getattr(env['daemon_class'], env['command'])
method(env)
@classmethod
def run_command_from_script(cls):
"""
Runs the command on the daemon.
Project and daemon name are determined from the script run.
"""
project, daemon_name = parse_run_name()
conf_path = '/etc/%s/%s.conf' % (project, project)
cls.run_command(
conf_path, daemon_name, list(sys.argv[1:]), project, daemon_name)
def run_forever(self):
"""
Run the daemon forever.
Sleeps as need be to not run more than once in each interval.
Calls run_once().
"""
time.sleep(random() * self.interval)
while True:
try:
self.run_once()
except Exception:
self.logger.exception('run_once()')
time.sleep(self.interval)
def run_once(self):
"""
Override this to define what the daemon does.
"""
raise NotImplementedError('run_once not implemented')
@classmethod
def run_worker(cls, env, run_once=False):
class State(object):
pass
state = State()
# fork to watcher and worker processes
state.pid = os.fork()
if state.pid > 0:
# watcher process
signal.signal(signal.SIGTERM, kill_children)
while True:
if not cls.made_progress(env):
# kill worker process
kill_child_process(state.pid)
state.pid = os.fork()
if state.pid == 0:
# worker process
os.utime(env['pid_file_path'], None)
run_worker(env, run_once)
if run_once:
try:
with Timeout(env['progress_sleep_time']):
os.waitpid(state.pid, 0)
if not os.path.exists(env['pid_file_path']):
return
except OSError:
return
except TimeoutError:
pass
else:
time.sleep(env['progress_sleep_time'])
else:
# worker process
run_worker(env, run_once)
@classmethod
def start(cls, env, run_once=False):
"""
Starts the daemon.
"""
# check to see if daemon is already running
check_pid(env)
# really close stdin, stdout, stderr
for fd in [0, 1, 2]:
os.close(fd)
# daemonize things
if os.fork() > 0:
return
try:
# write pid
cls.write_pid_file(env)
cls.run_worker(env, run_once)
finally:
env['cls'].delete_pid_file(env)
@classmethod
def start_debug(cls, env):
# check to see if daemon is already running
check_pid(env)
daemon = get_daemon(env)
daemon.run_once()
@classmethod
def status(cls, env):
"""
Prints the status of the daemon.
"""
if env['pid']:
print 'Daemon is running with pid: %d' % env['pid']
else:
print 'Daemon is not running'
@classmethod
def stop(cls, env):
"""
Stops the daemon.
"""
if not env['pid']:
print 'Daemon does not seem to be running'
return
kill_process(env['pid'])
cls.delete_pid_file(env)
def update_progress_marker(self, force=False):
if not self.check_progress_time:
return
update = False
if force:
update = True
elif not self.last_progress:
update = True
elif .1 * self.check_progress_time < time.time() - self.last_progress:
update = True
if update:
try:
os.utime(self.pid_file_path, None)
except OSError:
pass
self.last_progress = time.time()
@classmethod
def write_pid_file(cls, env):
with open(env['pid_file_path'], 'w+') as fd:
pid = os.getpid()
fd.write('%d\n' % pid)
|
StarcoderdataPython
|
1647940
|
import os
import sys
sys.path.append(os.getcwd())
import pickle
import pandas as pd
from hpo.utils import *
from hpo.helpers import *
from hpo.task2vec.task2vec import Task2Vec
from hpo.task2vec.models import get_model
import hpo.task2vec.task_similarity as task_similarity
def calculate_dataset_x_augmentation_embeddings(
datasets_main_dir: str,
dataset_names: list,
probe: str,
skip_layers: int,
method: str,
max_samples: int):
embeddings = []
dataset_keys = []
dataset_dirs = [os.path.join(datasets_main_dir, name) for name in dataset_names]
for name, dataset_dir in zip(dataset_names, dataset_dirs):
logger.info(f"Embedding {name}")
images, labels = get_augmented_train_set(dataset_dir)
classes, counts = np.unique(labels, return_counts = True)
num_classes = len(classes)
logger.info(f'Batch images shape : {images.shape}')
logger.info(f'Batch labels shape : {labels.shape}')
logger.info(f"Unique labels : {num_classes}")
n_aug = get_n_aug(images)
logger.info(f'Augmentating for {n_aug} times...')
images, labels = augment(images, labels, n_aug = n_aug)
logger.info(f'Augmentating completed. Processing...')
images = process_images(images, 224)
labels = torch.from_numpy(labels).long()
logger.info(f'Augmented images shape : {images.size()}')
logger.info(f'Augmented labels shape : {labels.size()}')
dataset = torch.utils.data.TensorDataset(images, labels)
del images
del labels
probe_network = get_model(probe, pretrained=True, num_classes=num_classes).cuda()
task2vec = Task2Vec(probe_network, max_samples=max_samples, skip_layers=skip_layers, method = method, loader_opts = {'batch_size': 100})
embedding, metrics = task2vec.embed(dataset)
embeddings.append(embedding)
dataset_keys.append(name)
logger.info(f"Embedding {name} completed!")
logger.info(metrics.avg)
del dataset
del probe_network
del task2vec
normalized_embeddings, normalization = task_similarity.get_normalized_embeddings(embeddings)
meta_features = dict()
for dataset_key, e in zip(dataset_keys, normalized_embeddings):
meta_features[dataset_key] = {i: e[i] for i in range(len(e))}
return embeddings, meta_features, normalization
def dump_embeddings(embeddings, normalization, task_names, output_dir):
pickle.dump(embeddings, open(os.path.join(output_dir, 'embeddings.pkl'), 'wb'))
pickle.dump(normalization, open(os.path.join(output_dir, 'normalization.pkl'), 'wb'))
pickle.dump(task_names, open(os.path.join(output_dir, 'task_names.pkl'), 'wb'))
def convert_metadata_to_df(metadata):
k, v = list(metadata.items())[0]
columns = sorted(v.keys())
columns_edited = False
features_lists = []
indices = []
for key, values_dict in sorted(metadata.items()):
indices.append(key)
feature_list = [values_dict[k] for k in sorted(values_dict.keys())]
# below loop flattens feature list since there are tuples in it &
# it extends columns list accordingly
for i, element in enumerate(feature_list):
if type(element) is tuple:
# convert tuple to single list elements
slce = slice(i, i + len(element) - 1)
feature_list[slce] = list(element)
if not columns_edited:
columns_that_are_tuples = columns[i]
new_columns = [
columns_that_are_tuples + "_" + str(i) for i in range(len(element))
]
columns[slce] = new_columns
columns_edited = True
features_lists.append(feature_list)
return pd.DataFrame(features_lists, columns=columns, index=indices)
def dump_meta_features_df_and_csv(meta_features, output_path, file_name="metafeatures", samples_along_rows=False, n_samples=None):
if not os.path.isdir(output_path):
os.makedirs(output_path)
if not isinstance(meta_features, pd.DataFrame):
df = convert_metadata_to_df(meta_features)
else:
df = meta_features
df.to_csv(os.path.join(output_path, file_name+".csv"))
logger.info("meta features data dumped to: {}".format(output_path))
def main(args):
### ALL DATASETS ALL AUGMENTATIONS ###
dataset_names = os.listdir(args.dataset_dir)
embeddings, meta_features, normalization = \
calculate_dataset_x_augmentation_embeddings(args.dataset_dir,
dataset_names,
args.probe_network,
args.skip_layers,
args.method,
args.max_samples)
task_names = dataset_names
if args.plot_dist_mat:
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
task_similarity.plot_distance_matrix(embeddings, task_names, savepath = os.path.join(args.output_dir, 'dist_mat.png'))
return embeddings, task_names, meta_features, normalization
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Task2VecPipeline")
parser.add_argument("--dataset_dir", type=str, default = "/work/dlclarge2/ozturk-experiments/few_shot_finalized/")
parser.add_argument("--dataset_group", type=str, default = 'all') # all/training/validation
parser.add_argument("--output_dir", type=str, default = 'experiments/02_22/task2vec_variational/')
parser.add_argument("--plot_dist_mat", type=bool, default = True)
parser.add_argument("--probe_network", type=str, default = 'resnet34')
parser.add_argument("--skip_layers", type=int, default = 0)
parser.add_argument("--method", type=str, default = 'variational')
parser.add_argument("--max_samples", type=int, default = 10000)
args, _ = parser.parse_known_args()
verbosity_level = "INFO"
logger = get_logger(verbosity_level)
#### ORIGINAL RUN SCRIPT ####
embeddings, task_names, meta_features, normalization = main(args)
dump_embeddings(embeddings, normalization, task_names, args.output_dir)
dump_meta_features_df_and_csv(meta_features=meta_features, output_path=args.output_dir)
|
StarcoderdataPython
|
4832895
|
from django.contrib import admin
from .models import PageView
# Register your models here.
class PageViewAdmin(admin.ModelAdmin):
list_display = ['hostname', 'timestamp']
admin.site.register(PageView, PageViewAdmin)
|
StarcoderdataPython
|
3219725
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/setup.py 5023 2010/06/14 22:05:46 scons"
import os
import os.path
import stat
import sys
Version = "2.0.0.final.0"
man_pages = [
'scons.1',
'sconsign.1',
'scons-time.1',
]
(head, tail) = os.path.split(sys.argv[0])
if head:
os.chdir(head)
sys.argv[0] = tail
is_win32 = 0
if not sys.platform == 'win32':
try:
if sys.argv[1] == 'bdist_wininst':
is_win32 = 1
except IndexError:
pass
else:
is_win32 = 1
try:
import distutils
import distutils.core
import distutils.command.install
import distutils.command.install_data
import distutils.command.install_lib
import distutils.command.install_scripts
import distutils.command.build_scripts
except ImportError:
sys.stderr.write("""Could not import distutils.
Building or installing SCons from this package requires that the Python
distutils be installed. See the README or README.txt file from this
package for instructions on where to find distutils for installation on
your system, or on how to install SCons from a different package.
""")
sys.exit(1)
_install = distutils.command.install.install
_install_data = distutils.command.install_data.install_data
_install_lib = distutils.command.install_lib.install_lib
_install_scripts = distutils.command.install_scripts.install_scripts
_build_scripts = distutils.command.build_scripts.build_scripts
class _options(object):
pass
Options = _options()
Installed = []
def set_explicitly(name, args):
"""
    Return whether the installation directory was set explicitly by the
user on the command line. This is complicated by the fact that
"install --install-lib=/foo" gets turned into "install_lib
--install-dir=/foo" internally.
"""
if args[0] == "install_" + name:
s = "--install-dir="
else:
# The command is something else (usually "install")
s = "--install-%s=" % name
set = 0
length = len(s)
for a in args[1:]:
if a[:length] == s:
set = 1
break
return set
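
# Illustrative only: for a command line of "setup.py install --install-lib=/opt/scons",
# distutils re-dispatches to "install_lib --install-dir=/opt/scons", so both
# set_explicitly("lib", ["install", "--install-lib=/opt/scons"]) and
# set_explicitly("lib", ["install_lib", "--install-dir=/opt/scons"]) return 1.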
class install(_install):
user_options = _install.user_options + [
('no-scons-script', None,
"don't install 'scons', only install 'scons-%s'" % Version),
('no-version-script', None,
"don't install 'scons-%s', only install 'scons'" % Version),
('install-bat', None,
"install 'scons.bat' script"),
('no-install-bat', None,
"do not install 'scons.bat' script"),
('install-man', None,
"install SCons man pages"),
('no-install-man', None,
"do not install SCons man pages"),
('standard-lib', None,
"install SCons library in standard Python location"),
('standalone-lib', None,
"install SCons library in separate standalone directory"),
('version-lib', None,
"install SCons library in version-numbered directory"),
]
boolean_options = _install.boolean_options + [
'no-scons-script',
'no-version-script',
'install-bat',
'no-install-bat',
'install-man',
'no-install-man',
'standard-lib',
'standalone-lib',
'version-lib'
]
if hasattr(os, 'symlink'):
user_options.append(
('hardlink-scons', None,
"hard link 'scons' to the version-numbered script, don't make a separate 'scons' copy"),
)
boolean_options.append('hardlink-script')
if hasattr(os, 'symlink'):
user_options.append(
('symlink-scons', None,
"make 'scons' a symbolic link to the version-numbered script, don't make a separate 'scons' copy"),
)
boolean_options.append('symlink-script')
def initialize_options(self):
_install.initialize_options(self)
self.no_scons_script = 0
self.no_version_script = 0
self.install_bat = 0
self.no_install_bat = not is_win32
self.install_man = 0
self.no_install_man = is_win32
self.standard_lib = 0
self.standalone_lib = 0
self.version_lib = 0
self.hardlink_scons = 0
self.symlink_scons = 0
# Don't warn about having to put the library directory in the
# search path.
self.warn_dir = 0
def finalize_options(self):
_install.finalize_options(self)
if self.install_bat:
Options.install_bat = 1
else:
Options.install_bat = not self.no_install_bat
if self.install_man:
Options.install_man = 1
else:
Options.install_man = not self.no_install_man
Options.standard_lib = self.standard_lib
Options.standalone_lib = self.standalone_lib
Options.version_lib = self.version_lib
Options.install_scons_script = not self.no_scons_script
Options.install_version_script = not self.no_version_script
Options.hardlink_scons = self.hardlink_scons
Options.symlink_scons = self.symlink_scons
def get_scons_prefix(libdir, is_win32):
"""
Return the right prefix for SCons library installation. Find
this by starting with the library installation directory
(.../site-packages, most likely) and crawling back up until we reach
a directory name beginning with "python" (or "Python").
"""
drive, head = os.path.splitdrive(libdir)
while head:
if head == os.sep:
break
head, tail = os.path.split(head)
if tail.lower()[:6] == "python":
# Found the Python library directory...
if is_win32:
# ...on Win32 systems, "scons" goes in the directory:
# C:\PythonXX => C:\PythonXX\scons
return os.path.join(drive + head, tail)
else:
# ...on other systems, "scons" goes above the directory:
# /usr/lib/pythonX.X => /usr/lib/scons
return os.path.join(drive + head)
return libdir
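
# Sketch of the crawl above (hypothetical paths): on a POSIX system,
# get_scons_prefix('/usr/lib/python2.6/site-packages', 0) walks up to '/usr/lib',
# the parent of the "python*" directory, while on Win32
# get_scons_prefix('C:\\Python26\\Lib\\site-packages', 1) returns 'C:\\Python26'.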
def force_to_usr_local(self):
"""
A hack to decide if we need to "force" the installation directories
    to be under /usr/local.  This is because Mac OS X Tiger and
Leopard, by default, put the libraries and scripts in their own
directories under /Library or /System/Library.
"""
return (sys.platform[:6] == 'darwin' and
(self.install_dir[:9] == '/Library/' or
self.install_dir[:16] == '/System/Library/'))
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
if force_to_usr_local(self):
self.install_dir = '/usr/local/lib'
args = self.distribution.script_args
if not set_explicitly("lib", args):
# They didn't explicitly specify the installation
# directory for libraries...
is_win32 = sys.platform == "win32" or args[0] == 'bdist_wininst'
prefix = get_scons_prefix(self.install_dir, is_win32)
if Options.standalone_lib:
# ...but they asked for a standalone directory.
self.install_dir = os.path.join(prefix, "scons")
elif Options.version_lib or not Options.standard_lib:
# ...they asked for a version-specific directory,
# or they get it by default.
self.install_dir = os.path.join(prefix, "scons-%s" % Version)
msg = "Installed SCons library modules into %s" % self.install_dir
Installed.append(msg)
class install_scripts(_install_scripts):
def finalize_options(self):
_install_scripts.finalize_options(self)
if force_to_usr_local(self):
self.install_dir = '/usr/local/bin'
self.build_dir = os.path.join('build', 'scripts')
msg = "Installed SCons scripts into %s" % self.install_dir
Installed.append(msg)
def do_nothing(self, *args, **kw):
pass
def hardlink_scons(self, src, dst, ver):
try: os.unlink(dst)
except OSError: pass
os.link(ver, dst)
def symlink_scons(self, src, dst, ver):
try: os.unlink(dst)
except OSError: pass
os.symlink(os.path.split(ver)[1], dst)
def copy_scons(self, src, dst, *args):
try: os.unlink(dst)
except OSError: pass
self.copy_file(src, dst)
self.outfiles.append(dst)
def report(self, msg, args):
# Wrapper around self.announce, used by older distutils versions.
self.announce(msg % args)
def run(self):
# This "skip_build/build_scripts" block is cut-and-paste from
# distutils.
if not self.skip_build:
self.run_command('build_scripts')
# Custom SCons installation stuff.
if Options.hardlink_scons:
create_basename_script = self.hardlink_scons
elif Options.symlink_scons:
create_basename_script = self.symlink_scons
elif Options.install_scons_script:
create_basename_script = self.copy_scons
else:
create_basename_script = self.do_nothing
if Options.install_version_script:
create_version_script = self.copy_scons
else:
create_version_script = self.do_nothing
inputs = self.get_inputs()
bat_scripts = [x for x in inputs if x[-4:] == '.bat']
non_bat_scripts = [x for x in inputs if x[-4:] != '.bat']
self.outfiles = []
self.mkpath(self.install_dir)
for src in non_bat_scripts:
base = os.path.basename(src)
scons = os.path.join(self.install_dir, base)
scons_ver = scons + '-' + Version
create_version_script(src, scons_ver)
create_basename_script(src, scons, scons_ver)
if Options.install_bat:
if is_win32:
bat_install_dir = get_scons_prefix(self.install_dir, is_win32)
else:
bat_install_dir = self.install_dir
for src in bat_scripts:
scons_bat = os.path.join(bat_install_dir, 'scons.bat')
scons_version_bat = os.path.join(bat_install_dir,
'scons-' + Version + '.bat')
self.copy_scons(src, scons_bat)
self.copy_scons(src, scons_version_bat)
# This section is cut-and-paste from distutils, modulo being
# able
if os.name == 'posix':
try: report = distutils.log.info
except AttributeError: report = self.report
# Set the executable bits (owner, group, and world) on
# all the scripts we just installed.
for file in self.get_outputs():
if self.dry_run:
report("changing mode of %s", file)
else:
mode = ((os.stat(file)[stat.ST_MODE]) | 0555) & 07777
report("changing mode of %s", file)
os.chmod(file, mode)
class build_scripts(_build_scripts):
def finalize_options(self):
_build_scripts.finalize_options(self)
self.build_dir = os.path.join('build', 'scripts')
class install_data(_install_data):
def initialize_options(self):
_install_data.initialize_options(self)
def finalize_options(self):
_install_data.finalize_options(self)
if force_to_usr_local(self):
self.install_dir = '/usr/local'
if Options.install_man:
if is_win32:
dir = 'Doc'
else:
dir = os.path.join('man', 'man1')
self.data_files = [(dir, man_pages)]
man_dir = os.path.join(self.install_dir, dir)
msg = "Installed SCons man pages into %s" % man_dir
Installed.append(msg)
else:
self.data_files = []
description = "Open Source next-generation build tool."
long_description = """Open Source next-generation build tool.
Improved, cross-platform substitute for the classic Make
utility. In short, SCons is an easier, more reliable
and faster way to build software."""
scripts = [
'script/scons',
'script/sconsign',
'script/scons-time',
# We include scons.bat in the list of scripts, even on UNIX systems,
# because we provide an option to allow it be installed explicitly,
# for example if you're installing from UNIX on a share that's
# accessible to Windows and you want the scons.bat.
'script/scons.bat',
]
#if is_win32:
# scripts = scripts + [
# 'script/scons-post-install.py'
# ]
arguments = {
'name' : "scons",
'version' : Version,
'description' : description,
'long_description' : long_description,
'author' : '<NAME>',
'author_email' : '<EMAIL>',
'url' : "http://www.scons.org/",
'packages' : ["SCons",
"SCons.compat",
"SCons.Node",
"SCons.Options",
"SCons.Platform",
"SCons.Scanner",
"SCons.Script",
"SCons.Tool",
"SCons.Tool.MSCommon",
"SCons.Tool.packaging",
"SCons.Variables",
],
'package_dir' : {'' : 'engine'},
'data_files' : [('man/man1', man_pages)],
'scripts' : scripts,
'cmdclass' : {'install' : install,
'install_lib' : install_lib,
'install_data' : install_data,
'install_scripts' : install_scripts,
'build_scripts' : build_scripts}
}
distutils.core.setup(**arguments)
if Installed:
print '\n'.join(Installed)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
StarcoderdataPython
|
91048
|
<reponame>aristoteleo/scribe-py
from . import settings
from datetime import datetime
from time import time as get_time
from platform import python_version
_VERBOSITY_LEVELS_FROM_STRINGS = {'error': 0, 'warn': 1, 'info': 2, 'hint': 3}
def info(*args, **kwargs):
return msg(*args, v='info', **kwargs)
def error(*args, **kwargs):
args = ('Error:',) + args
return msg(*args, v='error', **kwargs)
def warn(*args, **kwargs):
args = ('WARNING:',) + args
return msg(*args, v='warn', **kwargs)
def hint(*args, **kwargs):
return msg(*args, v='hint', **kwargs)
def _settings_verbosity_greater_or_equal_than(v):
if isinstance(settings.verbosity, str):
settings_v = _VERBOSITY_LEVELS_FROM_STRINGS[settings.verbosity]
else:
settings_v = settings.verbosity
return settings_v >= v
def msg(*msg, v=4, time=False, memory=False, reset=False, end='\n',
no_indent=False, t=None, m=None, r=None):
"""Write message to logging output.
Log output defaults to standard output but can be set to a file
    by setting `settings.logfile = 'mylogfile.txt'`.
v : {'error', 'warn', 'info', 'hint'} or int, (default: 4)
0/'error', 1/'warn', 2/'info', 3/'hint', 4, 5, 6...
time, t : bool, optional (default: False)
Print timing information; restart the clock.
    memory, m : bool, optional (default: False)
Print memory information.
reset, r : bool, optional (default: False)
Reset timing and memory measurement. Is automatically reset
when passing one of ``time`` or ``memory``.
end : str (default: '\n')
Same meaning as in builtin ``print()`` function.
no_indent : bool (default: False)
Do not indent for ``v >= 4``.
"""
# variable shortcuts
if t is not None: time = t
if m is not None: memory = m
if r is not None: reset = r
if isinstance(v, str):
v = _VERBOSITY_LEVELS_FROM_STRINGS[v]
if v == 3: # insert "--> " before hints
msg = ('-->',) + msg
if v >= 4 and not no_indent:
msg = (' ',) + msg
if _settings_verbosity_greater_or_equal_than(v):
if not time and not memory and len(msg) > 0:
_write_log(*msg, end=end)
m = msg
def _write_log(*msg, end='\n'):
"""Write message to log output, ignoring the verbosity level.
This is the most basic function.
Parameters
----------
*msg :
One or more arguments to be formatted as string. Same behavior as print
function.
"""
from .settings import logfile
if logfile == '':
print(*msg, end=end)
else:
out = ''
for s in msg:
out += str(s) + ' '
with open(logfile, 'a') as f:
f.write(out + end)
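
# Example usage (illustrative sketch; assumes the caller has configured
# `settings.verbosity` and, optionally, `settings.logfile`; the import path is
# hypothetical):
#
#   from Scribe import logging as logg
#   logg.info('building network')         # printed when verbosity >= 2
#   logg.hint('try a smaller lag')        # prefixed with '--> ', verbosity >= 3
#   logg.warn('missing expression data')  # prefixed with 'WARNING:', verbosity >= 1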
|
StarcoderdataPython
|
4869
|
import numpy as np
def normalize(x):
return x / np.linalg.norm(x)
def norm_sq(v):
return np.dot(v,v)
def norm(v):
return np.linalg.norm(v)
def get_sub_keys(v):
if type(v) is not tuple and type(v) is not list:
return []
return [k for k in v if type(k) is str]
def to_vec3(v):
if isinstance(v, (float, int)):
return np.array([v, v, v], dtype=np.float32)
elif len(get_sub_keys(v)) > 0:
return v
else:
return np.array([v[0], v[1], v[2]], dtype=np.float32)
def to_str(x):
if type(x) is bool:
return "1" if x else "0"
elif isinstance(x, (list, tuple)):
return vec3_str(x)
else:
return str(x)
def float_str(x):
if type(x) is str:
return '_' + x
else:
return str(x)
def vec3_str(v):
if type(v) is str:
return '_' + v
elif isinstance(v, (float, int)):
return 'vec3(' + str(v) + ')'
else:
return 'vec3(' + float_str(v[0]) + ',' + float_str(v[1]) + ',' + float_str(v[2]) + ')'
def vec3_eq(v, val):
if type(v) is str:
return False
for i in range(3):
if v[i] != val[i]:
return False
return True
def smin(a, b, k):
h = min(max(0.5 + 0.5*(b - a)/k, 0.0), 1.0)
return b*(1 - h) + a*h - k*h*(1.0 - h)
def get_global(k):
if type(k) is str:
return _mandelbruh_GLOBAL_VARS[k]
elif type(k) is tuple or type(k) is list:
return np.array([get_global(i) for i in k], dtype=np.float32)
else:
return k
def set_global_float(k):
if type(k) is str:
_mandelbruh_GLOBAL_VARS[k] = 0.0
return k
def set_global_vec3(k):
if type(k) is str:
_mandelbruh_GLOBAL_VARS[k] = to_vec3((0,0,0))
return k
elif isinstance(k, (float, int)):
return to_vec3(k)
else:
sk = get_sub_keys(k)
for i in sk:
_mandelbruh_GLOBAL_VARS[i] = 0.0
return to_vec3(k)
def cond_offset(p):
if type(p) is str or np.count_nonzero(p) > 0:
return ' - vec4(' + vec3_str(p) + ', 0)'
return ''
def cond_subtract(p):
if type(p) is str or p > 0:
return ' - ' + float_str(p)
return ''
def make_color(geo):
if type(geo.color) is tuple or type(geo.color) is np.ndarray:
return 'vec4(' + vec3_str(geo.color) + ', ' + geo.glsl() + ')'
elif geo.color == 'orbit' or geo.color == 'o':
return 'vec4(orbit, ' + geo.glsl() + ')'
else:
raise Exception("Invalid coloring type")
_mandelbruh_GLOBAL_VARS = {}
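
# Small usage sketch (hypothetical values, not part of the library):
#
#   to_vec3(1.0)            # -> array([1., 1., 1.], dtype=float32)
#   vec3_str((0.5, 1, 2))   # -> 'vec3(0.5,1,2)'
#   vec3_str('my_uniform')  # -> '_my_uniform' (string keys reference GLSL variables)
#   smin(1.0, 2.0, 0.5)     # polynomial smooth minimum of two distances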
|
StarcoderdataPython
|
1667593
|
from django.db import models
from django_extensions.db.models import TimeStampedModel
from instance_selector.edit_handlers import InstanceSelectorPanel
from wagtail.admin.edit_handlers import FieldRowPanel, MultiFieldPanel, FieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
class Employee(TimeStampedModel):
first_name = models.CharField(max_length=200)
last_name = models.CharField(max_length=200)
telephone = models.CharField(max_length=200,
null=True,
blank=True)
mobile = models.CharField(max_length=200,
null=True,
blank=True)
email = models.EmailField()
company = models.ForeignKey('Company',
on_delete=models.CASCADE)
picture = models.ForeignKey('wagtailimages.Image', on_delete=models.SET_NULL,
null=True, blank=True)
linkedin = models.URLField(null=True, blank=True)
panels = [
FieldRowPanel(
[
MultiFieldPanel([
FieldPanel('first_name'),
FieldPanel('last_name'),
FieldPanel('telephone'),
FieldPanel('mobile'),
]),
MultiFieldPanel([
InstanceSelectorPanel('company'),
ImageChooserPanel('picture'),
FieldPanel('linkedin'),
FieldPanel('email'),
]),
]
),
]
def __str__(self):
return f'{self.first_name} {self.last_name} [{self.company}]'
@property
def full_name(self):
return f'{self.first_name} {self.last_name}'
@property
def project_count(self):
return self.projects.count()
class Meta:
verbose_name_plural = 'people'
ordering = ['-created']
|
StarcoderdataPython
|
3383519
|
<reponame>TahaEntezari/ramstk
# -*- coding: utf-8 -*-
#
# ramstk.views.gtk3.program_status.panel.py is part of the RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""GTK3 Program Status Panels."""
# Standard Library Imports
from typing import Dict
# Third Party Imports
import pandas as pd
# noinspection PyPackageValidations,PyPackageRequirements
from matplotlib.patches import Ellipse
from pandas.plotting import register_matplotlib_converters
from pubsub import pub
# RAMSTK Package Imports
from ramstk.views.gtk3 import _
from ramstk.views.gtk3.widgets import RAMSTKPlotPanel
register_matplotlib_converters()
class ProgramStatusPlotPanel(RAMSTKPlotPanel):
"""Panel to display the Verification plan efforts."""
# Define private dictionary class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_record_field = "status_id"
_select_msg = "selected_revision"
_tag = "program_status"
_title = _("Verification Plan Effort")
# Define public dictionary class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self) -> None:
"""Initialize an instance of the Burndown Curve panel."""
super().__init__()
# Initialize private dict instance attributes.
# Initialize private list instance attributes.
# Initialize private scalar instance attributes.
# Initialize public dict instance attributes.
# Initialize public list instance attributes.
self.lst_axis_labels = [_(""), _("Total Time [hours]")]
self.lst_legend = [
_("Minimum Expected Time"),
_("Mean Expected Time"),
_("Maximum Expected Time"),
_("Actual Remaining Time"),
]
# Initialize public scalar instance attributes.
self.plot_title = _("Total Verification Effort")
super().do_make_panel()
# Subscribe to PyPubSub messages.
pub.subscribe(self._do_load_panel, "succeed_calculate_verification_plan")
def _do_load_panel(self, attributes: Dict[str, pd.DataFrame]) -> None:
"""Load the burndown curve with the planned and actual status.
:param attributes: a dict containing a pandas DataFrames() for each of
planned burndown, assessment dates/targets, and the actual
progress.
:return: None
"""
self._do_load_plan(attributes["plan"])
self._do_load_assessment_milestones(
attributes["assessed"], attributes["plan"].loc[:, "upper"].max()
)
self.pltPlot.do_add_line(
x_values=list(attributes["actual"].index),
y_values=list(attributes["actual"].loc[:, "time"]),
marker="o",
)
super().do_load_panel()
def _do_load_assessment_milestones(
self, assessed: pd.DataFrame, y_max: float
) -> None:
"""Add the reliability assessment milestones to the plot.
This method will add a vertical line at all the dates identified as
        dates when a reliability assessment is due.  Annotated alongside
these markers are the reliability targets (lower, mean, upper) for that
assessment date.
:return: None
:rtype: None
"""
_y_max = max(1.0, y_max)
for _date in list(assessed.index):
self.pltPlot.axis.axvline(
x=_date,
ymin=0,
ymax=1.05 * _y_max,
color="k",
linewidth=1.0,
linestyle="-.",
)
self.pltPlot.axis.annotate(
str(self.fmt.format(assessed.loc[pd.to_datetime(_date), "upper"]))
+ "\n"
+ str(self.fmt.format(assessed.loc[pd.to_datetime(_date), "mean"]))
+ "\n"
+ str(self.fmt.format(assessed.loc[pd.to_datetime(_date), "lower"])),
xy=(_date, 0.9 * _y_max),
xycoords="data",
xytext=(-55, 0),
textcoords="offset points",
size=12,
va="center",
bbox=dict(boxstyle="round", fc="#E5E5E5", ec="None", alpha=0.5),
arrowprops=dict(
arrowstyle="wedge,tail_width=1.",
fc="#E5E5E5",
ec="None",
alpha=0.5,
patchA=None,
patchB=Ellipse((2, -1), 0.5, 0.5),
relpos=(0.2, 0.5),
),
)
def _do_load_plan(self, plan: pd.DataFrame) -> None:
"""Load the verification plan burndown curve.
:param plan: the pandas DataFrame() containing the planned task end
dates and remaining hours of work (lower, mean, upper).
:return: None
:rtype: None
"""
self.pltPlot.axis.cla()
self.pltPlot.axis.grid(True, which="both")
self.pltPlot.do_load_plot(
**{
"x_values": list(plan.index),
"y_values": list(plan.loc[:, "lower"]),
"plot_type": "date",
"marker": "g--",
}
)
self.pltPlot.do_load_plot(
**{
"x_values": list(plan.index),
"y_values": list(plan.loc[:, "mean"]),
"plot_type": "date",
"marker": "b-",
}
)
self.pltPlot.do_load_plot(
**{
"x_values": list(plan.index),
"y_values": list(plan.loc[:, "upper"]),
"plot_type": "date",
"marker": "r--",
}
)
|
StarcoderdataPython
|
4831250
|
from rx import from_
def print_number(x):
print('The number is {}'.format(x))
from_(range(10)).subscribe(print_number)
|
StarcoderdataPython
|
3295572
|
import os
import random
from typing import Callable, List, Tuple
from PySide2.QtCore import QObject, QRunnable, QSize, Qt, QThreadPool, Signal, Slot
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QComboBox, QGridLayout, QLabel, QListWidget, QListWidgetItem, QWidget
from ..data import get_file_metadata, get_file_paths, get_files_with_tags
from ..logger import get_logger
from ..settings import ROOT_DIR
logger = get_logger(__name__)
class GalleryView(QWidget):
"""A gallery widget with paginated thumbnails, aligned horizontally and scrollable."""
# Always fit to configured height
thumbnail_width = 10000
thumbnail_height = 100
padding_height = 70
images_per_page = 20
def __init__(self, load_image_callback: Callable):
super().__init__()
self._load_image_callback = load_image_callback
# For async thumbnail loading
self._thread_pool = QThreadPool()
# For pagination
self._all_filenames: List[str] = []
self.setFixedHeight(self.thumbnail_height + self.padding_height)
(layout, self._page_select, self._query_label, self._viewing_label,
self._gallery) = self._layout()
self.setLayout(layout)
def _layout(self) -> Tuple[QGridLayout, QComboBox, QLabel, QLabel, QListWidget]:
layout = QGridLayout()
layout.setContentsMargins(0, 0, 0, 0)
page_label = QLabel('Page:')
page_label.setFixedWidth(40)
layout.addWidget(page_label, 0, 0)
# Page select dropdown
page_select = QComboBox()
page_select.currentTextChanged.connect(self._on_page_changed)
page_select.setFixedWidth(60)
layout.addWidget(page_select, 0, 1)
# Query results and image label
query_label = QLabel('Query:')
layout.addWidget(query_label, 0, 2)
viewing_label = QLabel('Viewing:')
layout.addWidget(viewing_label, 0, 3)
# Gallery
gallery = QListWidget()
gallery.setFlow(QListWidget.LeftToRight)
gallery.setWrapping(False)
gallery.setViewMode(QListWidget.IconMode)
gallery.setIconSize(QSize(self.thumbnail_width, self.thumbnail_height))
gallery.currentItemChanged.connect(self._on_item_changed)
layout.addWidget(gallery, 1, 0, 1, 4)
return layout, page_select, query_label, viewing_label, gallery
# -- Public
@property
def page_count(self) -> int:
"""The total number of pages for the current search results."""
count = len(self._all_filenames) // self.images_per_page
if len(self._all_filenames) % self.images_per_page == 0:
return count
return count + 1
def search(self, text: str, shuffle: bool):
tags = text.split()
filenames = get_files_with_tags([t for t in tags if not t.startswith('-')],
[t[1:] for t in tags if t.startswith('-')])
if shuffle:
random.shuffle(filenames)
self.populate(filenames)
self._query_label.setText(
f'Query: {tags} | {len(filenames)} images ({self.page_count} pages)')
def populate(self, filenames: List[str]):
self._all_filenames = filenames
self._page_select.clear()
self._page_select.addItems([str(i) for i in range(1, self.page_count + 1)])
# NOTE: We don't need to call _populate() here since the page change handler does it
# (Which includes the first page)
# -- Callbacks
def _on_page_changed(self, val: str):
if not val or len(self._all_filenames) == 0:
return
# 1-indexed on UI, 0-indexed internally
self._change_page(int(val) - 1)
def _on_item_changed(self, current, _previous):
if not current:
return
filepath = current.data(Qt.StatusTipRole)
self._viewing_label.setText(f'Viewing: {os.path.split(filepath)[-1]}')
self._load_image_callback(filepath)
# -- Helpers
def _change_page(self, idx: int):
start, end = self.images_per_page * idx, self.images_per_page * (idx + 1)
filepaths = get_file_paths(ROOT_DIR, self._all_filenames[start:end])
self._populate(filepaths)
def _populate(self, filepaths: List[str]):
self._gallery.clear()
for i, filepath in enumerate(filepaths):
if not filepath:
continue
# Thumbnail loads are a little slow, so push them to the background
worker = IconWorker(i, filepath)
worker.signal.result.connect(self._set_icon)
self._thread_pool.start(worker)
def _set_icon(self, result: Tuple[int, QIcon, str, str]):
(idx, icon, filepath, label) = result
item = QListWidgetItem(icon, label)
# Store filepath to be retrieved by other components
item.setData(Qt.StatusTipRole, filepath)
self._gallery.addItem(item)
# Signals must be defined on a QObject (or descendant)
class IconWorkerSignal(QObject):
result = Signal(tuple)
class IconWorker(QRunnable):
"""An async worker used to load thumbnails in the background."""
def __init__(self, item_idx: int, filepath: str):
super().__init__()
self._item_idx = item_idx
self._filepath = filepath
self.signal = IconWorkerSignal()
@Slot()
def run(self):
icon = QIcon(self._filepath)
label = self._get_label()
self.signal.result.emit((self._item_idx, icon, self._filepath, label))
def _get_label(self):
filename = os.path.split(self._filepath)[-1]
meta = get_file_metadata(filename)
return f'Tags: {meta["tag_count"]}'
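
# Minimal usage sketch (illustrative; assumes the surrounding package's data and
# settings modules are available and that the widget is embedded in a Qt window;
# viewer.load_image is a hypothetical callback):
#
#   gallery = GalleryView(load_image_callback=viewer.load_image)
#   gallery.search("landscape -night", shuffle=True)   # '-' prefix excludes a tag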
|
StarcoderdataPython
|
3344378
|
from .NNMetricFactory import *
from typing import List, Dict, DefaultDict
import numpy as np
import torch
from collections import defaultdict
from torch.utils.tensorboard import SummaryWriter
from typing import Union
import sys
class RunNNMetrics( object ):
"""
A class for running and writing NNSimpleMetrics during or after training.
NOTE - update function flattens the data. Useful, but not generalizable to more complex models.
"""
def __init__(self, metrics: List[str] = None):
self.metrics = defaultdict()
self.data = defaultdict()
self.creator = NNMetricFactory()
if metrics:
for metric in metrics:
self.metrics[metric] = self.creator.create_metric(metric)
self.pred = []
self.labels = []
self.loss = 0
def update(self, pred: Union[np.ndarray, torch.Tensor], labels: Union[np.ndarray, torch.Tensor], loss_mean: float):
"""
Add predictions and labels to stored results.
loss_mean should be the mean. We'll take an even mean over the batches.
https://discuss.pytorch.org/t/on-running-loss-and-average-loss/107890
"""
if type(pred) != np.ndarray:
#print("Flattening tensor")
p = pred.detach().flatten()
l = labels.detach().flatten()
self.pred.append(p)
self.labels.append(l)
self.loss += loss_mean * p.shape[0]
else:
self.pred.append(pred)
self.labels.append(labels)
self.loss += loss_mean * pred.shape[0]
#print("Updating val metrics")
#print("Loss", self.loss)
def clear(self):
"""
Clear cached results
"""
self.pred.clear()
self.labels.clear()
self.loss = 0
def add_metric(self, metric: str):
"""
Add metric name to list.
"""
if metric not in self.metrics:
self.metrics[metric] = self.creator.create_metric(metric)
def add_metric_class(self, metric: NNSimpleMetric):
"""
Add a NNSimpleMetric to run.
"""
if metric.name not in self.metrics:
self.metrics[metric.name] = metric
def get_stored(self):
"""
        Get pred, labels as concatenated data.
:return:
"""
return self.__prepare(self.pred), self.__prepare(self.labels)
def __prepare(self, data):
"""
        Prepare stored data: concatenate if numpy, or cat if tensor, then move to CPU and convert to numpy.
:param data:
:return:
"""
if type(data[0]) == np.ndarray:
return np.concatenate(data)
else:
return torch.cat(data).cpu().numpy()
def run_stored(self, show=False) -> DefaultDict:
"""
Run metrics on stored data, return data.
:return:
"""
return self.run(self.__prepare(self.pred), self.__prepare(self.labels), show)
def run(self, pred: Union[np.ndarray, torch.Tensor], labels: Union[np.ndarray, torch.Tensor], show = False) -> DefaultDict:
"""
        Run the metrics set up in this class and return the data. The data is also stored for later writing.
"""
if type(pred) != np.ndarray:
#print("Flattening tensor")
p = pred.cpu().detach().flatten()
l = labels.cpu().detach().flatten()
else:
p = pred
l = labels
self.data.clear()
for name, metric in self.metrics.items():
data = metric.apply(p, l)
if show:
metric.show()
self.data.update(data)
if self.loss:
self.data['loss'] = self.loss/len(p)
return self.data
def show(self):
for m in self.metrics.values():
if m.has_data():
m.show()
def write(self, writer: SummaryWriter, epoch = None, use_metric_name = True, prefix = "", show=False):
"""
Write to the summary writer. Optionally use the metric name as a group to group metrics.
        If predictions and labels are present and haven't been run, we run them.
Optionally give epoch if we are tracking these metrics through training.
Returns raw data names and data that is actually single-valued
"""
all_data = defaultdict()
for metric in self.metrics.values():
data = metric.get_data()
for name in data:
#print(type(data[name]))
#print(data[name])
if type(data[name]) == list: continue
try:
if use_metric_name:
final_name = metric.name()+'/'+name
else:
final_name = name
if prefix:
final_name = prefix+"/"+final_name
writer.add_scalar(final_name, data[name], epoch)
all_data[name] = data[name]
#Hard to get length of pieces of data if more than one and not lists, but still ndarray/etc. bah.
except Exception:
continue
if self.loss:
writer.add_scalar(prefix+"/loss", self.data['loss'], epoch)
all_data['loss'] = self.data['loss']
return all_data
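
# Sketch of a typical validation loop using this runner (hypothetical metric name
# and user-supplied step() function; the available names depend on NNMetricFactory):
#
#   runner = RunNNMetrics(metrics=["mae"])
#   for batch in loader:
#       pred, labels, loss = step(batch)
#       runner.update(pred, labels, loss.item())
#   results = runner.run_stored(show=True)
#   runner.write(writer, epoch=epoch, prefix="val")
#   runner.clear()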
|
StarcoderdataPython
|
141174
|
#! /usr/bin/python2
# -*- coding: utf8 -*-
import pykka
import time
from Manager import Manager
class Big_Brother(pykka.ThreadingActor):
def __init__(self):
super(Big_Brother, self).__init__()
self.pool = {'managers':[]}
def start_manager(self, token):
self.pool['managers'].append(Manager.start(token).proxy())
return len(self.pool['managers']) #id of the manager
def run_manager(self, manager_id):
self.pool['managers'][manager_id].runAll()
def learn(self, manager_id, k):
self.pool['managers'][manager_id].learn(k)
def erase_manager_raw_data(self, manager_id):
self.pool['managers'][manager_id].erase_raw_data()
def erase_manager_parsed_data(self, manager_id):
self.pool['managers'][manager_id].erase_parsed_data()
def erase_manager_clusterings(self, manager_id):
self.pool['managers'][manager_id].erase_clusterings()
def stop_manager(self, manager_id):
answer = self.pool['managers'][manager_id].stop_slaves()
answer.get() # block thread
self.pool['managers'][manager_id].stop()
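
# Minimal usage sketch (illustrative; assumes a valid token for Manager):
#
#   big_brother = Big_Brother.start().proxy()
#   manager_id = big_brother.start_manager(token).get() - 1  # start_manager returns the new pool size
#   big_brother.run_manager(manager_id)
#   big_brother.stop_manager(manager_id)
#   big_brother.stop()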
|
StarcoderdataPython
|
166629
|
<reponame>xyz1396/Projects
#!/usr/bin/python
import sys, getopt, warnings, os, re
def getConfigMasterKeyValue (sMasterKey, dictConfigKeyValues) :
dictKeyValueSet = {}
for currentKey, currentValue in dictConfigKeyValues.items() :
if( currentKey.startswith(sMasterKey+"{") and currentKey.endswith("}") and ((len(sMasterKey) + 2) < len(currentKey))) :
coreKey = currentKey[len(sMasterKey)+1: len(currentKey)-1]
dictKeyValueSet [coreKey] = currentValue
return dictKeyValueSet
def parseConfigLine (sLine, sSectionName) :
# sSectionName is a list, but only the first value is used
currentKey = ""
currentValue = ""
if ((sLine[0] == "[") and (sLine[len(sLine)-1] == "]" )) :
sSectionName[0] = sLine
elif (sSectionName[0] == "") :
print "no section name"
else :
twoParts = sLine.split("=")
if (len(twoParts) != 2) :
print "wrong line: "+sLine
else :
currentKey = sSectionName[0] + twoParts[0].rstrip()
currentValue = twoParts[1].lstrip()
#print currentKey+"=>"+currentValue
return [currentKey, currentValue]
def parseConfigKeyValues (filepath) :
configFile = open(filepath)
sSectionName = [""]
dictConfigKeyValues = {}
for sLine in configFile.readlines() :
poundPos = sLine.find("#")
if (poundPos > -1) :
sLine = sLine[0:poundPos]
sLine = sLine.strip()
if (sLine == "") :
continue
else :
#print "!!!"+sLine+"!!!"
currentKey, currentValue = parseConfigLine (sLine, sSectionName)
if (currentKey != "") and (currentValue != "" ):
# print currentKey+"=>"+currentValue
if (dictConfigKeyValues.get(currentKey) == None ):
dictConfigKeyValues[currentKey] = currentValue
else :
print currentKey + " has existed"
configFile.close()
# for currentKey, currentValue in dictConfigKeyValues.items() :
# print currentKey, currentValue
return dictConfigKeyValues
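
# Illustrative example (hypothetical config file, not part of the original script):
#
#   [Peptide]
#   Mass_Tolerance = 0.05    # inline comments after '#' are stripped
#
# parseConfigKeyValues("config.txt") would return {"[Peptide]Mass_Tolerance": "0.05"},
# i.e. each key is prefixed with its enclosing section name.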
|
StarcoderdataPython
|