max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
sources/praline/client/project/pipeline/orchestration.py | dansandu/praline | 0 | 12797351 | from praline.client.project.pipeline.cache import Cache
from praline.client.project.pipeline.stage_resources import StageResources
from praline.client.project.pipeline.stages.stage import Stage
from praline.client.repository.remote_proxy import RemoteProxy
from praline.common.algorithm.graph.instance_traversal import multiple_instance_depth_first_traversal
from praline.common.algorithm.graph.simple_traversal import root_last_traversal
from praline.common.file_system import FileSystem, join
from praline.common.tracing import trace
from typing import Any, Dict, List
class MultipleSuppliersError(Exception):
pass
class CyclicStagesError(Exception):
pass
class UnsatisfiableStageError(Exception):
pass
class ResourceNotSuppliedError(Exception):
pass
def get_stage_program_arguments(stage: str, program_arguments: Dict[str, Any]):
arguments = {
'global': program_arguments['global'],
'byStage': program_arguments['byStage'].get(stage, {})
}
return arguments
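# Illustrative sketch of the shape this helper works with (values are made up):
#   program_arguments = {'global': {'verbose': True},
#                        'byStage': {'compile': {'jobs': 4}}}
#   get_stage_program_arguments('compile', program_arguments)
#   -> {'global': {'verbose': True}, 'byStage': {'jobs': 4}}
# A stage with no per-stage entry gets an empty 'byStage' dict.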
@trace(parameters=[])
def create_pipeline(target_stage: str,
stages: Dict[str, Stage],
file_system: FileSystem,
program_arguments: Dict[str, Any],
configuration: Dict[str, Any]) -> List[str]:
def on_cycle(cycle: List[str]):
raise CyclicStagesError(f"cyclic dependencies for stages {cycle}")
def visitor(stage_name: str):
requirements_set = stages[stage_name].requirements
required_stages_set = []
for requirements in requirements_set:
required_stages = []
for requirement in requirements:
suppliers = [stage.name for stage in stages.values() if requirement in stage.output]
if not suppliers:
raise UnsatisfiableStageError(f"stage '{stage_name}' cannot be satisfied because no stage supplies resource '{requirement}'")
elif len(suppliers) > 1:
raise MultipleSuppliersError(f"resource '{requirement}' is supplied by multiple stages: {', '.join(suppliers)}")
elif suppliers[0] not in required_stages:
required_stages.append(suppliers[0])
required_stages_set.append(required_stages)
return required_stages_set
def validator(stage: str, subtree: Dict[str, List[str]]):
stage_program_arguments = get_stage_program_arguments(stage, program_arguments)
return stages[stage].predicate(file_system, stage_program_arguments, configuration)
trees = multiple_instance_depth_first_traversal(target_stage, visitor, validator, on_cycle)
if trees:
stage_subtree = trees[0]
stage_order = root_last_traversal(target_stage, lambda n: stage_subtree[n][1])
pipeline = [(stage_subtree[stage][0], stage) for stage in stage_order]
return pipeline
raise UnsatisfiableStageError(f"could not create a pipeline to satisfy stage '{target_stage}'")
@trace
def invoke_stage(target_stage: str, stages: Dict[str, Stage], file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy) -> None:
resources = {}
pipeline = create_pipeline(target_stage, stages, file_system, program_arguments, configuration)
project_directory = file_system.get_working_directory()
cache_path = join(project_directory, 'target', 'cache.pickle')
for activation, stage_name in pipeline:
stage = stages[stage_name]
stage_resources = StageResources(stage_name, activation, {resource : resources[resource] for resource in stage.requirements[activation]}, stage.output)
stage_program_arguments = get_stage_program_arguments(stage_name, program_arguments)
if stage.cacheable:
with Cache(file_system, cache_path) as cache:
cache[stage_name] = stage_cache = cache.get(stage_name, {})
stage.invoker(file_system, stage_resources, stage_cache, stage_program_arguments, configuration, remote_proxy)
else:
stage.invoker(file_system, stage_resources, None, stage_program_arguments, configuration, remote_proxy)
for resource in stage.output:
if resource not in stage_resources:
raise ResourceNotSuppliedError(f"stage '{stage_name}' didn't supply resource '{resource}'")
resources.update(stage_resources.resources)
| 1.9375 | 2 |
results texture.py | wr0gers/PixelMiner | 0 | 12797352 |
import os
import re
import numpy as np
import matplotlib.pyplot as plt
from ccc import concordance_correlation_coefficient
from scipy.stats import wilcoxon, ttest_rel, ttest_ind, mannwhitneyu, ranksums
from scipy.stats import f, shapiro, bartlett, f_oneway, kruskal
from statsmodels.stats.weightstats import ztest
from scipy.stats import binom_test
from radiomics import featureextractor, getTestCase
import SimpleITK as sitk
import pandas as pd
from random import randint
from tqdm import tqdm
from PIL import Image, ImageOps, ImageEnhance
#from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import image
import matplotlib.pyplot as plt
from functools import partial
from get_features_functions import get_features, final, get_results, display_results
from get_features_functions import listfiles, unnormalize, get_all_features
from get_features_functions import rmses, wilcoxons, ttests, para_non_para
from get_features_functions import get_all_features, get_rmses, normalize
def unnormalize(img):
img += 1
img /= 2
img *= (1024 + 3071)
#img -= 1024
#img *= 255
img = img.astype(np.uint8)
print(img.min(), img.max())
return img
def normalize(a):
b = (a - np.min(a))/np.ptp(a)
return b
def rse(x, y):
diff = x - y
sqrd = diff ** 2
sqrt = np.sqrt(sqrd)
return sqrt
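# Note: for real-valued arrays this is just the element-wise absolute error,
# since np.sqrt((x - y) ** 2) == np.abs(x - y).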
n = 0
path = r'H:\Data\W'
files = os.listdir(path)
print(files[14])
arr1 = np.load(os.path.join(path, files[14]))
arr1 = unnormalize(arr1)
print(arr1.dtype, arr1.min(), arr1.max())
print(files[8])
arr2 = np.load(os.path.join(path, files[8]))
features = get_features(arr1, arr2)
features = [key for key in features.keys() if key.find('diagnostic') < 0]
features = [feature[9:] for feature in features]
features = [re.sub(r"(\w)([A-Z])", r"\1 \2", feature) for feature in features]
features = [feature.split('_') for feature in features]
features = np.array(features)
lung_itp = {}
lung_tru = get_all_features(path, 'tru_one', 'lung')
lung_cnn = get_all_features(path, 'PixelCNN', 'lung')
lung_itp['Linear'] = get_all_features(path, 'Linear', 'lung')
lung_itp['BSpline'] = get_all_features(path, 'BSpline', 'lung')
lung_itp['Cosine'] = get_all_features(path, 'Cosine', 'lung')
lung_itp['Nearest'] = get_all_features(path, 'Nearest', 'lung')
lung_results = get_results(lung_tru, lung_cnn, lung_itp)
display_results(lung_results, features)
cnn_diff = rse(lung_tru, lung_cnn)
lin_diff = rse(lung_tru, lung_itp['Linear'])
cos_diff = rse(lung_tru, lung_itp['Cosine'])
ner_diff = rse(lung_tru, lung_itp['Nearest'])
bsp_diff = rse(lung_tru, lung_itp['BSpline'])
t = cnn_diff.shape[0] * cnn_diff.shape[1]
print()
print('Percent Greater:')
print('Linear\t\t\t Win Sinc\t\t\t Nearest\t\t\t BSpline\t\t\t PixelMiner')
print('\t-\t\t\t' , (lin_diff < cos_diff).sum() / t, (lin_diff < ner_diff).sum() / t, (lin_diff < bsp_diff).sum() / t, (lin_diff < cnn_diff).sum() / t)
print((cos_diff < lin_diff).sum() / t, '\t-\t\t\t' , (cos_diff < ner_diff).sum() / t, (cos_diff < bsp_diff).sum() / t, (cos_diff < cnn_diff).sum() / t)
print((ner_diff < lin_diff).sum() / t, (ner_diff < cos_diff).sum() / t, '\t-\t\t\t' , (ner_diff < bsp_diff).sum() / t, (ner_diff < cnn_diff).sum() / t)
print((bsp_diff < lin_diff).sum() / t, (bsp_diff < cos_diff).sum() / t, (bsp_diff < ner_diff).sum() / t, '\t-\t\t\t' , (bsp_diff < cnn_diff).sum() / t)
print((cnn_diff < lin_diff).sum() / t, (cnn_diff < cos_diff).sum() / t, (cnn_diff < ner_diff).sum() / t, (cnn_diff < bsp_diff).sum() / t, '\t-\t\t' )
error = np.array([cnn_diff, lin_diff, cos_diff, ner_diff, bsp_diff])
n_error = np.zeros((5, 50, 51))
for i in range(error.shape[-1]):
n_error[:, :, i] = normalize(error[:, :, i])
print()
print('NRMSE Mean:')
print('PixelMiner:', n_error[0].mean())
print('Linear:', n_error[1].mean())
print('Win Sinc:', n_error[2].mean())
print('Nearest:', n_error[3].mean())
print('BSpline', n_error[4].mean())
print()
print('NRMSE STD:')
print('PixelMiner:', n_error[0].std())
print('Linear:', n_error[1].std())
print('Win Sinc:', n_error[2].std())
print('Nearest:', n_error[3].std())
print('BSpline', n_error[4].std())
ccc_cnn = np.array([concordance_correlation_coefficient(lung_tru[:, i], lung_cnn[:, i]) for i in range(lung_tru.shape[1])])
ccc_lin = np.array([concordance_correlation_coefficient(lung_tru[:, i], lung_itp['Linear'][:, i]) for i in range(lung_tru.shape[1])])
ccc_bsp = np.array([concordance_correlation_coefficient(lung_tru[:, i], lung_itp['BSpline'][:, i]) for i in range(lung_tru.shape[1])])
ccc_ws = np.array([concordance_correlation_coefficient(lung_tru[:, i], lung_itp['Cosine'][:, i]) for i in range(lung_tru.shape[1])])
ccc_nn = np.array([concordance_correlation_coefficient(lung_tru[:, i], lung_itp['Nearest'][:, i]) for i in range(lung_tru.shape[1])])
cccs = np.vstack((ccc_cnn, ccc_bsp, ccc_nn, ccc_ws, ccc_lin))
print('Mean CCC')
print('PixelMiner', cccs[0].mean(), '\n'
'Win Sinc', cccs[3].mean(), '\n'
'BSpline', cccs[1].mean(), '\n'
'Nearest', cccs[2].mean(), '\n'
'Linear', cccs[4].mean())
print()
print('Reproducibility')
thresh = .85
print('PixelMiner', (cccs[0] > thresh).sum() / 51, '\n'
'Win Sinc', (cccs[3] > thresh).sum() / 51, '\n'
'BSpline', (cccs[1] > thresh).sum() / 51, '\n'
'Nearest', (cccs[2] > thresh).sum() / 51, '\n'
'Linear', (cccs[4] > thresh).sum() / 51)
print('Wilcoxons:')
print('Win Sinc:', wilcoxon(n_error[:, 0, :].flatten(), n_error[:, 2, :].flatten()))
print('Linear:', wilcoxon(n_error[:, 0, :].flatten(), n_error[:, 1, :].flatten()))
print('BSpline:', wilcoxon(n_error[:, 0, :].flatten(), n_error[:, 4, :].flatten()))
print('Nearest:', wilcoxon(n_error[:, 0, :].flatten(), n_error[:, 3, :].flatten()))
shape = n_error.shape[0] * n_error.shape[2]
print('Binomial test:')
print('Win Sinc:', binom_test((n_error[:, 0, :] < n_error[:, 2, :]).sum() , shape))
print('Linear:', binom_test((n_error[:, 0, :] < n_error[:, 1, :]).sum() , shape))
print('BSpline:', binom_test((n_error[:, 0, :] < n_error[:, 4, :]).sum() , shape))
print('Nearest:', binom_test((n_error[:, 0, :] < n_error[:, 3, :]).sum() , shape))
| 2.21875 | 2 |
base/command_parse/argparse_readfile_example.py | Bio-MingChen/python_best_practice | 0 | 12797353 | import sys
import argparse
from textwrap import dedent
def main(kwargs):
with kwargs["infile"] as indata,\
kwargs["ofile"] as odata:
for line in indata:
odata.write(line)
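# Example invocations of this script (file names are hypothetical):
#   python argparse_readfile_example.py input.txt output.txt
#   cat input.txt | python argparse_readfile_example.py
# With no positional arguments it simply copies stdin to stdout, because both
# infile and ofile use nargs="?" with stdin/stdout defaults (see below).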
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog = "argparse_example", # default is sys.argv[0],
formatter_class = argparse.RawDescriptionHelpFormatter,
description = dedent('''
Please do not mess up this text!
--------------------------------
I have indented it
exactly the way
I want it
'''),
epilog = "Contact:<EMAIL>"
)
parser.add_argument("infile", nargs="?", type=argparse.FileType("r"), default=sys.stdin)
parser.add_argument("ofile", nargs="?", type=argparse.FileType("w"), default=sys.stdout)
args = parser.parse_args()
main(vars(args)) | 2.890625 | 3 |
src/astrometry_azel/io.py | scienceopen/astrometry | 6 | 12797354 |
"""
Image stack -> average -> write FITS
Because ImageJ has been a little buggy about writing FITS files, in particular the header
that astrometry.net then crashes on, we wrote this quick script to ingest a variety
of files and average the specified frames then write a FITS.
The reason we average a selected stack of images is to reduce the noise for use in
astrometry.net
The error you might get from an ImageJ saved FITS when reading in:
PyFits, AstroPy, or ImageMagick is:
IOError: Header missing END card.
"""
from __future__ import annotations
from pathlib import Path
import numpy as np
from datetime import datetime
from astropy.io import fits
import logging
try:
import imageio
except ImportError:
imageio = None
try:
import h5py
except ImportError:
h5py = None
try:
from scipy.io import loadmat
except ImportError:
loadmat = None
def meanstack(
infn: Path, Navg: int, ut1: datetime = None, method: str = "mean"
) -> tuple[np.ndarray, datetime]:
infn = Path(infn).expanduser().resolve(strict=True)
# %% parse indices to load
if isinstance(Navg, slice):
key = Navg
elif isinstance(Navg, int):
key = slice(0, Navg)
elif len(Navg) == 1:
key = slice(0, Navg[0])
elif len(Navg) == 2:
key = slice(Navg[0], Navg[1])
else:
raise ValueError(f"not sure what you mean by Navg={Navg}")
# %% load images
"""
some methods handled individually to improve efficiency with huge files
"""
if infn.suffix == ".h5":
if h5py is None:
raise ImportError("pip install h5py")
img, ut1 = _h5mean(infn, ut1, key, method)
elif infn.suffix in (".fits", ".new"):
# mmap doesn't work with BZERO/BSCALE/BLANK
with fits.open(infn, mode="readonly", memmap=False) as f:
img = collapsestack(f[0].data, key, method)
elif infn.suffix == ".mat":
if loadmat is None:
raise ImportError("pip install scipy")
img = loadmat(infn)
img = collapsestack(img["data"].T, key, method) # matlab is fortran order
else: # .tif etc.
if imageio is None:
raise ImportError("pip install imageio")
img = imageio.imread(infn, as_gray=True)
if img.ndim in (3, 4) and img.shape[-1] == 3: # assume RGB
img = collapsestack(img, key, method)
return img, ut1
def _h5mean(fn: Path, ut1: datetime, key: slice, method: str) -> tuple[np.ndarray, datetime]:
with h5py.File(fn, "r") as f:
img = collapsestack(f["/rawimg"], key, method)
# %% time
if ut1 is None:
try:
ut1 = f["/ut1_unix"][key][0]
except KeyError:
pass
# %% orientation
try:
img = np.rot90(img, k=f["/params"]["rotccw"])
except KeyError:
pass
return img, ut1
def collapsestack(img: np.ndarray, key: slice, method: str) -> np.ndarray:
if img.ndim not in (2, 3, 4):
raise ValueError("only 2D, 3D, or 4D image stacks are handled")
# %% 2-D
if img.ndim == 2:
return img
# %% 3-D
if method == "mean":
func = np.mean
elif method == "median":
func = np.median
else:
raise TypeError(f"unknown method {method}")
colaps = func(img[key, ...], axis=0).astype(img.dtype)
assert colaps.ndim > 0
assert isinstance(colaps, np.ndarray)
return colaps
def writefits(img: np.ndarray, outfn: Path):
outfn = Path(outfn).expanduser()
f = fits.PrimaryHDU(img)
try:
f.writeto(outfn, overwrite=False, checksum=True)
# no close()
print("writing", outfn)
except OSError:
logging.warning(f"did not overwrite existing {outfn}")
def readh5coord(fn: Path) -> tuple[float, float]:
if not fn.suffix == ".h5":
return None
with h5py.File(fn, "r") as f:
try:
latlon = (f["/sensorloc"]["glat"], f["/sensorloc"]["glon"])
except KeyError:
try:
latlon = f["/lla"][:2]
except KeyError:
return None
return latlon
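# Minimal usage sketch (file names are hypothetical):
#   img, t = meanstack("stack.h5", Navg=10, method="mean")
#   writefits(img, "stack_mean.fits")
# Navg may be an int, a slice, or a 1- or 2-element sequence selecting the frames to average.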
| 2.359375 | 2 |
Scripts/VRM_DeleteLeafBones.py | TheHoodieGuy02/VRoid2UE4_BlenderScripts | 3 | 12797355 | # Obliterate unused leaf bones in VRoid models!
import bpy
context = bpy.context
obj = context.object
# By default, VRM Importer includes leaf bones automatically.
# It's cool and stuff, but it's not necessary for Blender, and will spew out
# scary long warning when imported to UE4.
# Use this script to obliterate those leaf bones in one click.
if obj.type == 'ARMATURE':
armature = obj.data
bpy.ops.object.mode_set(mode='EDIT')
for bone in armature.edit_bones:
if bone.name.endswith("_end") :
armature.edit_bones.remove(bone)
else:
continue
bpy.ops.object.mode_set(mode='OBJECT') | 1.757813 | 2 |
20.armstrongInRange.py | A-Little-Hat/Python-Basics | 0 | 12797356 | s=int(input("enter start "))
e=int(input("enter a end "))
for n in range(s,e+1):
order=len(str(n))
num=0
backup=n
while(n>0):
x=n%10
num+=x**order
n=n//10
if(num==backup):
print(num,"\t",end='') | 3.375 | 3 |
src/app/app.py | Rexhaif/RSA-Core | 2 | 12797357 |
from flask import Flask, jsonify
from flask_jwt_extended import JWTManager
from .config import config
from .controllers import *
from .utils import ListConverter
from .security import *
application = Flask("rsa-core")
application.config['JWT_SECRET_KEY'] = config['jwt_key']
jwt = JWTManager(application)
application.url_map.converters['list'] = ListConverter
application.register_blueprint(docs)
application.register_blueprint(corps)
application.register_blueprint(read)
application.register_blueprint(manage)
application.register_blueprint(admin)
security_provider = config['security_provider']
@application.route("/")
def index():
return jsonify({"msg": "ok"})
#@application.route("/tokens")
#def tokens():
# return jsonify(security_provider.create_temp_tokens())
if __name__ == "__main__":
application.run(host='0.0.0.0', port=config['port'])
| 2.375 | 2 |
mp/data/datasets/ds_mr_hippocampus_harp.py | MECLabTUDA/OOD-Gen | 0 | 12797358 | # ------------------------------------------------------------------------------
# Hippocampus segmentation task for the HarP dataset
# (http://www.hippocampal-protocol.net/SOPs/index.php)
# ------------------------------------------------------------------------------
import os
import re
import SimpleITK as sitk
import nibabel as nib
import numpy as np
import mp.data.datasets.dataset_utils as du
from mp.data.datasets.dataset_segmentation import SegmentationDataset, SegmentationInstance
from mp.paths import storage_data_path
from mp.utils.mask_bounding_box import mask_bbox_3D
from mp.utils.load_restore import join_path
class HarP(SegmentationDataset):
r"""Class for the segmentation of the HarP dataset,
found at http://www.hippocampal-protocol.net/SOPs/index.php
with the masks as .nii files and the scans as .mnc files.
"""
def __init__(self, subset=None, hold_out_ixs=None):
# Part is either: "Training", "Validation" or "All"
default = {"Part": "All"}
if subset is not None:
default.update(subset)
subset = default
else:
subset = default
if hold_out_ixs is None:
hold_out_ixs = []
global_name = 'HarP'
name = du.get_dataset_name(global_name, subset)
dataset_path = os.path.join(storage_data_path, global_name)
original_data_path = du.get_original_data_path(global_name)
# Build instances
instances = []
folders = []
if subset["Part"] in ["Training", "All"]:
folders.append(("100", "Training"))
if subset["Part"] in ["Validation", "All"]:
folders.append(("35", "Validation"))
for orig_folder, dst_folder in folders:
# Paths with the sub-folder for the current subset
dst_folder_path = os.path.join(dataset_path, dst_folder)
# Copy the images if not done already
if not os.path.isdir(dst_folder_path):
_extract_images(original_data_path, dst_folder_path, orig_folder)
# Fetch all patient/study names
study_names = set(file_name.split('.nii')[0].split('_gt')[0] for file_name
in os.listdir(os.path.join(dataset_path, dst_folder)))
for study_name in study_names:
instances.append(SegmentationInstance(
x_path=os.path.join(dataset_path, dst_folder, study_name + '.nii.gz'),
y_path=os.path.join(dataset_path, dst_folder, study_name + '_gt.nii.gz'),
name=study_name,
group_id=None
))
label_names = ['background', 'hippocampus']
super().__init__(instances, name=name, label_names=label_names,
modality='T1w MRI', nr_channels=1, hold_out_ixs=hold_out_ixs)
def _extract_images(source_path, target_path, subset):
r"""Extracts images, merges mask labels (if specified) and saves the
modified images.
"""
# Folder 100 is for training (100 subjects), 35 subjects are left over for validation
affine = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
images_path = os.path.join(source_path, subset)
labels_path = os.path.join(source_path, f'Labels_{subset}_NIFTI')
# Create directories
if not os.path.isdir(target_path):
os.makedirs(target_path)
files_with_swapped_masks = {"ADNI_007_S_1304_74384_ACPC.mnc",
"ADNI_016_S_4121_280306_ACPC.mnc",
"ADNI_029_S_4279_265980_ACPC.mnc",
"ADNI_136_S_0429_109839_ACPC.mnc"}
# For each MRI, there are 2 segmentation (left and right hippocampus)
for filename in os.listdir(images_path):
# Loading the .mnc file and converting it to a .nii.gz file
minc = nib.load(os.path.join(images_path, filename))
x: np.array = nib.Nifti1Image(np.asarray(minc.dataobj), affine=affine).get_data()
# We need to recover the study name of the image name to construct the name of the segmentation files
match = re.match(r"ADNI_[0-9]+_S_[0-9]+_[0-9]+", filename)
if match is None:
raise Exception(f"A file ({filename}) does not match the expected file naming format")
# For each side of the brain
for side in ("_L", "_R"):
study_name = match[0] + side
y = sitk.ReadImage(os.path.join(labels_path, study_name + ".nii"))
y = sitk.GetArrayFromImage(y)
# Shape expected: (189, 233, 197)
assert x.shape == y.shape
# BUGFIX: Some segmentation have some weird values eg {26896.988, 26897.988} instead of {0, 1}
y = (y - np.min(y.flat)).astype(np.uint32)
# Cropping bounds computed to fit the ground truth
if (side == "_L") ^ (filename in files_with_swapped_masks):
y = y[40: 104, 78: 142, 49: 97]
x_cropped = x[40: 104, 78: 142, 49: 97]
else:
y = y[40: 104, 78: 142, 97: 145]
x_cropped = x[40: 104, 78: 142, 97: 145]
# Need to do move an axis as numpy coordinates are [z, y, x] and SimpleITK's are [x, y, z]
x_cropped = np.moveaxis(x_cropped, [0, 2], [2, 0])
# Changing the study name if needed
if filename in files_with_swapped_masks:
study_name = match[0] + ("_R" if side == "_L" else "_L")
# Save new images so they can be loaded directly
sitk.WriteImage(sitk.GetImageFromArray(y),
join_path([target_path, study_name + "_gt.nii.gz"]))
nib.save(nib.Nifti1Image(x_cropped, affine),
join_path([target_path, study_name + ".nii.gz"]))
| 2.453125 | 2 |
elements/utils.py | jhnnsrs/arbeider | 0 | 12797359 |
def buildRepresentationName(name, nodeid):
if nodeid is not None:
return f"{name} {nodeid}"
else:
return f"{name}"
def buildTransformationName(roi, representation, transformer, input_transformation, nodeid):
if input_transformation is None:
return f"{transformer.name}_rep-{representation.id}_roi-{roi.id}_node-{nodeid}"
else:
return f"{transformer.name}_trans-{input_transformation.id}_rep-{representation.id}_roi-{roi.id}_node-{nodeid}" | 2.75 | 3 |
udacity/deep-learning/assignments/notmnist.py | balazssimon/ml-playground | 0 | 12797360 | # These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
# code changed to Python3
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from sklearn.metrics.pairwise import cosine_similarity
from urllib.request import urlretrieve
import pickle
import IPython
# Config the matlotlib backend as plotting inline in IPython
# %matplotlib inline
url = 'http://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 1% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall()
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
#IPython.display.display_png('notMNIST_large/B/MDEtMDEtMDAudHRm.png')
#IPython.display.display_png('notMNIST_large/J/Nng3b2N0IEFsdGVybmF0ZSBSZWd1bGFyLnR0Zg==.png')
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
def load_dataset(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
# Display a random matrix with a specified figure number and a grayscale colormap
# largeNameA = train_datasets[0]
# print(largeNameA)
# largeDataA = load_dataset(largeNameA)
# img1 = largeDataA[0, :, :]
# plt.matshow(img1, cmap=plt.cm.gray)
# plt.show()
#
# smallNameJ = test_datasets[9]
# print(smallNameJ)
# smallDataJ = load_dataset(smallNameJ)
# img2 = smallDataJ[0, :, :]
# plt.matshow(img2, cmap=plt.cm.gray)
# plt.show()
# Check whether the data is balanced between classes
# for name in train_datasets:
# dataset = load_dataset(name)
# print(name, ' size:', dataset.shape)
#
# for name in test_datasets:
# dataset = load_dataset(name)
# print(name, ' size:', dataset.shape)
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class + tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
# def show_images(dataset, labels, count):
# for i in range(0,count):
# print(labels[i])
# plt.matshow(dataset[i,:,:], cmap=plt.cm.gray)
# plt.show()
# show_images(train_dataset, train_labels, 3)
# show_images(test_dataset, test_labels, 3)
# show_images(valid_dataset, valid_labels, 3)
pickle_file = 'notMNIST.pickle'
if not os.path.exists(pickle_file):
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
indices = np.arange(train_dataset.shape[0])
np.random.shuffle(indices)
train_dataset = train_dataset[indices]
train_labels = train_labels[indices]
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
def load_datasets(pickle_file):
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
f = open(pickle_file, 'rb')
save = pickle.load(f)
f.close()
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
return train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels
train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = load_datasets(pickle_file)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
def sanitize_dataset(dataset, labels, filter_dataset, similarity_epsilon):
similarity = cosine_similarity(np.reshape(dataset, (dataset.shape[0],-1)), np.reshape(filter_dataset, (filter_dataset.shape[0],-1)))
same_filter = np.sum(similarity == 1, axis=1) > 0
similar_filter = np.sum(similarity > 1-similarity_epsilon, axis=1) > 0
same_count = np.sum(same_filter)
similar_count = np.sum(similar_filter)
filtered_dataset = dataset[same_filter==False]
filtered_labels = labels[same_filter==False]
return filtered_dataset, filtered_labels, same_count, similar_count
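# Note: only exact duplicates (cosine similarity == 1) are filtered out of the
# returned arrays; near-duplicates (similarity > 1 - similarity_epsilon) are only
# counted and reported, not removed.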
sanit_pickle_file = 'notMNIST_sanit.pickle'
if not os.path.exists(sanit_pickle_file):
filtered_valid_dataset, filtered_valid_labels, train_valid_same, train_valid_similar = \
sanitize_dataset(valid_dataset, valid_labels, train_dataset, 0.001)
print("training-validation: same=", train_valid_same, "similar=", train_valid_similar)
filtered_test_dataset, filtered_test_labels, train_test_same, train_test_similar = \
sanitize_dataset(test_dataset, test_labels, train_dataset, 0.001)
print("training-testing: same=", train_test_same, "similar=", train_test_similar)
filtered_test_dataset, filtered_test_labels, valid_test_same, valid_test_similar = \
sanitize_dataset(filtered_test_dataset, filtered_test_labels, filtered_valid_dataset, 0.001)
print("validation-testing: same=", valid_test_same, "similar=", valid_test_similar)
try:
f = open(sanit_pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': filtered_valid_dataset,
'valid_labels': filtered_valid_labels,
'test_dataset': filtered_test_dataset,
'test_labels': filtered_test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
train_dataset, train_labels, filtered_valid_dataset, filtered_valid_labels, filtered_test_dataset, filtered_test_labels = load_datasets(sanit_pickle_file)
print('Training (sanitized):', train_dataset.shape, train_labels.shape)
print('Validation (sanitized):', filtered_valid_dataset.shape, filtered_valid_labels.shape)
print('Testing (sanitized):', filtered_test_dataset.shape, filtered_test_labels.shape)
def train_model(dataset, labels, size=None):
maxSize = dataset.shape[0]
if size is None:
size = maxSize
elif size > maxSize:
size = maxSize
else:
dataset = dataset[0:size]
labels = labels[0:size]
X = np.reshape(dataset, (size,-1))
y = labels
lr = LogisticRegression(n_jobs=4)
lr.fit(X, y)
return lr
def model_score(model, dataset, labels):
X = np.reshape(dataset, (dataset.shape[0],-1))
y = labels
return model.score(X, y)
def train(size=None):
if size is None:
print("Training with all examples:")
else:
print("Training with ", size, " examples:")
model = train_model(train_dataset, train_labels, size)
print(" validation score: ", model_score(model, valid_dataset, valid_labels))
print(" test score: ", model_score(model, test_dataset, test_labels))
print(" validation score (sanitized): ", model_score(model, filtered_valid_dataset, filtered_valid_labels))
print(" test score (sanitized): ", model_score(model, filtered_test_dataset, filtered_test_labels))
for size in [50, 100, 1000, 5000]:
train(size)
# training on all examples:
#train()
| 2.703125 | 3 |
scripts/jrk_simple_test.py | jones7625/jrk_controller | 1 | 12797361 | #!/usr/bin/env python
import serial
import time
from sys import stdout
print("starting jrk_simple_test")
ser = serial.Serial( "/dev/ttyACM0", 9600) # input to the JRK controller for sending it commands
print("connected to: " + ser.portstr + " for sending commands to JRK")
init_cmd = "\xAA"
jrk_id = "\x0B"
set_target_cmd = "\xC0"
stop_cmd = "\xFF"
read_feedback_cmd = "\xA5"
read_current_cmd = "\x8F"
read_scaled_feedback = "\xA7"
get_error_cmd = "\x33"
# For my John Deere tractor steering: 2400 full right; 1450 straight; 450 full left
# clear error bits and read the register; Pololu protocol: 0xAA, device number, 0x33; Reference "Get Error Flags Halting" page 34 of manual
print("Clearing errors on start up")
ser.write([init_cmd, jrk_id, get_error_cmd])
time.sleep(0.1)
cycle_delay = .1
for target in [2048, 4094, 1024, 0]:
lowByte = (target & ord("\x1F")) | ord(set_target_cmd)
highByte = (target >> 5) & ord("\x7F")
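# Worked example of the set-target encoding above: for target = 2048,
# lowByte = (2048 & 0x1F) | 0xC0 = 0xC0 and highByte = (2048 >> 5) & 0x7F = 0x40,
# so the bytes sent are 0xAA 0x0B 0xC0 0x40.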
ser.write([init_cmd, jrk_id, lowByte, highByte])
time.sleep (0.01)
for i in range(1, 30):
time.sleep (cycle_delay)
ser.write([init_cmd, jrk_id, read_current_cmd])
time.sleep (0.01)
checkCurrent = ord(ser.read())
ser.write([init_cmd, jrk_id, read_feedback_cmd])
time.sleep (0.01)
checkFeedback = (ord(ser.read()) | ord(ser.read())<<8)
time.sleep (0.01)
ser.write([init_cmd, jrk_id, read_scaled_feedback])
time.sleep (0.01)
scaled_feedback = (ord(ser.read()) | ord(ser.read())<<8)
#stdout.write (" \r target: %s feedback is at %s of 4095, interation %s" % (target, checkFeedback, i)) # use this if you don't want the values to scroll
#stdout.flush() # used with the statement above
target_delta = abs(target-scaled_feedback)
print (" target: %s feedback: %s scaled feedback: %s current: %s delta: %s interation %s" % (target, checkFeedback, scaled_feedback, checkCurrent, target_delta, i))
ser.write(stop_cmd)
print ("- Finished.")
ser.write(stop_cmd)
| 2.8125 | 3 |
src/cardapp/utils.py | raphv/cardmapper | 0 | 12797362 |
# -*- coding: utf-8 -*-
import re
from html import unescape
from bleach.sanitizer import Cleaner
from html5lib.filters.base import Filter
PARAGRAPH_TAGS = ['p', 'h1', 'h2', 'h3', 'h4', 'li']
STYLE_TAGS = ['strong', 'em']
class ProcessDescription(Filter):
def __iter__(self):
for token in Filter.__iter__(self):
if token['type'] == 'StartTag':
continue
if token['type'] in ['EndTag','EmptyTag']:
token = {'type': 'Characters', 'data': '\n'}
yield token
description_cleaner = Cleaner(
tags = PARAGRAPH_TAGS + ['br'],
filters = [ProcessDescription],
strip = True
)
newline_re = re.compile('\n{2,}')
def process_description(txt):
return unescape(
newline_re.sub(
'\n',
description_cleaner.clean(txt)
).strip()
)
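# Rough illustration (the input string is made up):
#   process_description("<h1>Title</h1><p>First<br/>Second</p>")
# returns roughly "Title\nFirst\nSecond": start tags are dropped, end/empty tags
# become newlines, repeated newlines are collapsed, and HTML entities are unescaped.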
class ProcessShortDescription(Filter):
max_length = 200
def __iter__(self):
current_length = 0
reached_max_length = False
nesting_level = 0
for token in Filter.__iter__(self):
if reached_max_length and nesting_level == 0:
return
if token['type'] in ['StartTag','EndTag'] and token['name'] in PARAGRAPH_TAGS:
token['name'] = 'p'
if token['type'] == 'EndTag':
nesting_level -= 1
if token['type'] == 'StartTag':
nesting_level += 1
if token['type'] in ['Characters', 'SpaceCharacters']:
if reached_max_length:
continue
total_length = current_length + len(token['data'])
if total_length > self.max_length:
reached_max_length = True
token['data'] = token['data'][:self.max_length-current_length] + '...'
token['type'] = 'Characters'
current_length = total_length
yield token
short_description_cleaner = Cleaner(
tags = PARAGRAPH_TAGS + STYLE_TAGS,
filters = [ProcessShortDescription],
strip = True
)
def process_short_description(txt):
return short_description_cleaner.clean(txt)
| 2.515625 | 3 |
oo/pessoa.py | arisobel/pythonbirds | 0 | 12797363 |
class Pessoa:
olhos = 2
def __init__(self, *filhos, nome=None, idade=10):
self.nome = nome
self.idade = idade
self.filhos = list(filhos)
def cumprimentar(self):
return f"olá {self.nome} id({id(self)})"
@staticmethod
def metodo_estatico():
return 42
@classmethod
def nome_e_atributo_de_classe(cls):
return f"{cls} olhos {cls.olhos}"
if __name__ == "__main__":
joao = Pessoa(nome='João')
pedro = Pessoa(joao,nome='Pedro')
print(Pessoa.cumprimentar(pedro))
print(id(joao))
for filho in pedro.filhos:
print(filho.nome)
joao.sobrenome = "Silva"
del pedro.filhos
joao.olhos = 1
Pessoa.olhos = 3
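# joao now has its own instance attribute (olhos == 1), which shadows the class
# attribute, while pedro still reads the class attribute and therefore sees 3.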
print(joao.__dict__)
print(pedro.__dict__)
print(Pessoa.olhos)
print(joao.olhos)
print(pedro.olhos)
print(id(Pessoa.olhos),id(joao.olhos),id(pedro.olhos))
print(Pessoa.metodo_estatico(),joao.metodo_estatico())
print(Pessoa.nome_e_atributo_de_classe(),joao.nome_e_atributo_de_classe()) | 3.515625 | 4 |
lumin/data_processing/file_proc.py | nflanner/lumin | 43 | 12797364 |
import h5py
import numpy as np
import pandas as pd
from typing import List, Union, Optional, Any, Tuple, Dict
import os
from pathlib import Path
import json
from sklearn.model_selection import StratifiedKFold, KFold
__all__ = ['save_to_grp', 'fold2foldfile', 'df2foldfile', 'add_meta_data']
def save_to_grp(arr:np.ndarray, grp:h5py.Group, name:str, compression:Optional[str]=None) -> None:
r'''
Save Numpy array as a dataset in an h5py Group
Arguments:
arr: array to be saved
grp: group in which to save arr
name: name of dataset to create
compression: optional compression argument for h5py, e.g. 'lzf'
'''
# TODO Option for string length
grp.create_dataset(name, shape=arr.shape, dtype=arr.dtype.name if arr.dtype.name not in ['object', 'str864'] else 'S64',
data=arr if arr.dtype.name not in ['object', 'str864'] else arr.astype('S64'), compression=compression)
def _build_matrix_lookups(feats:List[str], vecs:List[str], feats_per_vec:List[str], row_wise:bool) -> Tuple[List[str],np.ndarray,Tuple[int,int]]:
shape = (len(vecs),len(feats_per_vec)) if row_wise else (len(feats_per_vec),len(vecs))
lookup,missing = np.zeros(shape, dtype=np.array(feats).dtype),np.zeros(shape, dtype=np.bool)
if row_wise:
for i, v in enumerate(vecs):
for j, c in enumerate(feats_per_vec):
f = f'{v}_{c}'
if f in feats:
lookup[i,j] = f
else:
lookup[i,j] = feats[0] # Temp value, to be set to null later using missing
missing[i,j] = True
else:
for j, v in enumerate(vecs):
for i, c in enumerate(feats_per_vec):
f = f'{v}_{c}'
if f in feats:
lookup[i,j] = f
else:
lookup[i,j] = feats[0] # Temp value, to be set to null later using missing
missing[i,j] = True
return list(lookup.flatten()),missing.flatten(),shape
def fold2foldfile(df:pd.DataFrame, out_file:h5py.File, fold_idx:int,
cont_feats:List[str], cat_feats:List[str], targ_feats:Union[str,List[str]], targ_type:Any,
misc_feats:Optional[List[str]]=None, wgt_feat:Optional[str]=None,
matrix_lookup:Optional[List[str]]=None, matrix_missing:Optional[np.ndarray]=None, matrix_shape:Optional[Tuple[int,int]]=None,
tensor_data:Optional[np.ndarray]=None, compression:Optional[str]=None) -> None:
r'''
Save fold of data into an h5py Group
Arguments:
df: Dataframe from which to save data
out_file: h5py file to save data in
fold_idx: ID for the fold; used name h5py group according to 'fold_{fold_idx}'
cont_feats: list of columns in df to save as continuous variables
cat_feats: list of columns in df to save as discreet variables
targ_feats: (list of) column(s) in df to save as target feature(s)
targ_type: type of target feature, e.g. int,'float32'
misc_feats: any extra columns to save
wgt_feat: column to save as data weights
matrix_vecs: list of objects for matrix encoding, i.e. feature prefixes
matrix_feats_per_vec: list of features per vector for matrix encoding, i.e. feature suffixes.
Features listed but not present in df will be replaced with NaN.
matrix_row_wise: whether objects encoded as a matrix should be encoded row-wise (i.e. all the features associated with an object are in their own row),
or column-wise (i.e. all the features associated with an object are in their own column)
tensor_data: data of higher order than a matrix can be passed directly as a numpy array, rather than beign extracted and reshaped from the DataFrame.
The array will be saved under matrix data, and this is incompatible with also setting `matrix_lookup`, `matrix_missing`, and `matrix_shape`.
The first dimension of the array must be compatible with the length of the data frame.
compression: optional compression argument for h5py, e.g. 'lzf'
'''
# TODO infer target type automatically
grp = out_file.create_group(f'fold_{fold_idx}')
save_to_grp(np.hstack((df[cont_feats].values.astype('float32'), df[cat_feats].values.astype('float32'))), grp, 'inputs', compression=compression)
save_to_grp(df[targ_feats].values.astype(targ_type), grp, 'targets', compression=compression)
if wgt_feat is not None:
if wgt_feat in df.columns: save_to_grp(df[wgt_feat].values.astype('float32'), grp, 'weights', compression=compression)
else: print(f'{wgt_feat} not found in file')
if misc_feats is not None:
for f in misc_feats:
if f in df.columns: save_to_grp(df[f].values, grp, f, compression=compression)
else: print(f'{f} not found in file')
if matrix_lookup is not None:
if tensor_data is not None:
raise ValueError("The saving of both matrix and tensor data is requested. This is ambiguous. Please only set one of the other.")
mat = df[matrix_lookup].values.astype('float32')
mat[:,matrix_missing] = np.NaN
mat = mat.reshape((len(df),*matrix_shape))
save_to_grp(mat, grp, 'matrix_inputs', compression=compression)
elif tensor_data is not None:
save_to_grp(tensor_data.astype('float32'), grp, 'matrix_inputs', compression=compression)
def df2foldfile(df:pd.DataFrame, n_folds:int, cont_feats:List[str], cat_feats:List[str],
targ_feats:Union[str,List[str]], savename:Union[Path,str], targ_type:str,
strat_key:Optional[str]=None, misc_feats:Optional[List[str]]=None, wgt_feat:Optional[str]=None, cat_maps:Optional[Dict[str,Dict[int,Any]]]=None,
matrix_vecs:Optional[List[str]]=None, matrix_feats_per_vec:Optional[List[str]]=None, matrix_row_wise:Optional[bool]=None,
tensor_data:Optional[np.ndarray]=None, tensor_name:Optional[str]=None, tensor_is_sparse:bool=False, compression:Optional[str]=None) -> None:
r'''
Convert dataframe into h5py file by splitting data into sub-folds to be accessed by a :class:`~lumin.nn.data.fold_yielder.FoldYielder`
Arguments:
df: Dataframe from which to save data
n_folds: number of folds to split df into
cont_feats: list of columns in df to save as continuous variables
cat_feats: list of columns in df to save as discreet variables
targ_feats: (list of) column(s) in df to save as target feature(s)
savename: name of h5py file to create (.h5py extension not required)
targ_type: type of target feature, e.g. int,'float32'
strat_key: column to use for stratified splitting
misc_feats: any extra columns to save
wgt_feat: column to save as data weights
cat_maps: Dictionary mapping categorical features to dictionary mapping codes to categories
matrix_vecs: list of objects for matrix encoding, i.e. feature prefixes
matrix_feats_per_vec: list of features per vector for matrix encoding, i.e. feature suffixes.
Features listed but not present in df will be replaced with NaN.
matrix_row_wise: whether objects encoded as a matrix should be encoded row-wise (i.e. all the features associated with an object are in their own row),
or column-wise (i.e. all the features associated with an object are in their own column)
tensor_data: data of higher order than a matrix can be passed directly as a numpy array, rather than beign extracted and reshaped from the DataFrame.
The array will be saved under matrix data, and this is incompatible with also setting `matrix_vecs`, `matrix_feats_per_vec`, and `matrix_row_wise`.
The first dimension of the array must be compatible with the length of the data frame.
tensor_name: if `tensor_data` is set, then this is the name that will to the foldfile's metadata.
tensor_is_sparse: Set to True if the matrix is in sparse COO format and should be densified later on
The format expected is `coo_x = sparse.as_coo(x); m = np.vstack((coo_x.data, coo_x.coords))`, where `m` is the tensor passed to `tensor_data`.
compression: optional compression argument for h5py, e.g. 'lzf'
'''
savename = str(savename)
os.system(f'rm {savename}.hdf5')
os.makedirs(savename[:savename.rfind('/')], exist_ok=True)
out_file = h5py.File(f'{savename}.hdf5', "w")
lookup,missing,shape = None,None,None
if matrix_vecs is not None:
if tensor_data is not None:
raise ValueError("The saving of both matrix and tensor data is requested. This is ambiguous. Please only set one of the other.")
lookup,missing,shape = _build_matrix_lookups(df.columns, matrix_vecs, matrix_feats_per_vec, matrix_row_wise)
mat_feats = list(np.array(lookup)[np.logical_not(missing)]) # Only features present in data
dup = [f for f in cont_feats if f in mat_feats]
if len(dup) > 1:
print(f'{dup} present in both matrix features and continuous features; removing from continuous features')
cont_feats = [f for f in cont_feats if f not in dup]
if strat_key is not None and strat_key not in df.columns:
print(f'{strat_key} not found in DataFrame')
strat_key = None
if strat_key is None:
kf = KFold(n_splits=n_folds, shuffle=True)
folds = kf.split(X=df)
else:
kf = StratifiedKFold(n_splits=n_folds, shuffle=True)
folds = kf.split(X=df, y=df[strat_key])
for fold_idx, (_, fold) in enumerate(folds):
print(f"Saving fold {fold_idx} with {len(fold)} events")
fold2foldfile(df.iloc[fold].copy(), out_file, fold_idx, cont_feats=cont_feats, cat_feats=cat_feats, targ_feats=targ_feats,
targ_type=targ_type, misc_feats=misc_feats, wgt_feat=wgt_feat,
matrix_lookup=lookup, matrix_missing=missing, matrix_shape=shape, tensor_data=tensor_data[fold] if tensor_data is not None else None,
compression=compression)
add_meta_data(out_file=out_file, feats=df.columns, cont_feats=cont_feats, cat_feats=cat_feats, cat_maps=cat_maps, targ_feats=targ_feats, wgt_feat=wgt_feat,
matrix_vecs=matrix_vecs, matrix_feats_per_vec=matrix_feats_per_vec, matrix_row_wise=matrix_row_wise,
tensor_name=tensor_name, tensor_shp=tensor_data[0].shape if tensor_data is not None else None, tensor_is_sparse=tensor_is_sparse)
def add_meta_data(out_file:h5py.File, feats:List[str], cont_feats:List[str], cat_feats:List[str], cat_maps:Optional[Dict[str,Dict[int,Any]]],
targ_feats:Union[str,List[str]], wgt_feat:Optional[str]=None,
matrix_vecs:Optional[List[str]]=None, matrix_feats_per_vec:Optional[List[str]]=None, matrix_row_wise:Optional[bool]=None,
tensor_name:Optional[str]=None, tensor_shp:Optional[Tuple[int]]=None, tensor_is_sparse:bool=False) -> None:
r'''
Adds meta data to foldfile containing information about the data: feature names, matrix information, etc.
:class:`~lumin.nn.data.fold_yielder.FoldYielder` objects will access this and automatically extract it to save the user from having to manually pass lists
of features.
Arguments:
out_file: h5py file to save data in
feats: list of all features in data
cont_feats: list of continuous features
cat_feats: list of categorical features
cat_maps: Dictionary mapping categorical features to dictionary mapping codes to categories
targ_feats: (list of) target feature(s)
wgt_feat: name of weight feature
matrix_vecs: list of objects for matrix encoding, i.e. feature prefixes
matrix_feats_per_vec: list of features per vector for matrix encoding, i.e. feature suffixes.
Features listed but not present in df will be replaced with NaN.
matrix_row_wise: whether objects encoded as a matrix should be encoded row-wise (i.e. all the features associated with an object are in their own row),
or column-wise (i.e. all the features associated with an object are in their own column)
tensor_name: Name used to refer to the tensor when displaying model information
tensor_shp: The shape of the tensor data (exclusing batch dimension)
tensor_is_sparse: Whether the tensor is sparse (COO format) and should be densified prior to use
'''
grp = out_file.create_group('meta_data')
grp.create_dataset('cont_feats', data=json.dumps(cont_feats))
grp.create_dataset('cat_feats', data=json.dumps(cat_feats))
grp.create_dataset('targ_feats', data=json.dumps(targ_feats))
if wgt_feat is not None: grp.create_dataset('wgt_feat', data=json.dumps(wgt_feat))
if cat_maps is not None: grp.create_dataset('cat_maps', data=json.dumps(cat_maps))
if matrix_vecs is not None:
lookup,missing,shape = _build_matrix_lookups(feats, matrix_vecs, matrix_feats_per_vec, matrix_row_wise)
use = list(np.array(lookup)[np.logical_not(missing)]) # Only features present in data
grp.create_dataset('matrix_feats', data=json.dumps({'present_feats': use, 'vecs': matrix_vecs, 'missing': [int(m) for m in missing],
'feats_per_vec': matrix_feats_per_vec, 'row_wise': matrix_row_wise, 'shape': shape}))
elif tensor_name is not None:
grp.create_dataset('matrix_feats', data=json.dumps({'present_feats': [tensor_name], 'vecs': [tensor_name], 'missing': [],
'feats_per_vec': [''], 'row_wise': None, 'shape': tensor_shp, 'is_sparse':tensor_is_sparse}))
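# Minimal usage sketch for df2foldfile (the DataFrame and column names here are hypothetical):
#   df2foldfile(df, n_folds=10,
#               cont_feats=['pt', 'eta'], cat_feats=['charge'],
#               targ_feats='gen_target', savename='data/train', targ_type='int',
#               strat_key='gen_target', wgt_feat='gen_weight')
# This writes data/train.hdf5 containing groups fold_0 ... fold_9 plus a meta_data group
# that FoldYielder uses to recover the feature lists.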
| 2.21875 | 2 |
TranskribusDU/visu/deco.py | Transkribus/TranskribusDU | 20 | 12797365 |
"""
A class that reflects a decoration to be made on certain XML nodes using WX
"""
import types, os
from collections import defaultdict
import glob
import logging
import random
from lxml import etree
#import cStringIO
import wx
sEncoding = "utf-8"
def setEncoding(s):
global sEncoding
sEncoding = s
class DecoSeparator:
"""
this is not properly a decoration but rather a separator between decorations in the toolbar
"""
def __init__(self, cfg, sSurname, xpCtxt):
"""
cfg is a configuration object
sSurname is the surname of the decoration and the section name in the config file!
xpCtxt is an XPath context
"""
self.sSurname = sSurname
def __str__(self):
return "--------"
def isSeparator(self):
return True
def setXPathContext(self, xpCtxt):
pass
class Deco:
"""A general decoration class"""
def __init__(self, cfg, sSurname, xpCtxt):
"""
cfg is a configuration object
sSurname is the surname of the decoration and the section name in the config file!
xpCtxt is an XPath context
"""
self.sSurname = sSurname
self.xpMain = cfg.get(sSurname, "xpath") # a main XPath that select nodes to be decorated in this way
self.xpCtxt = xpCtxt #this context may include the declaration of some namespace
sEnabled = cfg.get(sSurname, "enabled").lower()
self.bEnabled = sEnabled in ['1', 'yes', 'true']
def isSeparator(cls):
return False
isSeparator = classmethod(isSeparator)
def __str__(self):
return "(Surname=%s xpath==%s)" % (self.sSurname, self.xpMain)
def getDecoClass(cls, sClass):
"""given a decoration type, return the associated class"""
c = globals()[sClass]
if type(c) != types.ClassType: raise Exception("No such decoration type: '%s'"%sClass)
return c
getDecoClass = classmethod(getDecoClass)
def getSurname(self):
return self.sSurname
def getMainXPath(self):
return self.xpMain
def isEnabled(self):
return self.bEnabled
def setEnabled(self, b=True):
self.bEnabled = b
return b
def isActionable(self):
return False
def setXPathContext(self, xpCtxt):
self.xpCtxt = xpCtxt
def xpathError(self, node, xpExpr, eExcpt, sMsg=""):
"""report an xpath error"""
try:
Deco._s_prev_xpath_error
except AttributeError:
Deco._s_prev_xpath_error = ""
Deco._prev_xpath_error_count = 0
iMaxLen = 200 # to truncate the node serialization
s = "-"*60
s += "\n--- XPath ERROR on class %s"%self.__class__
s += "\n--- xpath=%s" % xpExpr
s += "\n--- Python Exception=%s" % str(eExcpt)
if sMsg: s += "\n--- Info: %s" % sMsg
if s == Deco._s_prev_xpath_error:
# let's not overload the console.
return
Deco._s_prev_xpath_error = s
Deco._prev_xpath_error_count += 1
if Deco._prev_xpath_error_count > 10:
return
try:
sNode = etree.tostring(node)
except:
sNode = str(node)
if len(sNode) > iMaxLen: sNode = sNode[:iMaxLen] + "..."
s += "\n--- XML node = %s" % sNode
s += "\n" + "-"*60 + "\n"
logging.warning(s)
def warning(self, sMsg):
"""report an xpath error"""
try:
Deco._s_prev_warning
except AttributeError:
Deco._s_prev_warning = ""
Deco._warning_count = 0
# if sMsg != Deco._s_prev_warning and Deco._warning_count < 1000:
if sMsg != Deco._s_prev_warning:
logging.warning(sMsg)
Deco._warning_count += 1
Deco._s_prev_warning = sMsg
def toInt(cls, s):
try:
return int(s)
except ValueError:
return int(round(float(s)))
toInt = classmethod(toInt)
def xpathToInt(self, node, xpExpr, iDefault=0, bShowError=True):
"""The given XPath expression should return an int on the given node.
The XPath expression should return a scalar or a one-node nodeset
On error, return the default int value
"""
try:
# s = node.xpathEval(xpExpr)
self.xpCtxt.setContextNode(node)
if xpExpr[0] == "|":
#must be a lambda
assert xpExpr[:8] == "|lambda ", "Invalid lambda expression %s"%xpExpr
sStartEmpty, sLambdaExpr, xpExprArg, sEndEmpty = xpExpr.split('|')
assert sEndEmpty == "", "Missing last '|'"
sArg = self.xpCtxt.xpathEval(xpExprArg)[0]
sPythonExpr = "(%s)(%s)" % (sLambdaExpr, repr(sArg))
s = eval(sPythonExpr)
else:
s = self.xpCtxt.xpathEval(xpExpr)
if type(s) == types.ListType:
try:
s = s[0].text
except AttributeError:
s = s[0] #should be an attribute value
return Deco.toInt(s)
except Exception, e:
if bShowError: self.xpathError(node, xpExpr, e, "xpathToInt return %d as default value"%iDefault)
return iDefault
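# Illustrative use of the special lambda form handled above (the expression is made up):
#   "|lambda s: int(s.split(',')[0])|string(@points)|"
# first evaluates the XPath string(@points), then applies the lambda to that value.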
def xpathToStr(self, node, xpExpr, sDefault, bShowError=True):
"""The given XPath expression should return a string on the given node
The XPath expression should return a scalar or a one-node nodeset
On error, return the default int value
"""
try:
# s = node.xpathEval(xpExpr)
self.xpCtxt.setContextNode(node)
s = self.xpCtxt.xpathEval(xpExpr)
if type(s) == types.ListType:
try:
s = s[0].text
except AttributeError:
s = s[0]
return s
except Exception, e:
if bShowError: self.xpathError(node, xpExpr, e, "xpathToStr return %s as default value"%sDefault)
return sDefault
def xpathEval(self, node, xpExpr):
""" evaluate the xpath expression
return None on error
"""
try:
# s = node.xpathEval(xpExpr)
self.xpCtxt.setContextNode(node)
return self.xpCtxt.xpathEval(xpExpr)
except Exception, e:
self.xpathError(node, xpExpr, e, "xpathEval return None")
return None
def beginPage(self, node):
"""called before any sequnce of draw for a given page"""
pass
def endPage(self, node):
"""called before any sequnce of draw for a given page"""
pass
def draw(self, wxh, node):
"""draw the associated decorations, return the list of wx created objects"""
return []
class DecoBBXYWH(Deco):
"""A decoration with a bounding box defined by X,Y for its top-left corner and width/height.
xpX, xpY, xpW, xpH are scalar XPath expressions to get the associated x,y,w,h values from the selected nodes
"""
def __init__(self, cfg, sSurname, xpCtxt):
"""
cfg is a config file
sSurname is the decoration surname and the section name in the config file
This section should contain the following items: x, y, w, h
"""
Deco.__init__(self, cfg, sSurname, xpCtxt)
        #now get the xpath expressions that let us find x,y,w,h from a selected node
self.xpX, self.xpY = cfg.get(sSurname, "xpath_x"), cfg.get(sSurname, "xpath_y")
self.xpW, self.xpH = cfg.get(sSurname, "xpath_w"), cfg.get(sSurname, "xpath_h")
self.xpInc = cfg.get(sSurname, "xpath_incr") #to increase the BB width and height
self._node = None
def __str__(self):
s = Deco.__str__(self)
s += "+(x=%s y=%s w=%s h=%s)" % (self.xpX, self.xpY, self.xpW, self.xpH)
return s
def runXYWHI(self, node):
"""get the X,Y values for a node and put them in cache"""
if self._node != node:
self._x = self.xpathToInt(node, self.xpX, 1)
self._y = self.xpathToInt(node, self.xpY, 1)
self._w = self.xpathToInt(node, self.xpW, 1)
self._h = self.xpathToInt(node, self.xpH, 1)
self._inc = self.xpathToInt(node, self.xpInc, 0)
self._x,self._y = self._x-self._inc, self._y-self._inc
self._w,self._h = self._w+2*self._inc, self._h+2*self._inc
self._node = node
return (self._x, self._y, self._w, self._h, self._inc)
class DecoRectangle(DecoBBXYWH):
"""A rectangle
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoBBXYWH.__init__(self, cfg, sSurname, xpCtxt)
#now get the xpath expressions that let us find the rectangle line and fill colors
self.xpLineColor = cfg.get(sSurname, "xpath_LineColor")
self.xpLineWidth = cfg.get(sSurname, "xpath_LineWidth")
self.xpFillColor = cfg.get(sSurname, "xpath_FillColor")
self.xpFillStyle = cfg.get(sSurname, "xpath_FillStyle")
def __str__(self):
s = "%s="%self.__class__
s += DecoBBXYWH.__str__(self)
return s
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
lo = DecoBBXYWH.draw(self, wxh, node)
x,y,w,h,inc = self.runXYWHI(node)
sLineColor = self.xpathToStr(node, self.xpLineColor, "#000000")
iLineWidth = self.xpathToInt(node, self.xpLineWidth, 1)
sFillColor = self.xpathToStr(node, self.xpFillColor, "#000000")
sFillStyle = self.xpathToStr(node, self.xpFillStyle, "Solid")
obj = wxh.AddRectangle((x, -y), (w, -h),
LineWidth=iLineWidth,
LineColor=sLineColor,
FillColor=sFillColor,
FillStyle=sFillStyle)
lo.append(obj)
return lo
class DecoTextBox(DecoRectangle):
"""A text within a bounding box (a rectangle)
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoRectangle.__init__(self, cfg, sSurname, xpCtxt)
self.xpContent = cfg.get(sSurname, "xpath_content")
self.xpFontSize = cfg.get(sSurname, "xpath_font_size")
self.xpFontColor = cfg.get(sSurname, "xpath_font_color")
def __str__(self):
s = "%s="%self.__class__
s += DecoRectangle.__str__(self)
return s
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
lo = DecoRectangle.draw(self, wxh, node)
#add the text itself
txt = self.xpathToStr(node, self.xpContent, "")
iFontSize = self.xpathToInt(node, self.xpFontSize, 8)
sFontColor = self.xpathToStr(node, self.xpFontColor, 'BLACK')
x,y,w,h,inc = self.runXYWHI(node)
obj = wxh.AddScaledTextBox(txt, (x, -y+inc),
Size=iFontSize,
Family=wx.ROMAN, Position='tl',
Color=sFontColor, PadSize=0, LineColor=None)
lo.append(obj)
return lo
class DecoText(DecoBBXYWH):
"""A text
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoBBXYWH.__init__(self, cfg, sSurname, xpCtxt)
self.xpContent = cfg.get(sSurname, "xpath_content")
self.xpFontSize = cfg.get(sSurname, "xpath_font_size")
self.xpFontColor = cfg.get(sSurname, "xpath_font_color")
def __str__(self):
s = "%s="%self.__class__
s += DecoBBXYWH.__str__(self)
return s
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
lo = DecoBBXYWH.draw(self, wxh, node)
#add the text itself
txt = self.getText(wxh, node)
iFontSize = self.xpathToInt(node, self.xpFontSize, 8)
sFontColor = self.xpathToStr(node, self.xpFontColor, 'BLACK')
x,y,w,h,inc = self.runXYWHI(node)
obj = wxh.AddScaledTextBox(txt, (x, -y-h/2.0),
Size=iFontSize,
Family=wx.ROMAN, Position='cl',
Color=sFontColor, PadSize=0, LineColor=None)
lo.append(obj)
return lo
def getText(self, wxh, node):
return self.xpathToStr(node, self.xpContent, "")
class DecoUnicodeChar(DecoText):
"""A character encoded in Unicode
We assume the unicode index is given in a certain base, e.g. 10 or 16
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoText.__init__(self, cfg, sSurname, xpCtxt)
self.base = int(cfg.get(sSurname, "code_base"))
def getText(self, wxh, node):
sEncodedText = self.xpathToStr(node, self.xpContent, "")
try:
return eval('u"\\u%04x"' % int(sEncodedText, self.base))
except ValueError:
logging.error("DecoUnicodeChar: ERROR: base=%d code=%s"%(self.base, sEncodedText))
return ""
class DecoImageBox(DecoRectangle):
"""An image with a box around it
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoRectangle.__init__(self, cfg, sSurname, xpCtxt)
self.xpHRef = cfg.get(sSurname, "xpath_href")
def __str__(self):
s = "%s="%self.__class__
s += DecoRectangle.__str__(self)
return s
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
lo = []
#add the image itself
x,y,w,h,inc = self.runXYWHI(node)
sFilePath = self.xpathToStr(node, self.xpHRef, "")
if sFilePath:
try:
img = wx.Image(sFilePath, wx.BITMAP_TYPE_ANY)
obj = wxh.AddScaledBitmap(img, (x,-y), h)
lo.append(obj)
except Exception, e:
self.warning("DecoImageBox ERROR: File %s: %s"%(sFilePath, str(e)))
lo.append( DecoRectangle.draw(self, wxh, node) )
return lo
class DecoImage(DecoBBXYWH):
"""An image
"""
    # in case the user wants to specify it via the menu
sImageFolder = None
def __init__(self, cfg, sSurname, xpCtxt):
DecoBBXYWH.__init__(self, cfg, sSurname, xpCtxt)
self.xpHRef = cfg.get(sSurname, "xpath_href")
def __str__(self):
s = "%s="%self.__class__
s += DecoBBXYWH.__str__(self)
return s
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
lo = DecoBBXYWH.draw(self, wxh, node)
#add the image itself
x,y,w,h,inc = self.runXYWHI(node)
sFilePath = self.xpathToStr(node, self.xpHRef, "")
if sFilePath:
if self.sImageFolder:
sCandidate = os.path.join(self.sImageFolder, sFilePath)
if os.path.exists(sCandidate):
sFilePath = sCandidate
else:
# maybe the file is in a subfolder ?
# e.g. "S_Aicha_an_der_Donau_004-03_0005.jpg" is in folder "S_Aicha_an_der_Donau_004-03"
try:
sDir = sFilePath[:sFilePath.rindex("_")]
sCandidate = os.path.join(self.sImageFolder, sDir, sFilePath)
if os.path.exists(sCandidate):
sFilePath = sCandidate
except ValueError:
pass
if not os.path.exists(sFilePath):
#maybe the image is in a folder with same name as XML file? (Transkribus style)
sUrl = node.getroottree().docinfo.URL.decode('utf-8') # py2 ...
for sPrefix in ["file://", "file:/"]:
if sUrl[0:len(sPrefix)] == sPrefix:
sLocalDir = os.path.dirname(sUrl[len(sPrefix):])
sDir,_ = os.path.splitext(os.path.basename(sUrl))
sCandidate = os.path.abspath(os.path.join(sLocalDir, sDir, sFilePath))
if os.path.exists(sCandidate):
sFilePath = sCandidate
print(sFilePath)
break
if not os.path.exists(sFilePath):
# maybe we have some pattern??
lCandidate = glob.glob(sFilePath)
bKO = True
for s in lCandidate:
if os.path.exists(s):
sFilePath = s
bKO = False
break
if bKO:
self.warning("WARNING: deco Image: file does not exists: '%s'"%sFilePath)
sFilePath = None
if bool(sFilePath):
img = wx.Image(sFilePath, wx.BITMAP_TYPE_ANY)
try:
if h > 0:
obj = wxh.AddScaledBitmap(img, (x,-y), h)
else:
obj = wxh.AddScaledBitmap(img, (x,-y), img.GetHeight())
lo.append(obj)
except Exception, e:
self.warning("DecoImage ERROR: File %s: %s"%(sFilePath, str(e)))
return lo
class DecoOrder(DecoBBXYWH):
"""Show the order with lines
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoBBXYWH.__init__(self, cfg, sSurname, xpCtxt)
self.xpLineColor = cfg.get(sSurname, "xpath_LineColor")
self.xpLineWidth = cfg.get(sSurname, "xpath_LineWidth")
def __str__(self):
s = "%s="%self.__class__
s += DecoBBXYWH.__str__(self)
return s
def beginPage(self, node):
"""called before any sequnce of draw for a given page"""
self.bInit = False
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
lo = DecoBBXYWH.draw(self, wxh, node)
x,y,w,h,inc = self.runXYWHI(node)
sLineColor = self.xpathToStr(node, self.xpLineColor, "BLACK")
x, y = int(x + w/2.0), int(y + h/2.0)
if self.bInit:
#draw a line
iLineWidth = self.xpathToInt(node, self.xpLineWidth, 1)
obj = wxh.AddLine( [(self.prevX, -self.prevY), (x, -y)]
, LineWidth=iLineWidth
, LineColor=sLineColor)
lo.append(obj)
else:
self.bInit = True
iEllipseParam = min(w,h) / 2
wxh.AddEllipse((x, -y), (iEllipseParam, -iEllipseParam), LineColor=sLineColor, LineWidth=5, FillStyle="Transparent")
self.prevX, self.prevY = x, y
return lo
class DecoLine(Deco):
"""A line from x1,y1 to x2,y2
"""
def __init__(self, cfg, sSurname, xpCtxt):
Deco.__init__(self, cfg, sSurname, xpCtxt)
self.xpX1, self.xpY1 = cfg.get(sSurname, "xpath_x1"), cfg.get(sSurname, "xpath_y1")
self.xpX2, self.xpY2 = cfg.get(sSurname, "xpath_x2"), cfg.get(sSurname, "xpath_y2")
#now get the xpath expressions that let us find the rectangle line and fill colors
self.xpLineWidth = cfg.get(sSurname, "xpath_LineWidth")
self.xpLineColor = cfg.get(sSurname, "xpath_LineColor")
self._node = None
def __str__(self):
s = "%s="%self.__class__
s += "+(x1=%s y1=%s x2=%s y2=%s)" % (self.xpX1, self.xpY1, self.xpX2, self.xpY2)
return s
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
# print node.serialize()
# print self.xpX
# for n in node.xpathEval(self.xpX): print n.serialize()
iLARGENEG = -9999
lo = Deco.draw(self, wxh, node)
if self._node != node:
self._x1 = self.xpathToInt(node, self.xpX1, iLARGENEG)
self._y1 = self.xpathToInt(node, self.xpY1, iLARGENEG)
self._x2 = self.xpathToInt(node, self.xpX2, iLARGENEG)
self._y2 = self.xpathToInt(node, self.xpY2, iLARGENEG)
self._node = node
if self._x1 != iLARGENEG and self._y1 != iLARGENEG and self._x2 != iLARGENEG and self._y2 != iLARGENEG:
sLineColor = self.xpathToStr(node, self.xpLineColor, "#000000")
iLineWidth = self.xpathToInt(node, self.xpLineWidth, 1)
#draw a line
obj = wxh.AddLine( [(self._x1, -self._y1), (self._x2, -self._y2)]
, LineWidth=iLineWidth
, LineColor=sLineColor)
lo.append(obj)
return lo
class DecoREAD(Deco):
"""
READ PageXml has a special way to encode coordinates.
like:
<Coords points="985,390 1505,390 1505,440 985,440"/>
or
<Baseline points="985,435 1505,435"/>
"""
def __init__(self, cfg, sSurname, xpCtxt):
Deco.__init__(self, cfg, sSurname, xpCtxt)
self.xpCoords = cfg.get(sSurname, "xpath_lxy")
def _getCoordList(self, node):
sCoords = self.xpathToStr(node, self.xpCoords, "")
if not sCoords:
if node.get("id") is None:
self.warning("No coordinates: node = %s" % etree.tostring(node))
else:
self.warning("No coordinates: node id = %s" % node.get("id"))
return [(0,0)]
try:
ltXY = []
for _sPair in sCoords.split(' '):
(sx, sy) = _sPair.split(',')
ltXY.append((Deco.toInt(sx), Deco.toInt(sy)))
except Exception as e:
logging.error("ERROR: polyline coords are bad: '%s' -> '%s'" % (
self.xpCoords, sCoords))
raise e
return ltXY
def _coordList_to_BB(self, ltXY):
"""
return (x1, y1), (x2, y2)
"""
lX = [_x for _x,_y in ltXY]
lY = [_y for _x,_y in ltXY]
return (min(lX), max(lY)), (max(lX), min(lY))
class DecoREADTextLine(DecoREAD):
"""A TextLine as defined by the PageXml format of the READ project
<TextLine id="line_1551946877389_284" custom="readingOrder {index:0;} Item-name {offset:0; length:11;} Item-price {offset:12; length:2;}">
<Coords points="985,390 1505,390 1505,440 985,440"/>
<Baseline points="985,435 1505,435"/>
<TextEquiv>
<Unicode>Salgadinhos 12</Unicode>
</TextEquiv>
</TextLine>
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoREAD.__init__(self, cfg, sSurname, xpCtxt)
self.xpContent = cfg.get(sSurname, "xpath_content")
self.xpFontColor = cfg.get(sSurname, "xpath_font_color")
self.xpFit = cfg.get(sSurname, "xpath_fit_text_size").lower()
def __str__(self):
s = "%s="%self.__class__
return s
def _getFontSize(self, node, ltXY, txt, Family=wx.FONTFAMILY_TELETYPE):
"""
compute the font size so as to fit the polygon
and the extent of the 'x' character for this font size
return iFontSize, ExtentX, ExtentY
"""
(x1, y1), (x2, y2) = self._coordList_to_BB(ltXY)
sFit = self.xpathToStr(node, self.xpFit, 'xy', bShowError=False)
try:
iFontSize = int(sFit)
Ex, Ey = None, None
except ValueError:
dc = wx.ScreenDC()
# compute for font size of 24 and do proportional
dc.SetFont(wx.Font(24, Family, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
Ex, Ey = dc.GetTextExtent("x")
try:
iFontSizeX = 24 * abs(x2-x1) / Ex / len(txt)
except:
self.warning("absence of text: cannot compute font size along X axis")
iFontSizeX = 8
iFontSizeY = 24 * abs(y2-y1) / Ey
if sFit == "x":
iFontSize = iFontSizeX
elif sFit == "y":
iFontSize = iFontSizeY
else:
iFontSize = min(iFontSizeX, iFontSizeY)
dc.SetFont(wx.Font(iFontSize, Family, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
Ex, Ey = dc.GetTextExtent("x")
del dc
return iFontSize, Ex, Ey
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
lo = []
#add the text itself
txt = self.getText(wxh, node)
sFontColor = self.xpathToStr(node, self.xpFontColor, 'BLACK')
# Position and computation of font size
ltXY = self._getCoordList(node)
iFontSize, Ex, Ey = self._getFontSize(node, ltXY, txt, Family=wx.FONTFAMILY_TELETYPE)
# x, y = ltXY[0]
(x, _y1), (_x2, y) = self._coordList_to_BB(ltXY)
obj = wxh.AddScaledText(txt, (x, -y+iFontSize/6), Size=iFontSize
, Family=wx.FONTFAMILY_TELETYPE
, Position='tl'
, Color=sFontColor)
lo.append(obj)
return lo
def getText(self, wxh, node):
return self.xpathToStr(node, self.xpContent, "")
class READ_custom:
"""
Everything related to the PageXML custom attribute
"""
@classmethod
def parseCustomAttr(cls, s, bNoCase=True):
"""
The custom attribute contains data in a CSS style syntax.
We parse this syntax here and return a dictionary of list of dictionary
Example:
parseCustomAttr( "readingOrder {index:4;} structure {type:catch-word;}" )
--> { 'readingOrder': [{ 'index':'4' }], 'structure':[{'type':'catch-word'}] }
"""
dic = defaultdict(list)
s = s.strip()
lChunk = s.split('}')
if lChunk:
for chunk in lChunk: #things like "a {x:1"
chunk = chunk.strip()
if not chunk: continue
try:
sNames, sValues = chunk.split('{') #things like: ("a,b", "x:1 ; y:2")
except Exception:
raise ValueError("Expected a '{' in '%s'"%chunk)
#the dictionary for that name
dicValForName = dict()
lsKeyVal = sValues.split(';') #things like "x:1"
for sKeyVal in lsKeyVal:
if not sKeyVal.strip(): continue #empty
try:
sKey, sVal = sKeyVal.split(':')
except Exception:
raise ValueError("Expected a comma-separated string, got '%s'"%sKeyVal)
sKey = sKey.strip().lower() if bNoCase else sKey.strip()
dicValForName[sKey] = sVal.strip()
lName = sNames.split(',')
for name in lName:
name = name.strip().lower() if bNoCase else name.strip()
dic[name].append(dicValForName)
return dic
class DecoREADTextLine_custom_offset(DecoREADTextLine, READ_custom):
"""
Here we show the annotation by offset found in the custom attribute
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoREADTextLine.__init__(self, cfg, sSurname, xpCtxt)
self.xpLabel = cfg.get(sSurname, "xpath_label")
self.xpLineColor = cfg.get(sSurname, "xpath_LineColor")
self.xpBackgroundColor = cfg.get(sSurname, "xpath_background_color")
def draw(self, wxh, node):
"""
draw itself using the wx handle
return a list of created WX objects
"""
lo = []
#add the text itself
txt = self.getText(wxh, node)
sFontColor = self.xpathToStr(node, self.xpFontColor, 'BLACK')
sLineColor = self.xpathToStr(node, self.xpLineColor, "#000000")
sBackgroundColor = self.xpathToStr(node, self.xpBackgroundColor, "#000000")
# Position and computation of font size
ltXY = self._getCoordList(node)
iFontSize, Ex, Ey = self._getFontSize(node, ltXY, txt
, Family=wx.FONTFAMILY_TELETYPE)
dCustom = self.parseCustomAttr(node.get("custom"), bNoCase=True)
try:
x0, y0 = ltXY[0]
_ldLabel = dCustom[self.xpathToStr(node, self.xpLabel, "").lower()]
for _dLabel in _ldLabel:
try:
iOffset = int(_dLabel["offset"])
iLength = int(_dLabel["length"])
x = x0 + Ex * iOffset
y = -y0+iFontSize/6
obj = wxh.AddScaledTextBox(txt[iOffset:iOffset+iLength]
, (x, y)
, Size=iFontSize
, Family=wx.FONTFAMILY_TELETYPE
, Position='bl'
, Color=sFontColor
, LineColor=sLineColor
, BackgroundColor=sBackgroundColor)
lo.append(obj)
except KeyError:
pass
except KeyError:
pass
return lo
class DecoPolyLine(DecoREAD):
"""A polyline along
x1,y1,x2,y2, ...,xn,yn
or
x1,y1 x2,y2 .... xn,yn
Example of config:
[TextLine]
type=DecoPolyLine
xpath=.//TextLine/Coords
xpath_lxy=@points
xpath_LineColor="RED"
xpath_FillStyle="Solid"
<NAME> - March 2016
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoREAD.__init__(self, cfg, sSurname, xpCtxt)
#now get the xpath expressions that let us find the rectangle line and fill colors
self.xpLineWidth = cfg.get(sSurname, "xpath_LineWidth")
self.xpLineColor = cfg.get(sSurname, "xpath_LineColor")
#cached values
self._node = None
self._lxy = None
def __str__(self):
s = "%s="%self.__class__
s += "+(coords=%s)" % (self.xpCoords)
return s
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
# print node.serialize()
# print self.xpX
# for n in node.xpathEval(self.xpX): print n.serialize()
lo = DecoREAD.draw(self, wxh, node)
if self._node != node:
self._lxy = self._getCoordList(node)
self._node = node
if self._lxy:
sLineColor = self.xpathToStr(node, self.xpLineColor, "#000000")
iLineWidth = self.xpathToInt(node, self.xpLineWidth, 1)
for (x1, y1), (x2, y2) in zip(self._lxy, self._lxy[1:]):
#draw a line
obj = wxh.AddLine( [(x1, -y1), (x2, -y2)]
, LineWidth=iLineWidth
, LineColor=sLineColor)
lo.append(obj)
return lo
class DecoClosedPolyLine(DecoPolyLine):
"""A polyline that closes automatically the shape
<NAME> - September 2016
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoPolyLine.__init__(self, cfg, sSurname, xpCtxt)
def _getCoordList(self, node):
lCoord = DecoPolyLine._getCoordList(self, node)
if lCoord: lCoord.append(lCoord[0])
return lCoord
class DecoTextPolyLine(DecoPolyLine, DecoText):
"""A polyline that closes automatically the shape
<NAME> - September 2016
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoPolyLine.__init__(self, cfg, sSurname, xpCtxt)
DecoText .__init__(self, cfg, sSurname, xpCtxt)
self.xpX_Inc = cfg.get(sSurname, "xpath_x_incr") #to shift the text
self.xpY_Inc = cfg.get(sSurname, "xpath_y_incr") #to shift the text
def draw(self, wxh, node):
lo = Deco.draw(self, wxh, node)
if self._node != node:
self._lxy = self._getCoordList(node)
self._node = node
#lo = DecoClosedPolyLine.draw(self, wxh, node)
#add the text itself
x, y = self._lxy[0]
x_inc = self.xpathToInt(node, self.xpX_Inc, 0, False)
y_inc = self.xpathToInt(node, self.xpY_Inc, 0, False)
txt = self.xpathToStr(node, self.xpContent, "")
iFontSize = self.xpathToInt(node, self.xpFontSize, 8)
sFontColor = self.xpathToStr(node, self.xpFontColor, 'BLACK')
obj = wxh.AddScaledTextBox(txt, (x+x_inc, -y-y_inc),
Size=iFontSize,
Family=wx.ROMAN, Position='tl',
Color=sFontColor, PadSize=0, LineColor=None)
lo.append(obj)
return lo
class DecoClusterCircle(DecoREAD):
"""
[Cluster]
type=DecoClusterCircle
xpath=.//Cluster
xpath_content=@content
xpath_radius=40
xpath_item_lxy=./pg:Coords/@points
xpath_LineWidth="1"
xpath_FillStyle="Transparent"
LineColors="BLUE SIENNA YELLOW ORANGE RED GREEN"
FillColors="BLUE SIENNA YELLOW ORANGE RED GREEN"
enabled=1
"""
count = 0
def __init__(self, cfg, sSurname, xpCtxt):
DecoREAD.__init__(self, cfg, sSurname, xpCtxt)
self.xpCluster = cfg.get(sSurname, "xpath")
self.xpContent = cfg.get(sSurname, "xpath_content")
self.xpRadius = cfg.get(sSurname, "xpath_radius")
self.xpLineWidth = cfg.get(sSurname, "xpath_LineWidth")
self.xpFillStyle = cfg.get(sSurname, "xpath_FillStyle")
self.lsLineColor = cfg.get(sSurname, "LineColors").split()
self.lsFillColor = cfg.get(sSurname, "FillColors").split()
#cached values
self._node = None
self._laxyr = None
print "DecoClusterCircle lsLineColor = ", self.lsLineColor
print "DecoClusterCircle lsFillColor = ", self.lsFillColor
def __str__(self):
s = "%s="%self.__class__
s += "+(coords=%s)" % (self.xpCoords)
return s
def getArea_and_CenterOfMass(self, lXY):
"""
https://fr.wikipedia.org/wiki/Aire_et_centre_de_masse_d'un_polygone
return A, (Xg, Yg) which are the area and the coordinates (float) of the center of mass of the polygon
"""
        if len(lXY) < 2: raise ValueError("Fewer than two points: polygon area is undefined.")
fA = 0.0
xSum, ySum = 0, 0
xprev, yprev = lXY[-1]
for x, y in lXY:
iTerm = xprev*y - yprev*x
fA += iTerm
xSum += iTerm * (xprev+x)
ySum += iTerm * (yprev+y)
xprev, yprev = x, y
if fA == 0.0: raise ValueError("surface == 0.0")
fA = fA / 2
xg, yg = xSum/6/fA, ySum/6/fA
        if fA < 0:
            return -fA, (xg, yg)
        else:
            return fA, (xg, yg)
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
DecoClusterCircle.count = DecoClusterCircle.count + 1
lo = DecoREAD.draw(self, wxh, node)
if self._node != node:
self._laxyr = []
#need to go thru each item
ndPage = node.xpath("ancestor::*[local-name()='Page']")[0]
sIds = self.xpathEval(node, self.xpContent)[0]
for sId in sIds.split():
l = self.xpathEval(ndPage, './/*[@id="%s"]'%sId)
ndItem = l[0]
lxy = self._getCoordList(ndItem)
fA, (xg, yg) = self.getArea_and_CenterOfMass(lxy)
r = self.xpathToInt(ndItem, self.xpRadius, 1)
self._laxyr.append( (fA, xg, yg, r) )
self._node = node
if self._laxyr:
iMaxFC = len(self.lsFillColor)
iMaxLC = len(self.lsLineColor)
if False:
Nf = DecoClusterCircle.count
Nl = Nf
else:
Nf = random.randrange(iMaxFC)
Nl = random.randrange(iMaxFC)
iLineWidth = self.xpathToInt(node, self.xpLineWidth, 1)
sFillStyle = self.xpathToStr(node, self.xpFillStyle, "Solid")
for (_a, x, y, r) in self._laxyr:
#draw a circle
sFillColor = self.lsFillColor[Nf % iMaxFC]
if self.lsLineColor:
sLineColor = self.lsLineColor[Nl % iMaxLC]
else:
sLineColor = sFillColor
obj = wxh.AddCircle((x, -y), r,
LineWidth=iLineWidth,
LineColor=sLineColor,
FillColor=sFillColor,
FillStyle=sFillStyle)
# obj = wxh.AddRectangle((x, -y), (20, 20),
# LineWidth=iLineWidth,
# LineColor=sLineColor,
# FillColor=sFillColor,
# FillStyle=sFillStyle)
lo.append(obj)
"""
lo = DecoBBXYWH.draw(self, wxh, node)
x,y,w,h,inc = self.runXYWHI(node)
sLineColor = self.xpathToStr(node, self.xpLineColor, "#000000")
iLineWidth = self.xpathToInt(node, self.xpLineWidth, 1)
sFillColor = self.xpathToStr(node, self.xpFillColor, "#000000")
sFillStyle = self.xpathToStr(node, self.xpFillStyle, "Solid")
obj = wxh.AddRectangle((x, -y), (w, -h),
LineWidth=iLineWidth,
LineColor=sLineColor,
FillColor=sFillColor,
FillStyle=sFillStyle)
"""
return lo
class DecoLink(Deco):
"""A link from x1,y1 to x2,y2
"""
def __init__(self, cfg, sSurname, xpCtxt):
Deco.__init__(self, cfg, sSurname, xpCtxt)
self.xpX1, self.xpY1 = cfg.get(sSurname, "xpath_x1"), cfg.get(sSurname, "xpath_y1")
#the following expression must be evaluated twice
self.xpEvalX2, self.xpEvalY2 = cfg.get(sSurname, "eval_xpath_x2"), cfg.get(sSurname, "eval_xpath_y2")
self.xpDfltX2, self.xpDfltY2 = cfg.get(sSurname, "xpath_x2_default"), cfg.get(sSurname, "xpath_y2_default")
#now get the xpath expressions that let us find the rectangle line and fill colors
self.xpLineWidth = cfg.get(sSurname, "xpath_LineWidth")
self.xpLineColor = cfg.get(sSurname, "xpath_LineColor")
self._node = None
def __str__(self):
s = "%s="%self.__class__
s += "+(x1=%s y1=%s x2=%s y2=%s)" % (self.xpX1, self.xpY1, self.xpEvalX2, self.xpEvalY2)
return s
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
# print node.serialize()
# print self.xpX
# for n in node.xpathEval(self.xpX): print n.serialize()
iLARGENEG = -9999
lo = Deco.draw(self, wxh, node)
if self._node != node:
self._x1 = self.xpathToInt(node, self.xpX1, iLARGENEG)
self._y1 = self.xpathToInt(node, self.xpY1, iLARGENEG)
#double evaluation, and a default value if necessary
xpX2 = self.xpathToStr(node, self.xpEvalX2, '""')
self._x2 = self.xpathToInt(node, xpX2, iLARGENEG, False) #do not show any error
if self._x2 == iLARGENEG: self._x2 = self.xpathToInt(node, self.xpDfltX2, iLARGENEG)
xpY2 = self.xpathToStr(node, self.xpEvalY2, '""')
self._y2 = self.xpathToInt(node, xpY2, iLARGENEG, False) #do not show any error
if self._y2 == iLARGENEG: self._y2 = self.xpathToInt(node, self.xpDfltY2, iLARGENEG)
self._node = node
if self._x1 != iLARGENEG and self._y1 != iLARGENEG and self._x2 != iLARGENEG and self._y2 != iLARGENEG:
sLineColor = self.xpathToStr(node, self.xpLineColor, "#000000")
iLineWidth = self.xpathToInt(node, self.xpLineWidth, 1)
#draw a line
obj = wxh.AddLine( [(self._x1, -self._y1), (self._x2, -self._y2)]
, LineWidth=iLineWidth
, LineColor=sLineColor)
lo.append(obj)
return lo
class DecoClickableRectangleSetAttr(DecoBBXYWH):
"""A rectangle
    clicking on it adds/removes an attribute
the rectangle color is indicative of the presence/absence of the attribute
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoBBXYWH.__init__(self, cfg, sSurname, xpCtxt)
#now get the xpath expressions that let us find the rectangle line and fill colors
self.xpLineColor = cfg.get(sSurname, "xpath_LineColor")
self.xpLineWidth = cfg.get(sSurname, "xpath_LineWidth")
self.xpFillColor = cfg.get(sSurname, "xpath_FillColor")
self.xpFillStyle = cfg.get(sSurname, "xpath_FillStyle")
self.xpAttrName = cfg.get(sSurname, "xpath_AttrName")
self.xpAttrValue = cfg.get(sSurname, "xpath_AttrValue")
self.dInitialValue = {}
self.xpLineColorSlctd = cfg.get(sSurname, "xpath_LineColor_Selected")
self.xpLineWidthSlctd = cfg.get(sSurname, "xpath_LineWidth_Selected")
self.xpFillColorSlctd = cfg.get(sSurname, "xpath_FillColor_Selected")
self.xpFillStyleSlctd = cfg.get(sSurname, "xpath_FillStyle_Selected")
def __str__(self):
s = "%s="%self.__class__
s += DecoBBXYWH.__str__(self)
return s
def isActionable(self):
return True
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
lo = DecoBBXYWH.draw(self, wxh, node)
x,y,w,h,inc = self.runXYWHI(node)
sAttrName = self.xpathToStr(node, self.xpAttrName , None)
sAttrValue = self.xpathToStr(node, self.xpAttrValue, None)
if sAttrName and sAttrValue != None:
            if node.get(sAttrName) == sAttrValue:
sLineColor = self.xpathToStr(node, self.xpLineColorSlctd, "#000000")
iLineWidth = self.xpathToInt(node, self.xpLineWidthSlctd, 1)
sFillColor = self.xpathToStr(node, self.xpFillColorSlctd, "#000000")
sFillStyle = self.xpathToStr(node, self.xpFillStyleSlctd, "Solid")
else:
sLineColor = self.xpathToStr(node, self.xpLineColor, "#000000")
iLineWidth = self.xpathToInt(node, self.xpLineWidth, 1)
sFillColor = self.xpathToStr(node, self.xpFillColor, "#000000")
sFillStyle = self.xpathToStr(node, self.xpFillStyle, "Solid")
obj = wxh.AddRectangle((x, -y), (w, -h),
LineWidth=iLineWidth,
LineColor=sLineColor,
FillColor=sFillColor,
FillStyle=sFillStyle)
lo = [obj] + lo
return lo
def act(self, obj, node):
"""
Toggle the attribute value
"""
s = "do nothing"
sAttrName = self.xpathToStr(node, self.xpAttrName , None)
sAttrValue = self.xpathToStr(node, self.xpAttrValue, None)
if sAttrName and sAttrValue != None:
try:
initialValue = self.dInitialValue[node]
except KeyError:
                initialValue = node.get(sAttrName) #first time
self.dInitialValue[node] = initialValue
if node.get(sAttrName) == sAttrValue:
#back to previous value
if initialValue == None or initialValue == sAttrValue:
                    #very special case: when an attr was set, then saved, re-clicking on it will remove it.
del node.attrib[sAttrName]
s = "Removal of @%s"%sAttrName
else:
node.set(sAttrName, initialValue)
s = '@%s := "%s"'%(sAttrName,initialValue)
else:
if not sAttrValue:
del node.attrib[sAttrName]
s = "Removal of @%s"%sAttrName
else:
node.set(sAttrName, sAttrValue)
s = '@%s := "%s"'%(sAttrName,sAttrValue)
return s
class DecoClickableRectangleJump(DecoBBXYWH):
"""A rectangle
    clicking on it jumps to a node
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoBBXYWH.__init__(self, cfg, sSurname, xpCtxt)
#now get the xpath expressions that let us find the rectangle line and fill colors
self.xpLineColor = cfg.get(sSurname, "xpath_LineColor")
self.xpLineWidth = cfg.get(sSurname, "xpath_LineWidth")
self.xpFillColor = cfg.get(sSurname, "xpath_FillColor")
self.xpFillStyle = cfg.get(sSurname, "xpath_FillStyle")
self.xp_xTo = cfg.get(sSurname, "xpath_xTo")
self.xp_yTo = cfg.get(sSurname, "xpath_yTo")
self.xp_wTo = cfg.get(sSurname, "xpath_wTo")
self.xp_hTo = cfg.get(sSurname, "xpath_hTo")
self.xpAttrToId = cfg.get(sSurname, "xpath_ToId")
self.config = cfg.jl_hack_cfg #HACK
def __str__(self):
s = "%s="%self.__class__
s += DecoBBXYWH.__str__(self)
return s
def isActionable(self):
return True
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
lo = DecoBBXYWH.draw(self, wxh, node)
x,y,w,h,inc = self.runXYWHI(node)
sLineColor = self.xpathToStr(node, self.xpLineColor, "#000000")
iLineWidth = self.xpathToInt(node, self.xpLineWidth, 1)
sFillColor = self.xpathToStr(node, self.xpFillColor, "#000000")
sFillStyle = self.xpathToStr(node, self.xpFillStyle, "Solid")
obj = wxh.AddRectangle((x, -y), (w, -h), LineWidth=iLineWidth,
LineColor=sLineColor,
FillColor=sFillColor,
FillStyle=sFillStyle)
lo = [obj] + lo
return lo
def act(self, obj, node):
"""
return the page number of the destination
or None on error
"""
sPageTag = self.config.getPageTag()
sPageNumberAttr = self.config.getPageNumberAttr()
number = None
x,y,w,h = None, None, None, None
bbHighlight = None
sToId = self.xpathToStr(node, self.xpAttrToId , None)
if sToId:
            ln = self.xpathEval(node.getroottree().getroot(), '//*[@id="%s"]'%sToId.strip())
if ln:
#find the page number
ndTo = nd = ln[0]
#while nd and nd.name != "PAGE": nd = nd.parent
                while nd is not None and nd.tag != sPageTag: nd = nd.getparent()
try:
#number = max(0, int(nd.prop("number")) - 1)
number = max(0, self.xpathToInt(nd, sPageNumberAttr, 1, True) - 1)
#maybe we can also indicate the precise arrival point?
if self.xp_xTo and self.xp_yTo and self.xp_hTo and self.xp_wTo:
x = self.xpathToInt(ndTo, self.xp_xTo, None)
y = self.xpathToInt(ndTo, self.xp_yTo, None)
w = self.xpathToInt(ndTo, self.xp_wTo, None)
h = self.xpathToInt(ndTo, self.xp_hTo, None)
if x==None or y==None or w==None or h==None:
x,y,w,h = None, None, None, None
except:
pass
return number,x,y,w,h
class DecoClickableRectangleJumpToPage(DecoBBXYWH):
"""A rectangle
    clicking on it jumps to a page
"""
def __init__(self, cfg, sSurname, xpCtxt):
DecoBBXYWH.__init__(self, cfg, sSurname, xpCtxt)
#now get the xpath expressions that let us find the rectangle line and fill colors
self.xpLineColor = cfg.get(sSurname, "xpath_LineColor")
self.xpLineWidth = cfg.get(sSurname, "xpath_LineWidth")
self.xpFillColor = cfg.get(sSurname, "xpath_FillColor")
self.xpFillStyle = cfg.get(sSurname, "xpath_FillStyle")
self.xpAttrToPageNumber = cfg.get(sSurname, "xpath_ToPageNumber")
def __str__(self):
s = "%s="%self.__class__
s += DecoBBXYWH.__str__(self)
return s
def isActionable(self):
return True
def draw(self, wxh, node):
"""draw itself using the wx handle
return a list of created WX objects"""
lo = DecoBBXYWH.draw(self, wxh, node)
x,y,w,h,inc = self.runXYWHI(node)
sLineColor = self.xpathToStr(node, self.xpLineColor, "#000000")
iLineWidth = self.xpathToInt(node, self.xpLineWidth, 1)
sFillColor = self.xpathToStr(node, self.xpFillColor, "#000000")
sFillStyle = self.xpathToStr(node, self.xpFillStyle, "Solid")
obj = wxh.AddRectangle((x, -y), (w, -h), LineWidth=iLineWidth,
LineColor=sLineColor,
FillColor=sFillColor,
FillStyle=sFillStyle)
lo = [obj] + lo
return lo
def act(self, obj, node):
"""
return the page number of the destination
or None on error
"""
index,x,y,w,h = None,None,None,None,None
sToPageNum = self.xpathToStr(node, self.xpAttrToPageNumber , None)
if sToPageNum:
index = int(sToPageNum) - 1
return index,x,y,w,h
| 2.828125 | 3 |
ex/ex7.py | bluethon/lpthw | 0 | 12797366 | <filename>ex/ex7.py
from sys import argv
script, first, second, third = argv
print("The script is called:", script)
print("Your first variable is:", first)
print("Your second variable is:", second)
print("Your third variable is:", third)
# $ python ex13.py first 2nd 3rd
# The script is called: ex13.py
# Your first variable is: first
# Your second variable is: 2nd
# Your third variable is: 3rd
| 3.09375 | 3 |
online/build/livereload/livereload/server.py | hiphopsmurf/bitcoin-secured | 1 | 12797367 | # -*- coding: utf-8 -*-
"""livereload.app
Core Server of LiveReload.
"""
import os
import logging
import time
import mimetypes
import webbrowser
from tornado import ioloop
from tornado import escape
from tornado import websocket
from tornado.web import RequestHandler, Application
from tornado.util import ObjectDict
from tornado.options import enable_pretty_logging
from livereload.task import Task
PORT = 35729
ROOT = '.'
LIVERELOAD = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'livereload.js',
)
class LiveReloadHandler(websocket.WebSocketHandler):
waiters = set()
_last_reload_time = None
def allow_draft76(self):
return True
def on_close(self):
if self in LiveReloadHandler.waiters:
LiveReloadHandler.waiters.remove(self)
def send_message(self, message):
if isinstance(message, dict):
message = escape.json_encode(message)
try:
self.write_message(message)
except:
logging.error('Error sending message', exc_info=True)
def watch_tasks(self):
changes = Task.watch()
if not changes:
return
if time.time() - self._last_reload_time < 3:
# if you changed lot of files in one time
# it will refresh too many times
logging.info('ignore this reload action')
return
logging.info('Reload %s waiters', len(self.waiters))
msg = {
'command': 'reload',
'path': '*',
'liveCSS': True
}
self._last_reload_time = time.time()
for waiter in LiveReloadHandler.waiters:
try:
waiter.write_message(msg)
except:
logging.error('Error sending message', exc_info=True)
LiveReloadHandler.waiters.remove(waiter)
def on_message(self, message):
"""Handshake with livereload.js
1. client send 'hello'
2. server reply 'hello'
3. client send 'info'
http://help.livereload.com/kb/ecosystem/livereload-protocol
"""
message = ObjectDict(escape.json_decode(message))
if message.command == 'hello':
handshake = {}
handshake['command'] = 'hello'
protocols = message.protocols
protocols.append(
'http://livereload.com/protocols/2.x-remote-control'
)
handshake['protocols'] = protocols
handshake['serverName'] = 'livereload-tornado'
self.send_message(handshake)
if message.command == 'info' and 'url' in message:
logging.info('Browser Connected: %s' % message.url)
LiveReloadHandler.waiters.add(self)
if not LiveReloadHandler._last_reload_time:
if os.path.exists('Guardfile'):
logging.info('Reading Guardfile')
execfile('Guardfile')
else:
logging.info('No Guardfile')
Task.add(os.getcwd())
LiveReloadHandler._last_reload_time = time.time()
logging.info('Start watching changes')
ioloop.PeriodicCallback(self.watch_tasks, 800).start()
class IndexHandler(RequestHandler):
def get(self, path='/'):
abspath = os.path.join(os.path.abspath(ROOT), path.lstrip('/'))
mime_type, encoding = mimetypes.guess_type(abspath)
if not mime_type:
mime_type = 'text/html'
self.mime_type = mime_type
self.set_header('Content-Type', mime_type)
self.read_path(abspath)
def inject_livereload(self):
if self.mime_type != 'text/html':
return
ua = self.request.headers.get('User-Agent', 'bot').lower()
if 'msie' not in ua:
self.write('<script src="/livereload.js"></script>')
def read_path(self, abspath):
filepath = abspath
if abspath.endswith('/'):
filepath = os.path.join(abspath, 'index.html')
if not os.path.exists(filepath):
self.create_index(abspath)
return
elif not os.path.exists(abspath):
filepath = abspath + '.html'
if os.path.exists(filepath):
for line in open(filepath):
if '</head>' in line:
self.inject_livereload()
self.write(line)
return
self.send_error(404)
return
def create_index(self, root):
self.inject_livereload()
files = os.listdir(root)
self.write('<ul>')
for f in files:
path = os.path.join(root, f)
self.write('<li>')
if os.path.isdir(path):
self.write('<a href="%s/">%s</a>' % (f, f))
else:
self.write('<a href="%s">%s</a>' % (f, f))
self.write('</li>')
self.write('</ul>')
class LiveReloadJSHandler(RequestHandler):
def get(self):
f = open(LIVERELOAD)
self.set_header('Content-Type', 'application/javascript')
for line in f:
if '{{port}}' in line:
line = line.replace('{{port}}', str(PORT))
self.write(line)
f.close()
handlers = [
(r'/livereload', LiveReloadHandler),
(r'/livereload.js', LiveReloadJSHandler),
(r'(.*)', IndexHandler),
]
def start(port=35729, root='.', autoraise=False):
global PORT
PORT = port
global ROOT
if root is None:
root = '.'
ROOT = root
logging.getLogger().setLevel(logging.INFO)
enable_pretty_logging()
app = Application(handlers=handlers)
app.listen(port)
print('Serving path %s on 127.0.0.1:%s' % (root, port))
if autoraise:
webbrowser.open(
'http://127.0.0.1:%s' % port, new=2, autoraise=True
)
ioloop.IOLoop.instance().start()
if __name__ == '__main__':
start(8000)
| 2.25 | 2 |
app/form.py | CalebF98/tbcc-moonkin-dps-simulator | 0 | 12797368 | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField
from wtforms.fields.simple import SubmitField
from wtforms.validators import DataRequired, NumberRange
class SimParamsForm(FlaskForm):
intellect = IntegerField('Intellect', [NumberRange(0,1000)])
spellpower = IntegerField('Spellpower', [NumberRange(0,1000)])
hit_score = IntegerField('Spell Hit Rating', [NumberRange(0,202)])
crit_score = IntegerField('Spell Crit Rating', [NumberRange(0,500)])
haste_score = IntegerField('Spell Haste Rating', [NumberRange(0,1000)])
num_fights = IntegerField('# of fights to simulate', [NumberRange(1,2500)]) | 2.5 | 2 |
nba_api/stats/endpoints/teamdetails.py | AlexEidt/nba_api | 1 | 12797369 | from nba_api.stats.endpoints._base import Endpoint
from nba_api.stats.library.http import NBAStatsHTTP
class TeamDetails(Endpoint):
endpoint = 'teamdetails'
expected_data = {'TeamAwardsChampionships': ['YEARAWARDED', 'OPPOSITETEAM'], 'TeamAwardsConf': ['YEARAWARDED', 'OPPOSITETEAM'], 'TeamAwardsDiv': ['YEARAWARDED', 'OPPOSITETEAM'], 'TeamBackground': ['TEAM_ID', 'ABBREVIATION', 'NICKNAME', 'YEARFOUNDED', 'CITY', 'ARENA', 'ARENACAPACITY', 'OWNER', 'GENERALMANAGER', 'HEADCOACH', 'DLEAGUEAFFILIATION'], 'TeamHistory': ['TEAM_ID', 'CITY', 'NICKNAME', 'YEARFOUNDED', 'YEARACTIVETILL'], 'TeamHof': ['PLAYERID', 'PLAYER', 'POSITION', 'JERSEY', 'SEASONSWITHTEAM', 'YEAR'], 'TeamRetired': ['PLAYERID', 'PLAYER', 'POSITION', 'JERSEY', 'SEASONSWITHTEAM', 'YEAR'], 'TeamSocialSites': ['ACCOUNTTYPE', 'WEBSITE_LINK']}
nba_response = None
data_sets = None
player_stats = None
team_stats = None
headers = None
def __init__(self,
team_id,
proxy=None,
headers=None,
timeout=30,
get_request=True):
self.proxy = proxy
if headers is not None:
self.headers = headers
self.timeout = timeout
self.parameters = {
'TeamID': team_id
}
if get_request:
self.get_request()
def get_request(self):
self.nba_response = NBAStatsHTTP().send_api_request(
endpoint=self.endpoint,
parameters=self.parameters,
proxy=self.proxy,
headers=self.headers,
timeout=self.timeout,
)
self.load_response()
def load_response(self):
data_sets = self.nba_response.get_data_sets()
self.data_sets = [Endpoint.DataSet(data=data_set) for data_set_name, data_set in data_sets.items()]
self.team_awards_championships = Endpoint.DataSet(data=data_sets['TeamAwardsChampionships'])
self.team_awards_conf = Endpoint.DataSet(data=data_sets['TeamAwardsConf'])
self.team_awards_div = Endpoint.DataSet(data=data_sets['TeamAwardsDiv'])
self.team_background = Endpoint.DataSet(data=data_sets['TeamBackground'])
self.team_history = Endpoint.DataSet(data=data_sets['TeamHistory'])
self.team_hof = Endpoint.DataSet(data=data_sets['TeamHof'])
self.team_retired = Endpoint.DataSet(data=data_sets['TeamRetired'])
self.team_social_sites = Endpoint.DataSet(data=data_sets['TeamSocialSites'])
| 2.5625 | 3 |
seating_charts/migrations/0006_auto_20160203_1332.py | rectory-school/rectory-apps | 0 | 12797370 | <reponame>rectory-school/rectory-apps
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('seating_charts', '0005_auto_20160203_1110'),
]
operations = [
migrations.AlterModelOptions(
name='tableassignment',
options={'permissions': (('view_table_assignments', 'Can view table assignments'), ('edit_table_assignments', 'Can edit table assignments'))},
),
]
| 1.46875 | 1 |
ch4/4_3_list_of_depths.py | xuanyuwang/ctci | 0 | 12797371 | import unittest
class node():
def __init__(self, value=None):
self.value = value
self.left = None
self.right = None
def solution(root):
res = []
queue = []
queue.append(root)
while queue:
numberOfNodesInThisLevel = len(queue)
            level = [queue.pop(0) for _ in range(numberOfNodesInThisLevel)]  # pop from the front (FIFO) to keep left-to-right order
res.append(level)
for n in level:
if n.left:
queue.append(n.left)
if n.right:
queue.append(n.right)
for level in res:
print(*[n.value for n in level])
return res
class Test(unittest.TestCase):
tree_1 = node(1)
tree_1.left = node(2)
tree_1.right = node(3)
tree_1.left.left = node(4)
tree_1.left.right = node(5)
tree_1.right.left = node(6)
tree_1.right.right = node(7)
def testTree1(self):
solution(self.tree_1)
if __name__ == "__main__":
unittest.main()
| 3.984375 | 4 |
classes/dice_rolling/shadowrun_rolling.py | ephreal/rollbot | 2 | 12797372 | # -*- coding: utf-8 -*-
"""
This software is licensed under the License (MIT) located at
https://github.com/ephreal/rollbot/Licence
Please see the license for any restrictions or rights granted to you by the
License.
"""
from utils.rolling import rolling_utils
class Shadowrun3Roller():
"""
    The Shadowrun dice roller for Shadowrun 3E games.
class methods:
check_successes(target: int, rolls: list[int])
-> dict(successes: int, rolls: list[int], failure: bool)
            Checks how many integers in the rolls list meet or exceed the target int.
            Returns a dictionary with the amount of successes, the integers
            that met the target, and whether or not the roll is a failure.
        is_failure(rolls: list[int]) -> bool
            Checks to see if the roll is a failure, which is all 1's by
            Shadowrun 3E rules. Returns True if the roll is a failure.
        roll(dice_pool: int) -> list[int]
            Rolls and counts the dice according to Shadowrun 3E rules. Does
            no checks for failures or successes. Returns a list of integers
            representing the totals.
roll_initiative(dice_pool: int, modifier: int) -> initiative: int
Rolls initiative dice and adds in reaction to give the initiative
score.
"""
def __init__(self):
pass
async def check_successes(self, target, rolls):
"""
Checks the rolls to see if any of the rolls are successes
target : int
roll : list[int]
-> dict{successes: int, rolls[int], failure: Bool}
"""
rolls = [roll for roll in rolls if roll >= target]
successes = {"successes": len(rolls),
"rolls": rolls
}
if await self.is_failure(rolls):
successes["failure"] = True
else:
successes["failure"] = False
return successes
async def is_failure(self, rolls):
"""
Checks to see if the roll is a failure. This is only the case if all
items in the roll are a 1.
rolls : list[int]
-> bool
"""
ones = [x for x in rolls if x == 1]
if len(ones) == len(rolls):
return True
return False
async def roll(self, dice_pool):
"""
        Rolls and counts the dice according to Shadowrun 3E rules. This does
no checking for successes.
dice_pool : int
-> list[int]
"""
rolls = await rolling_utils.roll(dice_pool)
if 6 in rolls:
# Get the sixes and remove them from the original list.
sixes = [x for x in rolls if x == 6]
rolls = [x for x in rolls if x != 6]
added = await self.roll(len(sixes))
sixes = [sixes[i] + added[i] for i in range(0, len(sixes))]
rolls.extend(sixes)
return rolls
async def roll_initiative(self, dice_pool=1, modifier=1):
"""
        Rolls initiative dice and adds the modifier (typically reaction) in.
        dice_pool: int
        modifier: int
        -> (rolls: list[int], initiative: int)
"""
# Adding 6's does not apply to initiative. Therefore use the general
# roller.
initiative_roll = await rolling_utils.roll(dice_pool)
for i in initiative_roll:
modifier += i
return initiative_roll, modifier
class Shadowrun5Roller():
"""
TODO: Add in glitch counting.
The shadowrun roller is my handler for all shadowrun 5E related rolling
    functions. Types of rolls that are completed include
general rolling and hit counting
- Adding in additional dice with +
- removing dice with -
class variables:
roller (base_roll_functions.roller()):
A roller class that handles the actual dice rolling.
class methods:
buy_hits(dice_pool: int) -> hits: int
"buys" hits at a 1 hit : 4 dice ratio. Rounds down.
SR5E CORE pg. 45
count_hits(rolls: list[int], prime: Boolean) -> {hits, misses, ones}
Creates the amount of hits, misses, and ones in the rolls. If the
roll is designated for a prime runner, it lowers the hit threshold
by 1.
SR5E CORE pg. 44
extended_test(dice_pool: int, threshold: int, prime: boolean)
-> {success: bool, rolls: list[int], totals {total_hits: int,
running_total: list[int]}}
Runs extended tests by shadowrun 5E rules. Stops as soon as
the test has been completed rather than running through all
iterations if not needed.
SR5E CORE pg. 48
is_glitch(rolls: list[int], hits: int)
-> {glitch: bool, type: str or None}
Checks whether or not a roll is a glitch.
SR5E CORE pg. 45-46
roll(dice_pool: int, exploding: Boolean) -> list[int]:
A dice roller that handles basic dice rolling. This allows for
exploding 6's with exploding=True
SR5E CORE pg. 44
SR5E CORE pg. 56 (Edge effects)
roll_initiative(dice_pool: int, modifier: int) -> initiative: int
Rolls initiative for shadowrun 5E.
SR5E CORE pg. 159
"""
def __init__(self):
pass
async def buy_hits(self, dice_pool=0):
"""
"buys" hits at a 1 hit : 4 dice ration. Rounds down.
dice_pool: int
-> int
"""
return dice_pool // 4
async def count_hits(self, rolls, prime=False):
"""
Counts the amount of hits, misses, and ones in a list of integers.
rolls: list[int]
-> {hits, misses, ones}
"""
hit_limit = 5
# Lower the hit threshold if rolling for a prime runner
if prime:
hit_limit = 4
hits, misses, ones = 0, 0, 0
for i in rolls:
if i >= hit_limit:
hits += 1
elif i > 1:
misses += 1
else:
ones += 1
return {"hits": hits, "misses": misses, "ones": ones}
async def extended_test(self, dice_pool, threshold, prime=False,
exploding=False):
"""
Runs an extended test with a dice pool to see if it is possible to
reach a threshold. Prime will lower the threshold when counting hits
if it is True. Returns a dict with a boolean representing success
status and a list of int lists representing the rolls.
dice_pool: int
threshold: int
prime: bool
exploding: bool
-> {success, rolls, totals {total_hits, running_total}}
"""
rolls = []
totals = []
success = False
total_hits = 0
while dice_pool > 0:
roll = await self.roll(dice_pool, exploding=exploding)
if prime:
counted = await self.count_hits(roll, prime=True)
else:
counted = await self.count_hits(roll)
total_hits += counted["hits"]
totals.append(total_hits)
rolls.append({"hits": counted["hits"], "roll": roll})
dice_pool -= 1
if total_hits >= threshold:
success = True
break
return {"success": success, "rolls": rolls, "totals": {
"total_hits": total_hits,
"running_total": totals}}
async def is_glitch(self, rolls, hits):
"""
Checks whether or not a roll is a glitch.
rolls: list[int]
hits: int
-> dict{glitch: bool, type: str or None}
"""
glitch = False
glitch_type = None
ones = [x for x in rolls if x == 1]
if len(ones) > (len(rolls) // 2) and not hits:
glitch = True
glitch_type = "critical"
elif len(ones) > (len(rolls) // 2) and hits:
glitch = True
glitch_type = "normal"
return {"glitch": glitch, "type": glitch_type}
async def roll(self, dice_pool, exploding=False):
"""
A dice roller that handles basic dice rolling. This allows for
exploding 6's with exploding=True
dice_pool: int
exploding: Boolean
-> list[int]
"""
rolls = await rolling_utils.roll(dice_pool=dice_pool, sides=6)
if exploding:
sixes = [x for x in rolls if x == 6]
rolls.extend(await self.roll(len(sixes)))
rolls.sort()
return rolls
rolls.sort()
return rolls
async def roll_initiative(self, dice_pool, modifier=0):
"""
Rolls initiative for shadowrun 5E.
dice_pool: int
modifier: int
        -> (rolls: list[int], initiative: int)
"""
initiative_roll = await self.roll(dice_pool)
for i in initiative_roll:
modifier += i
return initiative_roll, modifier
| 3.234375 | 3 |
Emulator/__main__.py | samedamci/7seg-Emulator | 0 | 12797373 | <gh_stars>0
#!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from Emulator.emulator import main
main()
| 1.578125 | 2 |
sunspec2/smdx.py | mptei/pysunspec2 | 26 | 12797374 |
"""
Copyright (C) 2020 SunSpec Alliance
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import os
import xml.etree.ElementTree as ET
import sunspec2.mdef as mdef
SMDX_ROOT = 'sunSpecModels'
SMDX_MODEL = mdef.MODEL
SMDX_BLOCK = 'block'
SMDX_POINT = 'point'
SMDX_ATTR_VERS = 'v'
SMDX_ATTR_ID = 'id'
SMDX_ATTR_LEN = 'len'
SMDX_ATTR_NAME = mdef.NAME
SMDX_ATTR_TYPE = mdef.TYPE
SMDX_ATTR_COUNT = mdef.COUNT
SMDX_ATTR_VALUE = mdef.VALUE
SMDX_ATTR_TYPE_FIXED = 'fixed'
SMDX_ATTR_TYPE_REPEATING = 'repeating'
SMDX_ATTR_OFFSET = 'offset'
SMDX_ATTR_MANDATORY = mdef.MANDATORY
SMDX_ATTR_ACCESS = mdef.ACCESS
SMDX_ATTR_SF = mdef.SF
SMDX_ATTR_UNITS = mdef.UNITS
SMDX_SYMBOL = 'symbol'
SMDX_COMMENT = 'comment'
SMDX_STRINGS = 'strings'
SMDX_ATTR_LOCALE = 'locale'
SMDX_LABEL = mdef.LABEL
SMDX_DESCRIPTION = 'description'
SMDX_NOTES = 'notes'
SMDX_DETAIL = mdef.DETAIL
SMDX_TYPE_INT16 = mdef.TYPE_INT16
SMDX_TYPE_UINT16 = mdef.TYPE_UINT16
SMDX_TYPE_COUNT = mdef.TYPE_COUNT
SMDX_TYPE_ACC16 = mdef.TYPE_ACC16
SMDX_TYPE_ENUM16 = mdef.TYPE_ENUM16
SMDX_TYPE_BITFIELD16 = mdef.TYPE_BITFIELD16
SMDX_TYPE_PAD = mdef.TYPE_PAD
SMDX_TYPE_INT32 = mdef.TYPE_INT32
SMDX_TYPE_UINT32 = mdef.TYPE_UINT32
SMDX_TYPE_ACC32 = mdef.TYPE_ACC32
SMDX_TYPE_ENUM32 = mdef.TYPE_ENUM32
SMDX_TYPE_BITFIELD32 = mdef.TYPE_BITFIELD32
SMDX_TYPE_IPADDR = mdef.TYPE_IPADDR
SMDX_TYPE_INT64 = mdef.TYPE_INT64
SMDX_TYPE_UINT64 = mdef.TYPE_UINT64
SMDX_TYPE_ACC64 = mdef.TYPE_ACC64
SMDX_TYPE_IPV6ADDR = mdef.TYPE_IPV6ADDR
SMDX_TYPE_FLOAT32 = mdef.TYPE_FLOAT32
SMDX_TYPE_STRING = mdef.TYPE_STRING
SMDX_TYPE_SUNSSF = mdef.TYPE_SUNSSF
SMDX_TYPE_EUI48 = mdef.TYPE_EUI48
SMDX_ACCESS_R = 'r'
SMDX_ACCESS_RW = 'rw'
SMDX_MANDATORY_FALSE = 'false'
SMDX_MANDATORY_TRUE = 'true'
smdx_access_types = {SMDX_ACCESS_R: mdef.ACCESS_R, SMDX_ACCESS_RW: mdef.ACCESS_RW}
smdx_mandatory_types = {SMDX_MANDATORY_FALSE: mdef.MANDATORY_FALSE, SMDX_MANDATORY_TRUE: mdef.MANDATORY_TRUE}
smdx_type_types = [
SMDX_TYPE_INT16,
SMDX_TYPE_UINT16,
SMDX_TYPE_COUNT,
SMDX_TYPE_ACC16,
SMDX_TYPE_ENUM16,
SMDX_TYPE_BITFIELD16,
SMDX_TYPE_PAD,
SMDX_TYPE_INT32,
SMDX_TYPE_UINT32,
SMDX_TYPE_ACC32,
SMDX_TYPE_ENUM32,
SMDX_TYPE_BITFIELD32,
SMDX_TYPE_IPADDR,
SMDX_TYPE_INT64,
SMDX_TYPE_UINT64,
SMDX_TYPE_ACC64,
SMDX_TYPE_IPV6ADDR,
SMDX_TYPE_FLOAT32,
SMDX_TYPE_STRING,
SMDX_TYPE_SUNSSF,
SMDX_TYPE_EUI48
]
SMDX_PREFIX = 'smdx_'
SMDX_EXT = '.xml'
def to_smdx_filename(model_id):
return '%s%05d%s' % (SMDX_PREFIX, int(model_id), SMDX_EXT)
def model_filename_to_id(filename):
f = filename
if '.' in f:
f = os.path.splitext(f)[0]
try:
mid = int(f.rsplit('_', 1)[1])
except ValueError:
raise mdef.ModelDefinitionError('Error extracting model id from filename')
return mid
'''
smdx to json mapping:
fixed block -> top level group
model 'name' attribute -> group 'name'
ID point is created for model ID and 'value' is the model ID value as a number
L point is created for model len - model len has no value specified in the model definition
fixed block points are placed in top level group
    repeating block -> group with count = 0 (indicates model len should be used to determine number of groups)
repeating block 'name' -> group 'name', if no 'name' is defined 'name' = 'repeating'
points:
all type, access, and mandatory attributes are preserved
point symbol map to the symbol object and placed in the symbols list for the point
symbol 'name' attribute -> symbol object 'name'
symbol element content -> symbol object 'value'
strings 'label', 'description', 'notes' elements map to point attributes 'label', 'desc', 'detail'
'''
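# Illustrative sketch of the mapping above (abridged, not a real SunSpec model):
#   <model id="63999"><block><point id="Ena" type="enum16"/></block></model>
# becomes, roughly:
#   {'id': 63999, 'group': {'name': 'model_63999', 'type': 'group',
#                           'points': [{'name': 'ID', ...}, {'name': 'L', ...}, {'name': 'Ena', ...}]}}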
def from_smdx_file(filename):
tree = ET.parse(filename)
root = tree.getroot()
return(from_smdx(root))
def from_smdx(element):
""" Sets the model type attributes based on an element tree model type
element contained in an SMDX model definition.
Parameters:
element :
Element Tree model type element.
"""
model_def = {}
m = element.find(SMDX_MODEL)
if m is None:
raise mdef.ModelDefinitionError('Model definition not found')
try:
mid = mdef.to_number_type(m.attrib.get(SMDX_ATTR_ID))
except ValueError:
raise mdef.ModelDefinitionError('Invalid model id: %s' % m.attrib.get(SMDX_ATTR_ID))
name = m.attrib.get(SMDX_ATTR_NAME)
if name is None:
name = 'model_' + str(mid)
model_def[mdef.NAME] = name
strings = element.find(SMDX_STRINGS)
# create top level group with ID and L points
fixed_def = {mdef.NAME: name,
mdef.TYPE: mdef.TYPE_GROUP,
mdef.POINTS: [
{mdef.NAME: 'ID', mdef.VALUE: mid,
mdef.DESCRIPTION: 'Model identifier', mdef.LABEL: 'Model ID', mdef.SIZE: 1,
mdef.MANDATORY: mdef.MANDATORY_TRUE, mdef.STATIC: mdef.STATIC_TRUE, mdef.TYPE: mdef.TYPE_UINT16},
{mdef.NAME: 'L',
mdef.DESCRIPTION: 'Model length', mdef.LABEL: 'Model Length', mdef.SIZE: 1,
mdef.MANDATORY: mdef.MANDATORY_TRUE, mdef.STATIC: mdef.STATIC_TRUE, mdef.TYPE: mdef.TYPE_UINT16}
]
}
repeating_def = None
fixed = None
repeating = None
for b in m.findall(SMDX_BLOCK):
btype = b.attrib.get(SMDX_ATTR_TYPE, SMDX_ATTR_TYPE_FIXED)
if btype == SMDX_ATTR_TYPE_FIXED:
if fixed is not None:
raise mdef.ModelDefinitionError('Duplicate fixed block type definition')
fixed = b
elif btype == SMDX_ATTR_TYPE_REPEATING:
if repeating is not None:
raise mdef.ModelDefinitionError('Duplicate repeating block type definition')
repeating = b
else:
raise mdef.ModelDefinitionError('Invalid block type: %s' % btype)
fixed_points_map = {}
if fixed is not None:
points = []
for e in fixed.findall(SMDX_POINT):
point_def = from_smdx_point(e)
if point_def[mdef.NAME] not in fixed_points_map:
fixed_points_map[point_def[mdef.NAME]] = point_def
points.append(point_def)
else:
raise mdef.ModelDefinitionError('Duplicate point definition: %s' % point_def[mdef.NAME])
if points:
fixed_def[mdef.POINTS].extend(points)
repeating_points_map = {}
if repeating is not None:
name = repeating.attrib.get(SMDX_ATTR_NAME)
if name is None:
name = 'repeating'
repeating_def = {mdef.NAME: name, mdef.TYPE: mdef.TYPE_GROUP, mdef.COUNT: 0}
points = []
for e in repeating.findall(SMDX_POINT):
point_def = from_smdx_point(e)
if point_def[mdef.NAME] not in repeating_points_map:
repeating_points_map[point_def[mdef.NAME]] = point_def
points.append(point_def)
else:
raise mdef.ModelDefinitionError('Duplicate point definition: %s' % point_def[mdef.NAME])
if points:
repeating_def[mdef.POINTS] = points
fixed_def[mdef.GROUPS] = [repeating_def]
e = element.find(SMDX_STRINGS)
if e.attrib.get(SMDX_ATTR_ID) == str(mid):
m = e.find(SMDX_MODEL)
if m is not None:
for a in m.findall('*'):
if a.tag == SMDX_LABEL and a.text:
fixed_def[mdef.LABEL] = a.text
elif a.tag == SMDX_DESCRIPTION and a.text:
fixed_def[mdef.DESCRIPTION] = a.text
elif a.tag == SMDX_NOTES and a.text:
fixed_def[mdef.DETAIL] = a.text
for p in e.findall(SMDX_POINT):
pid = p.attrib.get(SMDX_ATTR_ID)
label = desc = notes = None
for a in p.findall('*'):
if a.tag == SMDX_LABEL and a.text:
label = a.text
elif a.tag == SMDX_DESCRIPTION and a.text:
desc = a.text
elif a.tag == SMDX_NOTES and a.text:
notes = a.text
point_def = fixed_points_map.get(pid)
if point_def is not None:
if label:
point_def[mdef.LABEL] = label
if desc:
point_def[mdef.DESCRIPTION] = desc
if notes:
point_def[mdef.DETAIL] = notes
point_def = repeating_points_map.get(pid)
if point_def is not None:
if label:
point_def[mdef.LABEL] = label
if desc:
point_def[mdef.DESCRIPTION] = desc
if notes:
point_def[mdef.DETAIL] = notes
model_def = {'id': mid, 'group': fixed_def}
return model_def
def from_smdx_point(element):
""" Sets the point attributes based on an element tree point element
contained in an SMDX model definition.
Parameters:
element :
Element Tree point type element.
strings :
Indicates if *element* is a subelement of the 'strings'
definintion within the model definition.
"""
point_def = {}
pid = element.attrib.get(SMDX_ATTR_ID)
if pid is None:
raise mdef.ModelDefinitionError('Missing point id attribute')
point_def[mdef.NAME] = pid
ptype = element.attrib.get(SMDX_ATTR_TYPE)
if ptype is None:
raise mdef.ModelDefinitionError('Missing type attribute for point: %s' % pid)
elif ptype not in smdx_type_types:
raise mdef.ModelDefinitionError('Unknown point type %s for point %s' % (ptype, pid))
point_def[mdef.TYPE] = ptype
plen = mdef.to_number_type(element.attrib.get(SMDX_ATTR_LEN))
if ptype == SMDX_TYPE_STRING:
if plen is None:
raise mdef.ModelDefinitionError('Missing len attribute for point: %s' % pid)
point_def[mdef.SIZE] = plen
else:
point_def[mdef.SIZE] = mdef.point_type_info.get(ptype)['len']
mandatory = element.attrib.get(SMDX_ATTR_MANDATORY, SMDX_MANDATORY_FALSE)
if mandatory not in smdx_mandatory_types:
raise mdef.ModelDefinitionError('Unknown mandatory type: %s' % mandatory)
if mandatory == SMDX_MANDATORY_TRUE:
point_def[mdef.MANDATORY] = smdx_mandatory_types.get(mandatory)
access = element.attrib.get(SMDX_ATTR_ACCESS, SMDX_ACCESS_R)
if access not in smdx_access_types:
raise mdef.ModelDefinitionError('Unknown access type: %s' % access)
if access == SMDX_ACCESS_RW:
point_def[mdef.ACCESS] = smdx_access_types.get(access)
units = element.attrib.get(SMDX_ATTR_UNITS)
if units:
point_def[mdef.UNITS] = units
    # if the scale factor is a number, convert it to the correct type
sf = mdef.to_number_type(element.attrib.get(SMDX_ATTR_SF))
if sf is not None:
point_def[mdef.SF] = sf
    # if the value is a number, convert it to the correct type
value = mdef.to_number_type(element.attrib.get(SMDX_ATTR_VALUE))
if value is not None:
point_def[mdef.VALUE] = value
symbols = []
for e in element.findall('*'):
if e.tag == SMDX_SYMBOL:
sid = e.attrib.get(SMDX_ATTR_ID)
value = e.text
try:
value = int(value)
except ValueError:
pass
symbols.append({mdef.NAME: sid, mdef.VALUE: value})
if symbols:
point_def[mdef.SYMBOLS] = symbols
return point_def
def indent(elem, level=0):
i = os.linesep + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
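# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of driving the parser above. The file name is hypothetical,
# and the top-level reader is assumed to be named from_smdx_file (only its tail
# is visible at the top of this module).
#
#     model_def = from_smdx_file('smdx_00103.xml')   # hypothetical SMDX file
#     print(model_def['id'])                         # model identifier
#     print(model_def['group'][mdef.NAME])           # top-level group name
#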
| 1.273438 | 1 |
src/mmw/apps/home/views.py | mmcfarland/model-my-watershed | 1 | 12797375 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
from django.template.context_processors import csrf
from rest_framework import serializers, viewsets
# Serializers define the API representation.
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'is_staff')
# ViewSets define the view behavior.
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
def home_page(request):
csrf_token = {}
csrf_token.update(csrf(request))
return render_to_response('home/home.html', csrf_token)
def compare(request):
return render_to_response('home/compare.html')
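# --- Illustrative wiring sketch (not part of the original file) ---
# The serializer/viewset above would typically be exposed through a DRF router
# in a urls.py elsewhere in the project; roughly (assumption, for illustration
# only):
#
#     from rest_framework import routers
#     router = routers.DefaultRouter()
#     router.register(r'users', UserViewSet)
#     # urlpatterns = [..., url(r'^api/', include(router.urls))]
#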
| 2.09375 | 2 |
pspy/pspy_utils.py | xgarrido/pspy | 6 | 12797376 | <reponame>xgarrido/pspy
"""
Utils for pspy.
"""
import os
import numpy as np
def ps_lensed_theory_to_dict(filename, output_type, lmax=None, start_at_zero=False):
"""Read a lensed power spectrum from CAMB and return a dictionnary
Parameters
----------
filename : string
the name of the CAMB lensed power spectrum you want to read
lmax : integer
the maximum multipole (spectra will be cut at)
output_type : string
'Cl' or 'Dl'
start_at_zero : boolean
if True, ps start at l=0 and cl(l=0) and cl(l=1) are set to 0
"""
fields = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
ps = {}
l, ps["TT"], ps["EE"], ps["BB"], ps["TE"] = np.loadtxt(filename, unpack=True)
ps["ET"] = ps["TE"].copy()
ps["TB"], ps["BT"], ps["EB"], ps["BE"] = np.zeros((4, len(l)))
if lmax is not None:
l = l[:lmax]
scale = l * (l + 1) / (2 * np.pi)
for f in fields:
if lmax is not None:
ps[f] = ps[f][:lmax]
if output_type == "Cl":
ps[f] /= scale
if start_at_zero:
ps[f] = np.append(np.array([0, 0]), ps[f])
if start_at_zero:
l = np.append(np.array([0, 1]), l)
return l, ps
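# Illustrative call (sketch only): the file name below is hypothetical and the
# file is assumed to be a standard CAMB lensed output with columns l, TT, EE, BB, TE.
#
#     l, ps = ps_lensed_theory_to_dict("camb_lensedCls.dat", "Dl", lmax=2000)
#     print(ps["TT"][:5])
#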
def ps_from_params(cosmo_params, output_type, lmax, start_at_zero=False):
"""Given a set of cosmological parameters compute the corresponding lensed power spectrum
You need to have camb installed to use this function
----------
cosmo_params: dict
dictionnary of cosmological parameters
# e.g cosmo_params = {"cosmomc_theta":0.0104085, "logA": 3.044, "ombh2": 0.02237, "omch2": 0.1200, "ns": 0.9649, "Alens": 1.0, "tau": 0.0544}
output_type : string
'Cl' or 'Dl'
lmax: integer
the maximum multipole to consider
start_at_zero : boolean
if True, ps start at l=0 and cl(l=0) and cl(l=1) are set to 0
else, start at l=2
"""
try:
import camb
except ModuleNotFoundError:
raise ModuleNotFoundError("you need to install camb to use this function")
if start_at_zero:
lmin = 0
else:
lmin = 2
camb_cosmo = {k: v for k, v in cosmo_params.items() if k not in ["logA", "As"]}
camb_cosmo.update({"As": 1e-10*np.exp(cosmo_params["logA"]), "lmax": lmax, "lens_potential_accuracy": 1})
pars = camb.set_params(**camb_cosmo)
results = camb.get_results(pars)
powers = results.get_cmb_power_spectra(pars, CMB_unit="muK")
l = np.arange(lmin, lmax)
ps = {spec: powers["total"][l][:, count] for count, spec in enumerate(["TT", "EE", "BB", "TE" ])}
ps["ET"] = ps["TE"]
for spec in ["TB", "BT", "EB", "BE" ]:
ps[spec] = ps["TT"] * 0
scale = l * (l + 1) / (2 * np.pi)
if output_type == "Cl":
if start_at_zero:
ps[2:] /= scale[2:]
else:
ps[:] /= scale[:]
return l, ps
def get_nlth_dict(rms_uKarcmin_T, type, lmax, spectra=None, rms_uKarcmin_pol=None, beamfile=None):
"""Return the effective noise power spectrum Nl/bl^2 given a beam file and a noise rms
Parameters
----------
rms_uKarcmin_T: float
the temperature noise rms in uK.arcmin
type: string
'Cl' or 'Dl'
lmax: integer
the maximum multipole to consider
spectra: list of strings
needed for spin0 and spin2 cross correlation, the arrangement of the spectra
rms_uKarcmin_pol: float
the polarisation noise rms in uK.arcmin
beamfile: string
the name of the beam transfer function (assuming it's given as a two column file l,bl)
"""
if beamfile is not None:
l, bl = np.loadtxt(beamfile, unpack=True)
else:
bl = np.ones(lmax + 2)
lth = np.arange(2, lmax + 2)
nl_th = {}
if spectra is None:
nl_th["TT"] = (
np.ones(lmax) * (rms_uKarcmin_T * np.pi / (60 * 180)) ** 2 / bl[2 : lmax + 2] ** 2
)
if type == "Dl":
nl_th["TT"] *= lth * (lth + 1) / (2 * np.pi)
return nl_th
else:
if rms_uKarcmin_pol is None:
rms_uKarcmin_pol = rms_uKarcmin_T * np.sqrt(2)
for spec in spectra:
nl_th[spec] = np.zeros(lmax)
nl_th["TT"] = np.ones(lmax) * (rms_uKarcmin_T * np.pi / (60 * 180)) ** 2 / bl[2 :lmax + 2] ** 2
nl_th["EE"] = np.ones(lmax) * (rms_uKarcmin_pol * np.pi / (60 * 180)) ** 2 / bl[2 :lmax + 2] ** 2
nl_th["BB"] = np.ones(lmax) * (rms_uKarcmin_pol * np.pi / (60 * 180)) ** 2 / bl[2 :lmax + 2] ** 2
if type == "Dl":
for spec in spectra:
nl_th[spec] *= lth * (lth + 1) / (2 * np.pi)
return nl_th
def read_beam_file(beamfile, lmax=None):
"""Read beam file with formal, l, bl, stuff and normalize it
Parameters
__________
beamfile: string
the name of the beam file
lmax: integer
the maximum multipole to consider
"""
beam = np.loadtxt(beamfile)
l, bl = beam[:, 0], beam[:, 1]
if lmax is not None:
l, bl = l[:lmax], bl[:lmax]
return l, bl / bl[0]
def create_binning_file(bin_size, n_bins, lmax=None, file_name=None):
"""Create a (constant) binning file, and optionnaly write it to disk
Parameters
----------
bin_size: float
the size of the bins
n_bins: integer
the number of bins
lmax: integer
the maximum multipole to consider
file_name: string
the name of the binning file
"""
bins = np.arange(n_bins)
bin_low = bins * bin_size + 2
bin_hi = (bins + 1) * bin_size + 1
bin_cent = (bin_low + bin_hi) / 2
if lmax is not None:
id = np.where(bin_hi < lmax)
bin_low, bin_hi, bin_cent = bin_low[id], bin_hi[id], bin_cent[id]
if file_name is None:
return bin_low, bin_hi, bin_cent
else:
f = open("%s" % file_name, mode="w")
for i in range(len(bin_low)):
f.write("%0.2f %0.2f %0.2f\n" % (bin_low[i], bin_hi[i], bin_cent[i]))
f.close()
def read_binning_file(file_name, lmax):
"""Read a binningFile and truncate it to lmax, if bin_low lower than 2, set it to 2.
format is bin_low, bin_high, bin_mean
Parameters
----------
binningfile: string
the name of the binning file
lmax: integer
the maximum multipole to consider
"""
bin_low, bin_hi, bin_cent = np.loadtxt(file_name, unpack=True)
id = np.where(bin_hi < lmax)
bin_low, bin_hi, bin_cent = bin_low[id], bin_hi[id], bin_cent[id]
if bin_low[0] < 2:
bin_low[0] = 2
bin_hi = bin_hi.astype(int)
bin_low = bin_low.astype(int)
bin_size = bin_hi - bin_low + 1
return bin_low, bin_hi, bin_cent, bin_size
def create_directory(name):
"""Create a directory
Parameters
----------
name: string
the name of the directory
"""
os.makedirs(name, exist_ok=True)
def naive_binning(l, fl, binning_file, lmax):
"""Bin a function of l given a binning file and lmax
Parameters
----------
l: 1d integer array
the multipoles
fl: 1d float array
the 1-dimensional function to bin
binning_file: string
the name of the binning file
lmax: integer
the maximum multipole to consider
"""
bin_low, bin_hi, bin_cent, bin_size = read_binning_file(binning_file, lmax)
n_bins = len(bin_hi)
fl_bin = np.zeros(len(bin_cent))
for ibin in range(n_bins):
loc = np.where((l >= bin_low[ibin]) & (l <= bin_hi[ibin]))
fl_bin[ibin] = (fl[loc]).mean()
return bin_cent, fl_bin
def beam_from_fwhm(fwhm_arcminute, lmax):
"""Compute the harmonic transform of the beam
given the beam full width half maximum in arcminute
Parameters
----------
fwhm_arcminute: float
full width half maximum in arcminute
lmax: integer
the maximum multipole to consider
"""
beam_fwhm_rad = np.deg2rad(fwhm_arcminute) / 60
fac = beam_fwhm_rad / np.sqrt(8 * np.log(2))
ell = np.arange(2, lmax)
bl = np.exp(-ell * (ell + 1) * fac ** 2 / 2.0)
return ell, bl
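# --- Illustrative usage (not part of the original module) ---
# A minimal sketch combining a few of the helpers above; the numbers are
# arbitrary example values, not recommended settings.
if __name__ == "__main__":
    bin_low, bin_hi, bin_cent = create_binning_file(bin_size=40, n_bins=10)
    ell, bl = beam_from_fwhm(fwhm_arcminute=1.4, lmax=400)
    nl_th = get_nlth_dict(rms_uKarcmin_T=20, type="Dl", lmax=400)
    print(bin_cent[:3], bl[:3], nl_th["TT"][:3])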
| 2.546875 | 3 |
DQN/Network.py | hojunkim13/master2048 | 0 | 12797377 | <gh_stars>0
import torch
import torch.nn as nn
class DQNNetwork(nn.Module):
def __init__(self, n_state, n_action):
super(DQNNetwork, self).__init__()
self.ConvNet = nn.Sequential(
nn.Conv2d(n_state[0], 64, 3, 1, 1),
nn.ReLU(),
nn.Conv2d(64, 256, 3, 1, 1),
nn.ReLU(),
nn.Conv2d(256, 512, 3, 1, 1),
nn.ReLU(),
nn.Conv2d(512, 256, 3, 1, 1),
nn.ReLU(),
nn.Conv2d(256, n_action, 4, 1, 0),
nn.Flatten(),
)
self.ConvNet.apply(init_weights)
self.cuda()
def forward(self, state):
x = self.ConvNet(state)
return x
def init_weights(m):
if type(m) in (nn.Linear, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01) | 2.421875 | 2 |
dynamorm/__init__.py | borgstrom/dynomallow | 81 | 12797378 | """The base module namespace simply imports the most frequently used objects to simplify imports in clients:
.. code-block:: python
from dynamorm import DynaModel
"""
from .model import DynaModel # noqa
from .indexes import (
GlobalIndex,
LocalIndex,
ProjectAll,
ProjectKeys,
ProjectInclude,
) # noqa
from .relationships import ManyToOne, OneToMany, OneToOne # noqa
from .table import Q # noqa
| 1.648438 | 2 |
my9221.py | mcauser/micropython-my9221 | 4 | 12797379 | """
MicroPython MY9221 LED driver
https://github.com/mcauser/micropython-my9221
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from time import sleep_ms
from machine import Pin
class MY9221:
def __init__(self, di, dcki, reverse=False):
self._d = di
self._c = dcki
self._r = reverse
self._d.init(Pin.OUT, value=0)
self._c.init(Pin.OUT, value=0)
def _latch(self):
self._d(0)
sleep_ms(1)
for i in range(4):
self._d(1)
self._d(0)
sleep_ms(1)
def _write16(self, data):
for i in range(15,-1,-1):
self._d((data >> i) & 1)
state = self._c()
self._c(not state)
def _begin(self):
self._write16(0) # command: 8bit mode
def _end(self):
# unused last 2 channels are required to fill the 208 bit shift register
self._write16(0)
self._write16(0)
self._latch()
def reverse(self, val=None):
if val is None:
return self._r
self._r = val
def level(self, val, brightness=255):
self._begin()
for i in range(9,-1,-1) if self._r else range(10):
self._write16(brightness if val > i else 0)
self._end()
def bits(self, val, brightness=255):
val &= 0x3FF
self._begin()
for i in range(9,-1,-1) if self._r else range(10):
self._write16(brightness if (val >> i) & 1 else 0)
self._end()
def bytes(self, buf):
self._begin()
for i in range(9,-1,-1) if self._r else range(10):
self._write16(buf[i])
self._end()
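# --- Illustrative wiring sketch (MicroPython; not part of the original module,
# and the pin numbers are assumptions) ---
#
#     from machine import Pin
#     ledbar = MY9221(di=Pin(16), dcki=Pin(17))
#     ledbar.level(5)             # light the first five of the ten segments
#     ledbar.bits(0b1010101010)   # light alternating segments
#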
| 2.640625 | 3 |
procasl/_utils.py | salma1601/process-asl-old | 1 | 12797380 | <reponame>salma1601/process-asl-old
import os
import glob
import warnings
import numpy as np
import nibabel
def _single_glob(pattern):
filenames = glob.glob(pattern)
if not filenames:
        print('Warning: non existent file with pattern {}'.format(pattern))
return None
if len(filenames) > 1:
raise ValueError('Non unique file with pattern {}'.format(pattern))
return filenames[0]
def _list_to_4d(input_files):
"""Form a 4D data from a list of 3d images.
"""
data = []
for f in input_files:
image = nibabel.load(f)
data.append(image.get_data())
data = np.array(data)
    data = np.transpose(data, (1, 2, 3, 0))
    return data
def check_images(file1, file2):
"""Check that 2 images have the same affines and data shapes.
"""
img = nibabel.load(file1)
shape1 = np.shape(img.get_data())
affine1 = img.get_affine()
img = nibabel.load(file2)
shape2 = np.shape(img.get_data())
affine2 = img.get_affine()
if shape1 != shape2:
raise ValueError('{0} of shape {1}, {2} of shape {3}'.format(
file1, shape1, file2, shape2))
if np.any(affine1 != affine2):
raise ValueError('affine for {0}: {1}, for {2}: {3}'
.format(file1, affine1, file2, affine2))
def get_vox_dims(in_file):
if isinstance(in_file, list):
in_file = in_file[0]
img = nibabel.load(in_file)
header = img.get_header()
voxdims = header.get_zooms()
return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])]
def threshold(in_file, threshold_min=-1e7, threshold_max=1e7):
img = nibabel.load(in_file)
data = img.get_data()
data[data > threshold_max] = threshold_max
data[data < threshold_min] = threshold_min
img = nibabel.Nifti1Image(data, img.get_affine(), img.get_header())
out_file, _ = os.path.splitext(in_file)
out_file += '_thresholded.nii'
if os.path.isfile(out_file):
warnings.warn('File {} exits, overwriting.'.format(out_file))
nibabel.save(img, out_file)
return out_file
def remove_nan(in_file, fill_value=0.):
img = nibabel.load(in_file)
data = img.get_data()
if np.any(np.isnan(data)):
data[np.isnan(data)] = fill_value
img = nibabel.Nifti1Image(data, img.get_affine(), img.get_header())
out_file, _ = os.path.splitext(in_file)
out_file += '_no_nan.nii'
nibabel.save(img, out_file)
return out_file
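# --- Illustrative usage (sketch only; the file names are hypothetical) ---
#
#     clipped = threshold('asl.nii', threshold_min=0., threshold_max=500.)
#     cleaned = remove_nan(clipped)
#     print(get_vox_dims(cleaned))
#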
| 2.703125 | 3 |
inkscape-set-css-class-master/set_css_class.py | ilnanny/Inkscape-addons | 3 | 12797381 | <filename>inkscape-set-css-class-master/set_css_class.py
#!/usr/bin/env python
"""
Sets a css class on selected elements, while optionally removing the elements' styling.
If inline styles are not removed, the css class might not have effect.
Inspired by MergeStyles (and best used together with it).
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (C) 2017 Mois Moshev"
__license__ = "GPL"
import inkex
import sys
class SetCSSClass(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.OptionParser.add_option("-n", "--name",
action="store", type="string",
dest="name", default="",
help="Name of css class to apply")
self.OptionParser.add_option("-c", "--clear_styles",
action="store", type="inkbool",
dest="clear_styles", default=True,
help="Name of css class to apply")
def effect(self):
newclass = self.options.name
elements = self.selected.values()
for el in elements:
current_classes = el.attrib.has_key("class") and el.attrib["class"].split() or []
if newclass not in current_classes:
current_classes.append(newclass)
if self.options.clear_styles:
el.attrib["style"] = ""
el.attrib["class"] = " ".join(current_classes)
if __name__ == "__main__":
e = SetCSSClass()
e.affect()
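# Illustrative invocation (sketch): Inkscape normally runs this extension
# itself, but from a shell it would look roughly like the following (the SVG
# path is a hypothetical example):
#
#     python set_css_class.py --name=highlight --clear_styles=true drawing.svg
#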
| 2.84375 | 3 |
hummingbot/strategy/execution2/__init__.py | anyachopra97/hummingbot | 0 | 12797382 | <reponame>anyachopra97/hummingbot
#!/usr/bin/env python
from .execution2 import Execution2Strategy
__all__ = [
    "Execution2Strategy"
]
| 0.980469 | 1 |
src/bpp/models/grant.py | iplweb/django-bpp | 1 | 12797383 | from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import CASCADE
class Grant(models.Model):
nazwa_projektu = models.TextField(blank=True, null=True)
zrodlo_finansowania = models.TextField(blank=True, null=True)
numer_projektu = models.CharField(max_length=200, unique=True)
rok = models.PositiveSmallIntegerField(null=True, blank=True)
class Meta:
verbose_name = "grant"
verbose_name_plural = "granty"
def __str__(self):
return f"{self.numer_projektu} {self.nazwa_projektu or ''}".strip()
class Grant_Rekordu(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
rekord = GenericForeignKey()
grant = models.ForeignKey(Grant, models.PROTECT)
class Meta:
verbose_name = "grant rekordu"
verbose_name_plural = "granty rekordu"
unique_together = [("grant", "content_type", "object_id")]
| 2.03125 | 2 |
self/test/preprocess_test.py | luweishuang/rasa | 0 | 12797384 | <gh_stars>0
# -*- coding: utf-8 -*-
import jieba
jieba.load_userdict("user_dict.txt")
line_list = ["查询安顺站一号风机的电压曲线",
"查询安各庄1母线的故障信息",
"开始进行南京站设备状态核实",
"看下安顺站3月1号的静态功率曲线"]
for cur_line in line_list:
seg_list = jieba.cut(cur_line.strip())
print("jieba rst: " + "/ ".join(seg_list))
| 2.703125 | 3 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/debug/views.py | osoco/better-ways-of-thinking-about-software | 3 | 12797385 | """Views for debugging and diagnostics"""
import pprint
import traceback
from codejail.safe_exec import safe_exec
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.utils.html import escape
from django.views.decorators.csrf import ensure_csrf_cookie
from common.djangoapps.edxmako.shortcuts import render_to_response
from openedx.core.djangolib.markup import HTML
@login_required
@ensure_csrf_cookie
def run_python(request):
"""
A page to allow testing the Python sandbox on a production server.
Runs in the override context "debug_run_python", so resource limits with come first from:
CODE_JAIL['limit_overrides']['debug_run_python']
and then from:
CODE_JAIL['limits']
"""
if not request.user.is_staff:
raise Http404
c = {}
c['code'] = ''
c['results'] = None
if request.method == 'POST':
py_code = c['code'] = request.POST.get('code')
g = {}
try:
safe_exec(
code=py_code,
globals_dict=g,
slug="debug_run_python",
limit_overrides_context="debug_run_python",
)
except Exception: # pylint: disable=broad-except
c['results'] = traceback.format_exc()
else:
c['results'] = pprint.pformat(g)
return render_to_response("debug/run_python_form.html", c)
@login_required
def show_parameters(request):
"""A page that shows what parameters were on the URL and post."""
html_list = []
for name, value in sorted(request.GET.items()):
html_list.append(escape(f"GET {name}: {value!r}"))
for name, value in sorted(request.POST.items()):
html_list.append(escape(f"POST {name}: {value!r}"))
return HttpResponse("\n".join(HTML("<p>{}</p>").format(h) for h in html_list))
| 2.125 | 2 |
solutions/1254_number_of_closed_islands.py | YiqunPeng/leetcode_pro | 0 | 12797386 | <filename>solutions/1254_number_of_closed_islands.py<gh_stars>0
from typing import List
class Solution:
def closedIsland(self, grid: List[List[int]]) -> int:
res = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 0:
grid[i][j] = 1
if self._dfs(grid, i, j):
res += 1
return res
def _dfs(self, grid, i, j):
closed = 0 < i < len(grid) - 1 and 0 < j < len(grid[0]) - 1
for ni, nj in [(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)]:
if 0 <= ni < len(grid) and 0 <= nj < len(grid[0]) and grid[ni][nj] == 0:
grid[ni][nj] = 1
closed = self._dfs(grid, ni, nj) and closed
return closed
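# Illustrative check (sketch): for the grid below, only the island of 0s that
# does not touch the border is closed, so the expected answer is 1.
#
#     grid = [[1, 1, 1, 1],
#             [1, 0, 0, 1],
#             [1, 0, 0, 1],
#             [1, 1, 1, 0]]
#     # Solution().closedIsland(grid) -> 1
#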
| 2.890625 | 3 |
src/tidygraphtool/graph_measures.py | jstonge/tidygraphtool | 0 | 12797387 | """Calculate metrics on graph"""
import graph_tool.all as gt
from .nodedataframe import NodeDataFrame
from .context import expect_nodes
def graph_component_count(G: gt.Graph,
directed: bool = False) -> NodeDataFrame:
expect_nodes(G)
counted_comp, _ = gt.label_components(G, directed=directed)
return NodeDataFrame({"cc": list(counted_comp)})["cc"]
def graph_largest_component(G: gt.Graph,
directed: bool = False) -> NodeDataFrame:
expect_nodes(G)
largest_comp = gt.label_largest_component(G, directed=directed)
return NodeDataFrame({"lc": list(largest_comp)})["lc"]
| 2.5625 | 3 |
pysh/transforms/alpha/lazybools.py | drslump/pysh | 3 | 12797388 | <reponame>drslump/pysh
"""
In Python it's not possible to overload the boolean operators (``not``,
``and``, ``or``) since they have short-circuiting semantics (PEP-532 is
deferred right now).
The problem manifests when trying to use a ``cmd and ok or fail``
or similar constructs, which are quite common in shell scripts. We would
like to keep that expression lazily evaluated but is not possible since
the Python interpreter will try to resolve it immediately, trigering the
evaluation of ``cmd`` to know if it should go with the ``and`` or the ``or``
branch.
This tranformation converts the above example to:
>>> OR(AND(cmd, lambda: ok), lambda: fail)
Where ``OR`` and ``AND`` are runtime helpers that will inspect the value
and delegate to it if it has defined the proper protocol:
- ``__lazyboolnot__(self)``
- ``__lazybooland__(self, rhs_callable)``
- ``__lazyboolor__(self, rhs_callable)``
.. note:: These operators do not have a reverse, the argument will always
be the right operand.
.. caution:: Since the rhs is opaque inside the lambda, we can't check it
until it resolves.
"""
from ast import NodeTransformer, copy_location, fix_missing_locations, AST, \
BoolOp, UnaryOp, And, Or, Not, Call, Lambda, arguments, Name, Load
from typing import Union
__all__ = ['__lazybooland__', '__lazyboolor__', '__lazyboolnot__']
class LazyBoolsTransformer(NodeTransformer):
""" Make logical operators aware of laziness.
"""
def visit_BoolOp(self, node: BoolOp) -> Union[UnaryOp, Call]:
self.generic_visit(node)
if isinstance(node.op, And):
runtime = '__lazybooland__'
elif isinstance(node.op, Or):
runtime = '__lazyboolor__'
else:
return node
lhs, rhs = node.values
delegate = Call(
func=Name(id=runtime, ctx=Load()),
args=[
lhs,
# Make the rhs a deferred computation by wrapping with a lambda
Lambda(
args=arguments(args=[], kwonlyargs=[], kw_defaults=[], defaults=[]),
body=rhs)
],
keywords=[])
copy_location(delegate, node)
fix_missing_locations(delegate)
return delegate
def visit_UnaryOp(self, node: UnaryOp) -> Union[UnaryOp, Call]:
self.generic_visit(node)
if not isinstance(node.op, Not):
return node
delegate = Call(
func=Name(id='__lazyboolnot__', ctx=Load()),
args=[node.operand],
keywords=[])
copy_location(delegate, node)
fix_missing_locations(delegate)
return delegate
def __lazybooland__(expr, deferred):
    if hasattr(expr, '__lazybooland__'):
        result = expr.__lazybooland__(deferred)
        if result is not NotImplemented:
            return result
    return expr and deferred()
def __lazyboolor__(expr, deferred):
    if hasattr(expr, '__lazyboolor__'):
        result = expr.__lazyboolor__(deferred)
        if result is not NotImplemented:
            return result
    return expr or deferred()
def __lazyboolnot__(expr):
    if hasattr(expr, '__lazyboolnot__'):
        result = expr.__lazyboolnot__()
        if result is not NotImplemented:
            return result
    return not expr
def parser(node: AST) -> AST:
return LazyBoolsTransformer().visit(node)
from ast import parse, dump
# print(dump(parse('lambda: 10')))
cmd = 0
ok = 'ok'
fail = 'fail'
node = parse(r'''
print( cmd and ok or not fail )
''')
node = parser(node)
print(dump(node))
co_code = compile(node, '<string>', 'exec')
eval(co_code, globals())
| 2.640625 | 3 |
users/urls.py | Bytlot/foodgram-project | 0 | 12797389 | <gh_stars>0
from django.contrib.auth import views as auth_views
from django.urls import path
from . import views
urlpatterns = [
path('signup/', views.SignUp.as_view(), name='signup'),
path(
'login/',
auth_views.LoginView.as_view(template_name='auth/authForm.html'),
name='login'
),
path(
'logout/',
auth_views.LogoutView.as_view(template_name='auth/logout.html'),
name='logout'
),
path(
'password-change/',
auth_views.PasswordChangeView.as_view(
template_name='auth/changePassword.html'
),
name='password_change'
),
path(
'password-change-done/',
auth_views.PasswordChangeDoneView.as_view(
template_name='auth/changePasswordDone.html'
),
name='password_change_done'
),
path('password-reset/', auth_views.PasswordResetView.as_view(
template_name='auth/resetPassword.html'),
name='password_reset'
),
path(
'password-reset-done/',
auth_views.PasswordResetDoneView.as_view(
template_name='auth/resetPasswordDone.html'
),
name='password_reset_done'
),
path(
'password-reset/confirm/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(
template_name='auth/resetPasswordConfirm.html'
),
name='password_reset_confirm'
),
]
| 1.789063 | 2 |
bevy/app/options.py | ZechCodes/bevy.app | 0 | 12797390 | from bevy.injection import AutoInject, detect_dependencies
from bevy.app.args import ArgumentParser, CLIArgs
from typing import Any
import os
@detect_dependencies
class Options(AutoInject):
"""The options object aggregates all options values that the Bevy.App application pulls in from the environment."""
args: CLIArgs
var_prefix = "BEVY_APP_"
path_key = "PATH"
config_file_key = "CONFIG_FILE"
logger_level_key = "LOGGER_LEVEL"
logger_name_key = "LOGGER_NAME"
def __init__(self):
self._cli_options = {}
self._env_options = self._load_env()
self._options = self._build_base_options()
def __getitem__(self, item: str) -> Any:
if item in self._cli_options:
return self._cli_options[item]
if item in self._env_options:
return self._env_options[item]
return self._options[item]
def __contains__(self, item: str) -> bool:
return item in (self._cli_options | self._env_options | self._options)
@property
def cli(self) -> dict[str, Any]:
return self._cli_options.copy()
@property
def env(self) -> dict[str, Any]:
return self._env_options.copy()
def add_using_arg_parser(self, parser: ArgumentParser):
"""Uses an ArgumentParser to populate the CLI options."""
self._cli_options.update(self.args.parse_args(parser).to_dict())
def get(self, item: str, default: Any | None = None) -> Any | None:
try:
return self[item]
except KeyError:
return default
def _build_base_options(self) -> dict[str, Any]:
return {self.path_key: self._get_path()}
def _get_path(self):
return os.getcwd()
    def _load_env(self) -> dict[str, Any]:
        return {
            key.removeprefix(self.var_prefix): value
            for key, value in os.environ.items()
            if key.startswith(self.var_prefix)
        }
| 2.703125 | 3 |
reto2.py | MiguelSanchezP/Bitsxlamarato2020 | 2 | 12797391 | f = open ("./Files/COPEDICATClinicSympt_DATA_2020-12-17_1642.csv", 'r')
data = []
for line in f:
data.append(line)
#covid and other illnesses:
COVID_vrs = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[131] == '1':
COVID_vrs.append(value)
elif value.split(',')[127] == '1' and value.split(',')[131] == '1':
COVID_vrs.append(value)
COVID_adeno = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[132] == '1':
COVID_adeno.append(value)
elif value.split(',')[127] == '1' and value.split(',')[132] == '1':
COVID_adeno.append(value)
COVID_fluA_B = []
for value in data:
    if value.split(',')[122] == '1' and value.split(',')[133] == '1':
        COVID_fluA_B.append(value)
    elif value.split(',')[127] == '1' and value.split(',')[133] == '1':
        COVID_fluA_B.append(value)
    elif value.split(',')[122] == '1' and value.split(',')[134] == '1':
        COVID_fluA_B.append(value)
    elif value.split(',')[127] == '1' and value.split(',')[134] == '1':
        COVID_fluA_B.append(value)
#covid and other more generically:
COVID_resp_vir = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[142] == '1':
COVID_resp_vir.append(value)
elif value.split(',')[127] == '1' and value.split(',')[142] == '1':
COVID_resp_vir.append(value)
COVID_bacteria = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[145] == '1':
COVID_bacteria.append(value)
elif value.split(',')[127] == '1' and value.split(',')[145] == '1':
COVID_bacteria.append(value)
COVID_inflam_diseases = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[161] == '1':
COVID_inflam_diseases.append(value)
elif value.split(',')[127] == '1' and value.split(',')[161] == '1':
COVID_inflam_diseases.append(value)
COVID_and_vaccines = []
for value in data:
    if value.split(',')[122] == '1' and value.split(',')[167] == '1':
        COVID_and_vaccines.append(value)
    elif value.split(',')[127] == '1' and value.split(',')[167] == '1':
        COVID_and_vaccines.append(value)
#covid and pathologies:
COVID_comorbidities = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[148] == '1':
COVID_comorbidities.append(value)
elif value.split(',')[127] == '1' and value.split(',')[148] == '1':
COVID_comorbidities.append(value)
#covid and more specific conditions:
COVID_cardiopathy = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[149] == '1':
COVID_cardiopathy.append(value)
elif value.split(',')[127] == '1' and value.split(',')[149] == '1':
COVID_cardiopathy.append(value)
COVID_hypertension = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[150] == '1':
COVID_hypertension.append(value)
elif value.split(',')[127] == '1' and value.split(',')[150] == '1':
COVID_hypertension.append(value)
COVID_pulm_disease = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[151] == '1':
COVID_pulm_disease.append(value)
elif value.split(',')[127] == '1' and value.split(',')[151] == '1':
COVID_pulm_disease.append(value)
COVID_asthma = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[152] == '1':
COVID_asthma.append(value)
elif value.split(',')[127] == '1' and value.split(',')[152] == '1':
COVID_asthma.append(value)
COVID_nephrology = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[153] == '1':
COVID_nephrology.append(value)
elif value.split(',')[127] == '1' and value.split(',')[153] == '1':
COVID_nephrology.append(value)
COVID_hepB = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[154] == '1':
COVID_hepB.append(value)
elif value.split(',')[127] == '1' and value.split(',')[154] == '1':
COVID_hepB.append(value)
COVID_epilepsy = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[155] == '1':
COVID_epilepsy.append(value)
elif value.split(',')[127] == '1' and value.split(',')[155] == '1':
COVID_epilepsy.append(value)
COVID_diabetes = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[156] == '1':
COVID_diabetes.append(value)
elif value.split(',')[127] == '1' and value.split(',')[156] == '1':
COVID_diabetes.append(value)
COVID_tuberculosis = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[157] == '1':
COVID_tuberculosis.append(value)
elif value.split(',')[127] == '1' and value.split(',')[157] == '1':
COVID_tuberculosis.append(value)
COVID_immunodeficiency = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[158] == '1':
COVID_immunodeficiency.append(value)
elif value.split(',')[127] == '1' and value.split(',')[158] == '1':
COVID_immunodeficiency.append(value)
COVID_neoplasia = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[159] == '1':
COVID_neoplasia.append(value)
elif value.split(',')[127] == '1' and value.split(',')[159] == '1':
COVID_neoplasia.append(value)
COVID_kawasaki = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[160] == '1':
COVID_kawasaki.append(value)
elif value.split(',')[127] == '1' and value.split(',')[160] == '1':
COVID_kawasaki.append(value)
COVID_hiv = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[162] == '1':
COVID_hiv.append(value)
elif value.split(',')[127] == '1' and value.split(',')[162] == '1':
COVID_hiv.append(value)
COVID_obesity = []
for value in data:
if value.split(',')[122] == '1' and value.split(',')[163] == '1':
COVID_obesity.append(value)
elif value.split(',')[127] == '1' and value.split(',')[163] == '1':
COVID_obesity.append(value)
| 2.9375 | 3 |
synbols/predefined_datasets.py | shikhar-srivastava/synbols | 0 | 12797392 | import logging
import numpy as np
import math
from .drawing import Camouflage, NoPattern, SolidColor, MultiGradient, ImagePattern, Gradient, Image, Symbol
from .fonts import LANGUAGE_MAP
from .generate import (
dataset_generator,
basic_attribute_sampler,
flatten_mask,
flatten_mask_except_first,
add_occlusion,
rand_seed,
)
def generate_i(n_samples, alphabet = None, language="english", font = 'calibri', set = "plain", seed=None, **kwargs):
"""[summary]
Args:
n_samples ([type]): [description]
language (str, optional): [description]. Defaults to "english".
seed ([type], optional): [description]. Defaults to None.
"""
if alphabet is None:
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
print(alphabet)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
rotation = 0
translation = (0.0,0.0)
if set == 'rotation':
rotation = (lambda rng: rng.uniform(low=0, high=1)*math.pi)
elif set == 'translation':
translation= (lambda rng: tuple(rng.uniform(low=-1, high=1, size=2)))
elif set == 'gradient':
fg = None
bg = None
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
font = font,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=rotation,
scale=0.7,
translation=translation,
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_dataset_alphabet_onlygrad(n_samples, chars, seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP['english'].get_alphabet(support_bold=False)
#print(alphabet.fonts[:10])
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
char=lambda rng: rng.choice(chars),
font=lambda rng: rng.choice(alphabet.fonts[50:55]),
is_slant=False,
is_bold=False,
rotation=0,
scale=0.7,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_dataset_alphabet(n_samples, chars, seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP['english'].get_alphabet(support_bold=False)
#print(alphabet.fonts[:10])
fg = [SolidColor((1, 1, 1)), ImagePattern(seed=123)]
bg = [SolidColor((0, 0, 0)), ImagePattern(seed=123)]
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
char=lambda rng: rng.choice(chars),
font=lambda rng: rng.choice(alphabet.fonts[50:55]),
is_slant=False,
is_bold=False,
background= lambda rng:rng.choice(bg),
foreground= lambda rng:rng.choice(fg),
rotation=0,
scale=0.7,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=0.7,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
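# Illustrative consumption sketch (not part of the original module): the exact
# structure of the generated samples depends on synbols' dataset_generator and
# is assumed here for illustration only.
#
#     for x, mask, attributes in generate_plain_dataset(n_samples=4, seed=42):
#         print(attributes['char'], attributes['font'])
#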
def generate_plain_rotated_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=lambda rng: rng.uniform(low=0, high=1)*math.pi,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_translated_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate default with translation uniformly b/w (-1,1)
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=1.0,
translation=lambda rng: tuple(rng.uniform(low=-1, high=1, size=2)),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_scaled_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=None,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_bold_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=True)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=True,
background=bg,
foreground=fg,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_italic_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate white on black, centered symbols.
The only factors of variations are font and char.
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=True,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_gradient_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate white on black, centered symbols.
The only factors of variations are font and char.
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_natural_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate white on black, centered symbols.
The only factors of variations are font and char.
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
background=lambda rng: ImagePattern(seed=rand_seed(rng)), #lambda rng: Gradient(seed=rand_seed(_rng))
foreground=lambda rng: ImagePattern(seed=rand_seed(rng)),
is_slant=False,
is_bold=False,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_camouflage_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
angle = 0
fg = Camouflage(stroke_angle=angle, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
bg = Camouflage(stroke_angle=angle + np.pi / 2, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
scale = 0.7 * np.exp(np.random.randn() * 0.1)
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=scale,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_tiny_dataset(n_samples, language="english", seed=None, **kwarg):
"""Generate a dataset of 8x8 resolution in gray scale
with scale of 1 and minimal variations.
"""
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(support_bold=False),
background=bg,
foreground=fg,
is_bold=False,
is_slant=False,
scale=1,
resolution=(8, 8),
is_gray=True,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_default_dataset(n_samples, language="english", seed=None, **kwarg):
"""Generate the default dataset,
    using gradient as foreground and background.
"""
attr_sampler = basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet())
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_solid_bg_dataset(n_samples, language="english", seed=None, **kwarg):
"""Same as default datasets, but uses white on black."""
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet(), background=bg, foreground=fg)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_natural_images_dataset(n_samples, language="english", seed=None, **kwargs):
"""Same as default dataset, but uses natural images as foreground and background."""
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(),
background=lambda rng: ImagePattern(seed=rand_seed(rng)),
foreground=lambda rng: ImagePattern(seed=rand_seed(rng)),
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_korean_1k_dataset(n_samples, seed=None, **kwarg):
"""Uses the first 1000 korean symbols"""
alphabet = LANGUAGE_MAP["korean"].get_alphabet(support_bold=True)
chars = alphabet.symbols[:1000]
fonts = alphabet.fonts
attr_sampler = basic_attribute_sampler(char=lambda rng: rng.choice(chars), font=lambda rng: rng.choice(fonts))
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_camouflage_dataset(n_samples, language="english", texture="camouflage", seed=None, **kwarg):
"""Generate a dataset where the pixel distribution
is the same for the foreground and background.
"""
def attr_sampler(seed=None):
if texture == "camouflage":
angle = 0
fg = Camouflage(stroke_angle=angle, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
bg = Camouflage(stroke_angle=angle + np.pi / 2, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
elif texture == "shade":
fg, bg = None, None
elif texture == "bw":
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
else:
raise ValueError("Unknown texture %s." % texture)
scale = 0.7 * np.exp(np.random.randn() * 0.1)
return basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(support_bold=True),
background=bg,
foreground=fg,
is_bold=True,
is_slant=False,
scale=scale,
)(seed)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_non_camou_bw_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate a black and white dataset with
the same attribute distribution as the camouflage dataset.
"""
return generate_camouflage_dataset(n_samples, language=language, texture="bw", seed=seed, **kwargs)
def generate_non_camou_shade_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate a gradient foreground and background dataset
with same attribute distribution as the camouflage dataset.
"""
return generate_camouflage_dataset(n_samples, language=language, texture="shade", seed=seed, **kwargs)
# for segmentation, detection, counting
# -------------------------------------
def generate_segmentation_dataset(n_samples, language="english", resolution=(128, 128), seed=None, **kwarg):
"""Generate 3-10 symbols of various scale
and rotation and translation (no bold).
"""
def scale(rng):
return 0.1 * np.exp(rng.randn() * 0.4)
def n_symbols(rng):
return rng.choice(list(range(3, 10)))
attr_generator = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(support_bold=False),
resolution=resolution,
scale=scale,
is_bold=False,
n_symbols=n_symbols,
)
return dataset_generator(attr_generator, n_samples, flatten_mask, dataset_seed=seed)
def generate_counting_dataset(
n_samples, language="english", resolution=(128, 128), n_symbols=None, scale_variation=0.5, seed=None, **kwarg
):
"""Generate 3-10 symbols at various scale.
Samples 'a' with prob 70% or a latin lowercase otherwise.
"""
if n_symbols is None:
def n_symbols(rng):
return rng.choice(list(range(3, 10)))
def scale(rng):
return 0.1 * np.exp(rng.randn() * scale_variation)
def char_sampler(rng):
if rng.rand() < 0.3:
return rng.choice(LANGUAGE_MAP[language].get_alphabet(support_bold=False).symbols)
else:
return "a"
attr_generator = basic_attribute_sampler(
char=char_sampler, resolution=resolution, scale=scale, is_bold=False, n_symbols=n_symbols
)
return dataset_generator(attr_generator, n_samples, flatten_mask, dataset_seed=seed)
def generate_counting_dataset_scale_fix(n_samples, seed=None, **kwargs):
"""Generate 3-10 symbols at fixed scale.
Samples 'a' with prob 70% or a latin lowercase otherwise.
"""
return generate_counting_dataset(n_samples, scale_variation=0, seed=seed, **kwargs)
def generate_counting_dataset_crowded(n_samples, seed=None, **kwargs):
"""Generate 30-50 symbols at fixed scale.
Samples 'a' with prob 70% or a latin lowercase otherwise.
"""
def n_symbols(rng):
return rng.choice(list(range(30, 50)))
return generate_counting_dataset(n_samples, scale_variation=0.1, n_symbols=n_symbols, seed=seed, **kwargs)
# for few-shot learning
# ---------------------
def all_chars(n_samples, seed=None, **kwarg):
"""Combines the symbols of all languages (up to 200 per languages).
Note: some fonts may appear rarely.
"""
symbols_list = []
for language in LANGUAGE_MAP.values():
alphabet = language.get_alphabet()
symbols = alphabet.symbols[:200]
logging.info("Using %d/%d symbols from alphabet %s", len(symbols), len(alphabet.symbols), alphabet.name)
symbols_list.extend(zip(symbols, [alphabet] * len(symbols)))
def attr_sampler(seed=None):
char, alphabet = symbols_list[np.random.choice(len(symbols_list))]
return basic_attribute_sampler(alphabet=alphabet, char=char)(seed)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_balanced_font_chars_dataset(n_samples, seed=None, **kwarg):
"""Samples uniformly from all fonts (max 200 per alphabet)
or uniformly from all symbols (max 200 per alphabet)
with probability 50%.
"""
font_list = []
symbols_list = []
for language in LANGUAGE_MAP.values():
alphabet = language.get_alphabet()
fonts = alphabet.fonts[:200]
symbols = alphabet.symbols[:200]
logging.info("Using %d/%d fonts from alphabet %s", len(fonts), len(alphabet.fonts), alphabet.name)
font_list.extend(zip(fonts, [alphabet] * len(fonts)))
logging.info("Using %d/%d symbols from alphabet %s", len(symbols), len(alphabet.symbols), alphabet.name)
symbols_list.extend(zip(symbols, [alphabet] * len(symbols)))
logging.info("Total n_fonts: %d, n_symbols: %d.", len(font_list), len(symbols_list))
def attr_sampler(seed=None):
if np.random.rand() > 0.5:
font, alphabet = font_list[np.random.choice(len(font_list))]
symbol = np.random.choice(alphabet.symbols[:200])
else:
symbol, alphabet = symbols_list[np.random.choice(len(symbols_list))]
font = np.random.choice(alphabet.fonts[:200])
return basic_attribute_sampler(char=symbol, font=font, is_bold=False, is_slant=False)(seed)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
# for active learning
# -------------------
def generate_large_translation(n_samples, language="english", seed=None, **kwarg):
"""Synbols are translated beyond the border of the image
to create a cropping effect. Scale is fixed to 0.5.
"""
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(), scale=0.5, translation=lambda rng: tuple(rng.rand(2) * 4 - 2)
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def missing_symbol_dataset(n_samples, language="english", seed=None, **kwarg):
"""With 10% probability, no symbols are drawn"""
def background(rng):
return MultiGradient(alpha=0.5, n_gradients=2, types=("linear", "radial"), seed=rand_seed(rng))
def tr(rng):
if rng.rand() > 0.1:
return tuple(rng.rand(2) * 2 - 1)
else:
return 10
attr_generator = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(), translation=tr, background=background
)
return dataset_generator(attr_generator, n_samples, dataset_seed=seed)
def generate_some_large_occlusions(n_samples, language="english", seed=None, **kwarg):
"""With probability 20%, add a large occlusion
over the existing symbol.
"""
def n_occlusion(rng):
if rng.rand() < 0.2:
return 1
else:
return 0
attr_sampler = add_occlusion(
basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet()),
n_occlusion=n_occlusion,
scale=lambda rng: 0.6 * np.exp(rng.randn() * 0.1),
translation=lambda rng: tuple(rng.rand(2) * 6 - 3),
)
return dataset_generator(attr_sampler, n_samples, flatten_mask_except_first, dataset_seed=seed)
def generate_many_small_occlusions(n_samples, language="english", seed=None, **kwarg):
"""Add small occlusions on all images.
Number of occlusions are sampled uniformly in [0,5).
"""
attr_sampler = add_occlusion(
basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet()),
n_occlusion=lambda rng: rng.randint(0, 5),
)
return dataset_generator(attr_sampler, n_samples, flatten_mask_except_first, dataset_seed=seed)
def generate_pixel_noise(n_samples, language="english", seed=None, **kwarg):
"""Add large pixel noise with probability 0.5."""
def pixel_noise(rng):
if rng.rand() > 0.1:
return 0
else:
return 0.3
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(), pixel_noise_scale=pixel_noise
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
# for font classification
# -----------------------
def less_variations(n_samples, language="english", seed=None, **kwarg):
"""Less variations in scale and rotations.
Also, no bold and no italic. This makes a more accessible font
classification task.
"""
attr_generator = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(),
is_bold=False,
is_slant=False,
scale=lambda rng: 0.5 * np.exp(rng.randn() * 0.1),
rotation=lambda rng: rng.randn() * 0.1,
)
return dataset_generator(attr_generator, n_samples, dataset_seed=seed)
DATASET_GENERATOR_MAP = {
"plain": generate_plain_dataset,
"default": generate_default_dataset,
"default-bw": generate_solid_bg_dataset,
"korean-1k": generate_korean_1k_dataset,
"camouflage": generate_camouflage_dataset,
"non-camou-bw": generate_non_camou_bw_dataset,
"non-camou-shade": generate_non_camou_shade_dataset,
"segmentation": generate_segmentation_dataset,
"counting": generate_counting_dataset,
"counting-fix-scale": generate_counting_dataset_scale_fix,
"counting-crowded": generate_counting_dataset_crowded,
"missing-symbol": missing_symbol_dataset,
"some-large-occlusion": generate_some_large_occlusions,
"many-small-occlusion": generate_many_small_occlusions,
"large-translation": generate_large_translation,
"tiny": generate_tiny_dataset,
"balanced-font-chars": generate_balanced_font_chars_dataset,
"all-chars": all_chars,
"less-variations": less_variations,
"pixel-noise": generate_pixel_noise,
"natural-patterns": generate_natural_images_dataset,
}
| 2.265625 | 2 |
Pyrado/pyrado/environment_wrappers/state_augmentation.py | jacarvalho/SimuRLacra | 0 | 12797393 | <reponame>jacarvalho/SimuRLacra
import numpy as np
from init_args_serializer import Serializable
from pyrado.environment_wrappers.base import EnvWrapper
from pyrado.environment_wrappers.utils import inner_env
from pyrado.environments.base import Env
from pyrado.utils.data_types import EnvSpec
from pyrado.spaces.box import BoxSpace
class StateAugmentationWrapper(EnvWrapper, Serializable):
""" TODO """
def __init__(self,
wrapped_env: Env,
params=None,
fixed=False):
"""
Constructor TODO
:param wrapped_env:
:param params:
:param fixed:
"""
Serializable._init(self, locals())
EnvWrapper.__init__(self, wrapped_env)
if params is not None:
self._params = params
else:
self._params = list(inner_env(self.wrapped_env).domain_param.keys())
self._nominal = inner_env(self.wrapped_env).get_nominal_domain_param()
self.nominal['dt'] = 1 / 100. # TODO ATTENTION! THIS CAN BE DEADLY! @Robin, why are you doing this?
self._nominal = np.array([self._nominal[k] for k in self._params])
self.fixed = fixed
def _params_as_tensor(self):
if self.fixed:
return self._nominal
else:
return np.array([inner_env(self.wrapped_env).domain_param[k] for k in self._params])
@property
def obs_space(self):
outer_space = self.wrapped_env.obs_space
augmented_space = BoxSpace(0.5 * self._nominal, 1.5 * self._nominal, [self._nominal.shape[0]], self._params)
return BoxSpace.cat((outer_space, augmented_space))
def step(self, act: np.ndarray):
obs, reward, done, info = self.wrapped_env.step(act)
params = self._params_as_tensor()
obs = np.concatenate((obs, params))
return obs, reward, done, info
def reset(self, init_state: np.ndarray = None, domain_param: dict = None):
obs = self.wrapped_env.reset(init_state, domain_param)
params = self._params_as_tensor()
obs = np.concatenate((obs, params))
return obs
@property
def mask(self):
return np.concatenate((np.zeros(self.wrapped_env.obs_space.flat_dim), np.ones(len(self._params))))
@property
def offset(self):
return self.wrapped_env.obs_space.flat_dim
def set_param(self, params):
newp = dict()
for key, value in zip(self._params, params):
newp[key] = value.item()
inner_env(self.wrapped_env).domain_param = newp
    def set_adv(self, params):
        # self._nominal is an array aligned with self._params, so index it by position
        for idx, (key, value) in enumerate(zip(self._params, params)):
            inner_env(self.wrapped_env).domain_param[key] = self._nominal[idx] + value
@property
def nominal(self):
return self._nominal
| 2.03125 | 2 |
WCST_Scoring.py | Wyko/WCST | 3 | 12797394 | <filename>WCST_Scoring.py
from __future__ import with_statement
import re, csv, os
from prettytable import PrettyTable
# The top level folder which contains the tests
PATH = os.getcwd() # r"C:\Users\wyko.terhaar\Downloads\Map voor Wyko\raw"
# Variables which contain the column numbers, used for readability
SUBJECT = 3
MATCH = 5
SET = 6
STIMULUS = 9
RESPONSE = 10
def saveResults(test):
with open('ANALYSIS_' + test[0]['file'] + '.csv', 'w', newline='') as csvfile:
cw = csv.writer(csvfile)
cw.writerow(['#', 'Match', "Response", 'Correct', 'Pres-To', 'Perserverative', 'Ambiguous', "2b (1st Sandwich)", '2c (Chained Sandwich)', '3.1 (Self-Perserveration)', '3.2', '3.3', 'Reasoning'])
prevMatch = ''
prevPrin = ''
for a in test:
response = ''
if a['stimulus']['color'] == a['response']['color']: response += 'C* '
else: response += 'C '
if a['stimulus']['form'] == a['response']['form']: response += 'F* '
else: response += 'F '
if a['stimulus']['number'] == a['response']['number']: response += 'N* '
else: response += 'N '
if a['perservative'] and a['ambiguous']: pers = 'A-Pers'
elif a['perservative']: pers = 'U-Pers'
else: pers = ''
if a['match'] != prevMatch: n = a['match']
else: n = ''
if a['currentPerservativeTo'] != prevPrin:
prin = a['currentPerservativeTo']
prevPrin = prin
else: prin = ''
if a['correct']: corr = '+ '
else: corr = ' '
if a['firstSandwich']: sw= '+'
else: sw = ''
if a['2c']: chain = '+'
else: chain = ''
cw.writerow([a['attemptNum'],
a['match'], #n,
response,
'+' if a['correct'] else '',
a['currentPerservativeTo'], #prin,
'+' if a['perservative'] else '',
'+' if a['ambiguous'] else '',
sw,
chain,
'X' if a['rule_3_1'] else '',
'X' if a['rule_3_2'] else '',
'X' if a['rule_3_3'] else '',
a['reasoning']
])
prevMatch = a['match']
def printTest(test):
x = PrettyTable()
# x.padding_width = 10
x.left_padding_width = 1
x.right_padding_width = 1
x.field_names = ['#', "Match", "Matched", 'Pres-To', 'Pers', "2b", '2c', '3.1', '3.2', '3.3']
prevMatch = ''
prevPrin = ''
for a in test:
response = ''
if a['stimulus']['color'] == a['response']['color']: response += 'C* '
else: response += 'C '
if a['stimulus']['form'] == a['response']['form']: response += 'F* '
else: response += 'F '
if a['stimulus']['number'] == a['response']['number']: response += 'N* '
else: response += 'N '
if a['perservative'] and a['ambiguous']: pers = 'A-Pers'
elif a['perservative']: pers = 'U-Pers'
else: pers = ''
if a['match'] != prevMatch: n = a['match']
else: n = ''
if a['currentPerservativeTo'] != prevPrin:
prin = a['currentPerservativeTo']
prevPrin = prin
else: prin = ''
if a['correct']: corr = '+ '
else: corr = ' '
if a['firstSandwich']: sw= '+'
else: sw = ''
if a['2c']: chain = '+'
else: chain = ''
x.add_row([a['attemptNum'],
a['match'], #n,
corr + response,
a['currentPerservativeTo'], #prin,
pers,
sw,
chain,
'X' if a['rule_3_1'] else '',
'X' if a['rule_3_2'] else '',
'X' if a['rule_3_3'] else '',
])
prevMatch = a['match']
print(x.get_string(title= str(test[0]['classNum']) + ', ' + str(test[0]['testNum'])+ ', ' + str(test[0]['subject']) + ', ' + test[0]['file']))
def splitStim(stim):
x = re.match(r'(^[A-Z][a-z]+)([A-Z][a-z]+)(\d+)', stim)
return {
'color' : x.group(1),
'form' : x.group(2),
'number' : x.group(3)
}
def continuePreviousPresTo(test, attemptNum):
if attemptNum > 0:
test[attemptNum]['currentPerservativeTo'] = test[attemptNum-1]['currentPerservativeTo']
test[attemptNum]['reasoning'] += 'Principle was set to ' + str(test[attemptNum]['currentPerservativeTo']) + ' to continue previous pattern.'
return True
else:
test[attemptNum]['reasoning'] += 'Principle is none due to start of test.'
return None
def checkNewSet(test, attemptNum):
# The very first attempt will never have a Principle
if attemptNum == 0: return None
if test[attemptNum]['set'] != test[attemptNum-1]['set']:
test[attemptNum]['currentPerservativeTo'] = test[attemptNum-1]['match']
test[attemptNum]['reasoning'] += ' - Principle was set to ' + str(test[attemptNum]['currentPerservativeTo']) + ' (last set match clause) because set was changed.'
return True
def checkAnswer(test, attemptNum):
# Determine how ambiguous the answer is
matchQuotient = 0 # This is the number of ways the response matches the answer
for k, v in test[attemptNum]['stimulus'].items():
if test[attemptNum]['response'][k] == v: matchQuotient += 1
# Mark whether the attempt is ambiguous
if matchQuotient > 1:
test[attemptNum]['ambiguous'] = True
else: test[attemptNum]['ambiguous'] = False
# Determine if the answer is correct
if isCorrect(test, attemptNum): test[attemptNum]['correct'] = True
else: test[attemptNum]['correct'] = False
def isCorrect(test, a):
# Determine if a response matches the stimulus on the match criteria
match = test[a]['match']
if test[a]['stimulus'][match] == test[a]['response'][match]: return True
else: return False
def checkFirstSetPers(test, attemptNum):
# Break out if this is not the first set
if test[attemptNum]['set'] != 1: return None
# Break out if this was not an incorrect answer
if test[attemptNum]['correct'] == True: return None
if test[attemptNum]['currentPerservativeTo'] is not None:
test[attemptNum]['reasoning'] += ' - Principle already set. No change for unambiguous error.'
return None
# Check if the attempt had an unambiguous incorrect answer.
# If so, set the Principle whichever principle the client matched
if (test[attemptNum]['correct'] == False and
test[attemptNum]['ambiguous'] == False):
for k, v in test[attemptNum]['stimulus'].items():
if test[attemptNum]['response'][k] == v:
test[attemptNum]['currentPerservativeTo'] = k
test[attemptNum]['set1PrincipleEstablished'] = True
test[attemptNum]['reasoning'] += ' - Principle was established as ' + k + ' from first unambiguous error.'
return True
# If the client perserverated to the Other category, Principle isn't set.
test[attemptNum]['reasoning'] += ' - Client perserverated to Other category. No Principle set.'
return None
def containsPrincipleMatch(test, attemptNum):
# Helper function which satisfies Heaton rule 2a:
# > The ambiguous response must match the
# > perseverated-to principle that is currently in
# > effect (in our example, Color as defined by
# > the previous sorting category)
# False if not principle has been set yet.
if test[attemptNum]['currentPerservativeTo'] is None: return False
pers = test[attemptNum]['currentPerservativeTo']
# Check to see if the response matches the stimulus on the current Perserveration principle.
# This would suggest that this response is perserverating
if test[attemptNum]['stimulus'][pers] == test[attemptNum]['response'][pers]:
# test[attemptNum]['reasoning'] += ' - Attempt has a response (' + test[attemptNum]['response'][pers] + ') which matches the principle (' + pers + ')'
return True
else: return False
def getMatches(test, a):
matches = []
for k, v in test[a]['stimulus'].items():
if test[a]['response'][k] == v: matches.append(k)
return matches
def checkUnambiguousPerserveration(test, attemptNum):
# Check if the attempt had an unambiguous incorrect answer. Skip the answer
# in which the Principle principle was established in the first set.
if (test[attemptNum]['correct'] == False and
test[attemptNum]['ambiguous'] == False and
test[attemptNum]['currentPerservativeTo'] is not None and
test[attemptNum]['set1PrincipleEstablished'] == False and
containsPrincipleMatch(test, attemptNum)
):
test[attemptNum]['reasoning'] += ' - Attempt is unambiguously perservative due to matching the current principle and nothing else.'
test[attemptNum]['perservative'] = True
return True
else: return False
def isSandwiched(test, attemptNum):
# It has to have a principle match to be considered perservative at all
if not containsPrincipleMatch(test, attemptNum): return False
# It has to be ambiguous to be considered for sandwich perseveration
if not test[attemptNum]['ambiguous']: return False
# First we look backwards to find if an ambiguous, potentially perservative
# response was sandwiched by an unambiguous response for the same principle
x = attemptNum - 1
sandwichBefore = False
while x > 0:
if test[x]['set'] != test[attemptNum]['set']: break
if (test[x]['ambiguous'] == False and
test[x]['perservative'] == True and
test[x]['currentPerservativeTo'] == test[attemptNum]['currentPerservativeTo']
):
sandwichBefore = True
# print (str(attemptNum) + ' Sandwiched Before by attempt ' + str(x))
break
x -= 1
if sandwichBefore == False: return False
# Next we check forwards.
y = attemptNum + 1
sandwichAfter = False
while y < len(test):
if test[y]['set'] != test[attemptNum]['set']: break
if (test[y]['ambiguous'] == False and
test[y]['perservative'] == True and
test[y]['currentPerservativeTo'] == test[attemptNum]['currentPerservativeTo']
):
sandwichAfter = True
# print (str(attemptNum) + ' Sandwiched After by attempt ' + str(y))
break
y += 1
if sandwichAfter and sandwichBefore:
#Mark the sandwich if it hasn't already been done
if not test[attemptNum]['sandwiched']: test[attemptNum]['reasoning'] += ' - Attempt ' + str(attemptNum) + ' is "sandwiched" between ' + str(x) + ' and ' + str(y)
test[attemptNum]['sandwiched'] = True
# print (str(attemptNum) + ' Sandwiched Before by attempt ' + str(x))
# print (str(attemptNum) + ' Sandwiched After by attempt ' + str(y))
# print (test[x])
# print (test[attemptNum])
# print (test[y])
# wait = input('')
return True
else:
if not 'Attempt is not sandwiched' in test[attemptNum]['reasoning']:
test[attemptNum]['reasoning'] += ' - Attempt is not sandwiched.'
return False
def isFirstSandwich(test, attemptNum):
if not isSandwiched(test, attemptNum): return False
x = attemptNum - 1
while x > 0:
if test[x]['set'] != test[attemptNum]['set']: return False
if isSandwiched(test, x): return False
# if test[x]['sandwiched']: return False
if (test[x]['ambiguous'] == False and
test[x]['perservative'] == True and
test[x]['currentPerservativeTo'] == test[attemptNum]['currentPerservativeTo']
):
test[attemptNum]['firstSandwich'] = True
test[attemptNum]['perservative'] = True
test[attemptNum]['reasoning'] += ' - Attempt is a first sandwich, matching 2a and 2b. Marking perservative.'
return True
x-=1
def isChainedSandwich(test, attemptNum):
if not isSandwiched(test, attemptNum): return False
if isFirstSandwich(test, attemptNum): return False
x = attemptNum - 1
while x > 0:
if test[x]['set'] != test[attemptNum]['set']: return False
# Check to see if we found the bread
if (test[x]['ambiguous'] == False and
test[x]['perservative'] == True and
test[x]['currentPerservativeTo'] == test[attemptNum]['currentPerservativeTo']
):
break
# If any of the preceeding attempts before the "bread" aren't also
# sandwiches, then 2c doesn't apply
if not isSandwiched(test, x): return False
x -= 1
# Next we check forwards.
y = attemptNum + 1
while y < len(test):
if test[y]['set'] != test[attemptNum]['set']: return False
# Check to see if we found the bread
if (test[y]['ambiguous'] == False and
test[y]['perservative'] == True and
test[y]['currentPerservativeTo'] == test[attemptNum]['currentPerservativeTo']
):
break
# If any of the preceeding attempts before the "bread" aren't also
# sandwiches, then 2c doesn't apply
if not isSandwiched(test, y): return False
y += 1
# print('Holy shit, we found one on attempt ', attemptNum)
# print (test[attemptNum])
return True
def checkChainedSandwich(test, a):
if isChainedSandwich(test, a):
test[a]['2c'] = True
test[a]['reasoning'] += ' - Attempt is chain sandwich perservative per 2c'
test[a]['perservative'] = True
return True
else:
test[a]['2c'] = False
if (
test[a]['sandwiched'] and
'NOT perservative per 2c' not in test[a]['reasoning']
): test[a]['reasoning'] += ' - Sandwiched attempt is NOT perservative per 2c'
return False
def checkSelfPerserveration(test, a):
# 1. The client must make 3 unambiguous errors to a sorting principle
# which is neither correct nor currently perserverative.
#
# 2. All responses between the first and third unambiguous response must
# match this sorting principle.
#
# 3. The new principle becomes active to register perserverations only
# after the second unambiguous error.
# First, we check to see if this is an unambiguous error
# to something other than the current principle
matches = getMatches(test, a)
if len(matches) != 1: return False # One match for an unambiguous result
if test[a]['currentPerservativeTo'] in matches: return False
if isCorrect(test, a): return False # Make sure it's an error
match = matches[0]
# If we get here, then we know the attempt is a candidate for the first indicator.
# Let's look ahead for more indicators!
x = a
unambiguousMatches = [x,] # We need 3 to confirm self-perserveration
    # print('Added first', x)
while x < len(test)-1:
x+=1
# Make sure the potential principle is matched in all subsequent attempts
# This covers the intermediate results
tempMatches = getMatches(test, x)
if not match in tempMatches: return False
# Now we look for the last two unambiguous errors to the new, not currently
# perserverative principle
if len(tempMatches) != 1: continue # Ensure it is an unambiguous result
if isCorrect(test, x): continue # Make sure it's an error
if test[x]['currentPerservativeTo'] in tempMatches: continue # Not currently pers
# It's a match!
unambiguousMatches.append(test[x]['attemptNum'])
if len(unambiguousMatches) == 3: break
if len(unambiguousMatches) != 3: return False
# print(str(test[0]['classNum']) + ', ' + str(test[0]['testNum'])+ ', ' + str(test[0]['subject']), '\n', unambiguousMatches, '\n')
test[unambiguousMatches[0]]['rule_3_1'] = True
test[unambiguousMatches[0]]['reasoning'] += ' - Rule 3: First unambiguous self-perserveration'
test[unambiguousMatches[1]]['rule_3_2'] = True
test[unambiguousMatches[1]]['reasoning'] += ' - Rule 3: Second unambiguous self-perserveration after attempt ' + str(test[unambiguousMatches[0]]['attemptNum'])
test[unambiguousMatches[2]]['rule_3_3'] = True
test[unambiguousMatches[2]]['reasoning'] += ' - Rule 3: Final unambiguous self-perserveration'
# Set all responses from the first untill the second response (not
# including the second response) as unscorable
x = unambiguousMatches[0]
while x < unambiguousMatches[1]:
test[x]['currentPerservativeTo'] = None
test[x]['reasoning'] += ' - Rule 3: Set to unscorable for first to second responses'
# print (test[x])
x+=1
#################################
# Principle is not set for future attempts. Maybe we also need to have the category changer run after this?
#####################################
# Set all the rest, up to the next set change, to the new principle
x = unambiguousMatches[1]
while (x < len(test) and test[x]['set'] == test[unambiguousMatches[1]]['set']):
test[x]['currentPerservativeTo'] = match
test[x]['reasoning'] += ' - Rule 3: Principle set to ' + match + ' due to self-perserveration'
# print("Test ", x, " principle set to ", match)
x+=1
def analyzeTest(fullpath, testNum, classNum):
test = []
# Open the file and read it into memory
with open (fullpath) as f: lines = f.readlines()
# Iterate through the test and format it for analysis. Skip the headers.
lines.pop(0)
lineCount = 0
for line in lines:
# Split the test report into an array of Dicts
# Added some error handling because the text files aren't always clean.
try:
attempt = line.split()
test.append({
'file' : os.path.basename(fullpath),
'attemptNum' : lineCount,
'subject' : int(attempt[SUBJECT]),
'set' : int(attempt[SET]),
'match' : attempt[MATCH],
'stimulus' : splitStim(attempt[STIMULUS]),
'response' : splitStim(attempt[RESPONSE]),
'testNum' : testNum,
'2c' : '',
'classNum' : classNum,
'currentPerservativeTo' : None, # Stores the currently active Pres-To Principle
'reasoning' : '', # Contains plaintext reasoning to help verify results
# The following are all boolean
'correct' : False,
'perservative' : False,
'ambiguous' : False,
'sandwiched' : False,
'firstSandwich' : False,
'set1PrincipleEstablished' : False, # Will change to true if the principle changed this attempt in the first set
'rule_3_1' : False, # These are to show matches for Heaton's rule 3
'rule_3_2' : False, # for self perserverations
'rule_3_3' : False,
})
except: print ('There was an error reading line ' + str(lineCount) + ' in file: \n' + fullpath)
lineCount += 1
# First pass: Analyze the data with a set of rules
for attempt in test:
        # 1. Set the principle the same as last attempt. The current
# principle will be the same as the last attempt, unless
# it changes in a subsequent rule.
continuePreviousPresTo(test, attempt['attemptNum'])
# 2. Check if we just moved into a new set, and adjust the principle accordingly
checkNewSet(test, attempt['attemptNum'])
# 3. Check if the attempt was an error
checkAnswer(test, attempt['attemptNum'])
# 4. If Principle has not been determined (first set) then the first unambiguous
# incorrect answer determines the first-set's Principle
checkFirstSetPers(test, attempt['attemptNum'])
for attempt in test:
# 5. Heaton's rule 3: Find self-made perserverations
checkSelfPerserveration(test, attempt['attemptNum'])
for attempt in test:
# 6. If this was an unambiguous error matching the perservative-to rule, AND
# the rule was not established this attempt (on the first set).
# We have to know this for every rule before we can go on to the more complicated
# tests, so we finish the loop this way and then loop again
checkUnambiguousPerserveration(test, attempt['attemptNum'])
for attempt in test:
# 7. Now we start looking for ambiguous perserverations. Here we check the
# "sandwich rule."
isSandwiched(test, attempt['attemptNum'])
for attempt in test:
# 8. Check if the sandwiched ambiguous answers are the first ones
isFirstSandwich(test, attempt['attemptNum'])
for attempt in test:
# 9. Check rule 2c for chained perserverations
checkChainedSandwich(test, attempt['attemptNum'])
# Return the fully populated and analyzed test object
# printTest(test)
saveResults(test)
return test
# Iterate through each file in each folder in the PATH variable
allTests = []
for path, dirs, files in os.walk(PATH):
if path == PATH: continue # Skip the root
for filename in files:
if '.py' in filename: continue # Skip any python files
if 'ANALYSIS' in filename: continue # Skip any generated analysis files
# if not 'iqdat' in filename: continue
fullpath = os.path.join(path, filename) # Get the filename for each file
# Get the test number and class number from the directory names
p = path.split('\\')
classNum = p[len(p)-1]
testNum = p[len(p)-2]
allTests.append(analyzeTest(fullpath, testNum, classNum))
for t in allTests:
totalCorrect = 0
totalError = 0
totalSetsAttempted = 0
totalNonPerserverative = 0
totalNonPerserverativeErrors = 0
totalPerserverative = 0
totalTrials = 0
for a in t:
if a['correct']: totalCorrect +=1
if not a['correct']: totalError +=1
totalSetsAttempted = a['set'] # Will end up being the final set
if a['perservative']: totalPerserverative +=1
if not a['perservative']: totalNonPerserverative +=1
if (not a['perservative'] and not a['correct']): totalNonPerserverativeErrors +=1
totalTrials+=1
with open('SUMMARY_' + t[0]['testNum'] + '_' + t[0]['classNum'] + '.csv', 'a', newline='') as csvfile:
cw = csv.writer(csvfile)
if os.stat('SUMMARY_' + t[0]['testNum'] + '_' + t[0]['classNum'] + '.csv').st_size == 0:
cw.writerow(['Class', 'Subject', 'Sets Attempted', 'Correct', 'Errors', 'Non-Perserverative', 'Non-Perserverative Errors', 'Perserverative', 'Trials', '% Perserverative'])
cw.writerow([
t[0]['classNum'],
t[0]['subject'],
totalSetsAttempted,
totalCorrect,
totalError,
totalNonPerserverative,
totalNonPerserverativeErrors,
totalPerserverative,
totalTrials,
str(round((totalPerserverative / totalTrials)*100, 2)) + '%',
])
| 2.78125 | 3 |
shodohflo/redis_handler.py | m3047/shodoflo | 10 | 12797395 | <filename>shodohflo/redis_handler.py<gh_stars>1-10
#!/usr/bin/python3
# Copyright (c) 2019 by <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Redis Handler.
The Redis Handler is an asyncio event sink using a ThreadPoolExecutor to post to
Redis.
"""
import logging
import traceback
import asyncio
from concurrent.futures import ThreadPoolExecutor
import redis
class RedisBaseHandler(object):
"""Handles calls to Redis so that they can be run in a different thread."""
CONNECT_TIMEOUT = 5
def redis_server(self):
"""Needs to be subclassed to return the address of the Redis server."""
pass
def __init__(self, event_loop, ttl_grace):
self.redis = redis.client.Redis(self.redis_server(), decode_responses=True,
socket_connect_timeout=self.CONNECT_TIMEOUT
)
# NOTE: Tried to do this with a BlockingConnectionPool but it refused to connect
# to anything but localhost. I don't think it matters, the ThreadPoolExecutor
# should limit the number of connections to the number of threads, which is 1.
#connection_pool=redis.connection.BlockingConnectionPool(
#max_connections=2,timeout=5)
#)
self.executor = ThreadPoolExecutor(max_workers=1)
self.event_loop = event_loop
self.ttl_grace = ttl_grace
# NOTE: This could be protected by a lock, but setting it True is final so it
# it doesn't really matter. Worst thing that happens is that multiple
# errors get logged. (Look at subclasses to see how this is used.)
self.stop = False
return
def client_to_redis(self, client_address):
"""Called internally by the other *_to_redis() methods to update the client."""
k = 'client;{}'.format(client_address)
self.redis.incr(k)
self.redis.expire(k, self.ttl_grace)
return
def submit(self, func, *args):
"""Submit a Redis update to run."""
if self.stop:
self.event_loop.stop()
return
self.event_loop.run_in_executor(self.executor, func, *args)
return
def redis_executor(self, func, *args):
"""Encapsulate exceptions which might occur within redis threads.
All calling of Redis network functions is done inside of one of these
blocks.
"""
try:
func(*args)
        except redis.exceptions.ConnectionError as e:
if not self.stop:
logging.error('redis.exceptions.ConnectionError: {}'.format(e))
self.stop = True
except Exception as e:
if not self.stop:
traceback.print_exc()
self.stop = True
return
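# --- Usage sketch (added for illustration; not part of shodohflo) ---
# RedisBaseHandler is meant to be subclassed with a concrete redis_server().
# Everything below is hypothetical: the address, the TTL and the client_seen()
# update were invented for this example; only the base-class calls are real.
if __name__ == "__main__":
    class ExampleHandler(RedisBaseHandler):
        def redis_server(self):
            return '127.0.0.1'
        def client_seen(self, client_address):
            # run the Redis update in the executor thread; redis_executor()
            # logs a connection error once and then stops further submissions
            self.submit(self.redis_executor, self.client_to_redis, client_address)
    loop = asyncio.get_event_loop()
    handler = ExampleHandler(loop, ttl_grace=900)
    handler.client_seen('10.0.0.1')
    loop.run_until_complete(asyncio.sleep(0.1))  # give the executor a moment to run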
| 2.859375 | 3 |
src/lib/models/networks/GN.py | hz-ants/CenterPose- | 96 | 12797396 | <gh_stars>10-100
from torch import nn
def group_norm(out_channels):
num_groups = 32
if out_channels % 32 == 0:
return nn.GroupNorm(num_groups, out_channels)
else:
return nn.GroupNorm(num_groups // 2, out_channels)
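# --- Usage sketch (added for illustration; not part of the original file) ---
# group_norm() uses 32 groups when the channel count is divisible by 32 and
# falls back to 16 groups otherwise.
if __name__ == "__main__":
    import torch
    assert group_norm(64).num_groups == 32   # 64 % 32 == 0 -> 32 groups
    assert group_norm(48).num_groups == 16   # otherwise -> 16 groups
    x = torch.randn(2, 48, 8, 8)
    print(group_norm(48)(x).shape)           # torch.Size([2, 48, 8, 8])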
| 2.59375 | 3 |
test/test_builtins/test_arithmetics.py | dragonteros/unsuspected-hangeul | 62 | 12797397 | <filename>test/test_builtins/test_arithmetics.py
from test.test_base import TestBase
class TestArithmetics(TestBase):
def test_multiply(self):
_test = self._assert_execute
_test('ㄱ ㄴ ㄷ ㄹ ㄱㅎㅁ', "0")
_test('ㄴㄱ ㄴ ㄷ ㄹ ㄱㅎㅁ', "-6")
_test('(ㄷ ㄴㄱ ㅅㅎㄷ) ㄷ ㄱㅎㄷ', "1.0")
_test('(ㄷ ㄴㄱ ㅅㅎㄷ) ㄷ ㅄㅎㄴ ㄱㅎㄷ', "1+0i")
_test('(ㄷ ㄴㄱ ㅄㅎㄷ) (ㄷ ㄴ ㅄㅎㄷ) ㄱㅎㄷ', "5+0i")
_test('(ㄱ ㄱ ㅈㅎㄷ) (ㄱ ㄴ ㄴㅎㄷ) ㄱㅎㄷ', "False")
_test('(ㄱ ㄱ ㅈㅎㄷ) (ㄱ ㄴ ㅈㅎㄷ) ㄱㅎㄷ', "False")
_test('(ㄱ ㄱ ㄴㅎㄷ) (ㄱ ㄴ ㅈㅎㄷ) (ㅈㅈㅎㄱ) ㄱㅎㄹ', "True")
def test_add(self):
_test = self._assert_execute
_test('ㄱ ㄴ ㄷ ㄹ ㄷㅎㅁ', "6")
_test('ㄴㄱ ㄴ ㄷ ㄹㄱ ㄷㅎㅁ', "-1")
_test('(ㄷ ㄴㄱ ㅅㅎㄷ) ㄷ ㄷㅎㄷ', "2.5")
_test('(ㄷ ㄴㄱ ㅅㅎㄷ) ㄷ ㅄㅎㄴ ㄷㅎㄷ', "2.5+0i")
_test('(ㄷ ㄴㄱ ㅄㅎㄷ) (ㄷ ㄴ ㅄㅎㄷ) ㄷㅎㄷ', "4+0i")
_test('(ㄱ ㄱ ㅈㅎㄷ) (ㄱ ㄴ ㄴㅎㄷ) ㄷㅎㄷ', "False")
_test('(ㄱ ㄱ ㅈㅎㄷ) (ㄱ ㄴ ㅈㅎㄷ) ㄷㅎㄷ', "True")
_test('(ㄱ ㄱ ㄴㅎㄷ) (ㄱ ㄴ ㅈㅎㄷ) (ㅈㅈㅎㄱ) ㄷㅎㄹ', "True")
_test('ㅁㅀㄱ ㅁㅀㄱ ㄷㅎㄷ', "[]")
_test('ㄱ ㅁㅀㄴ ㅁㅀㄱ ㄷㅎㄷ', "[0]")
_test('ㄱ ㅁㅀㄴ ㄴ ㄷ ㅁㅀㄷ ㄷㅎㄷ', "[0, 1, 2]")
_test('ㄱ ㅁㅀㄴ ㄷㅎㄴ', "[0]")
_test('ㅀㄱ ㅀㄱ ㄱㅇㄱ ㄴㅇㄱ ㄷㅎㄷ ㄳㅎㄶ ㄱㅀㄹ', "''", '\n\n')
_test('ㅀㄱ ㅀㄱ ㄱㅇㄱ ㄴㅇㄱ ㄷㅎㄷ ㄳㅎㄶ ㄱㅀㄹ', "'불꽃'", '불\n꽃')
_test('ㅅㅈㅎㄱ ㅅㅈㅎㄱ ㄷㅎㄷ', "{}")
_test('ㅅㅈㅎㄱ ㅅㅈㅎㄱ ㄴ ㄷ ㅅㅈㅎㄷ ㅅㅈㅎㄱ ㄷㅎㅁ', "{1: 2}")
_test('ㄱ ㄴ ㅅㅈㅎㄷ ㄴ ㄷ ㅅㅈㅎㄷ ㄷㅎㄷ', "{0: 1, 1: 2}")
_test('ㄱ ㄴ ㅅㅈㅎㄷ ㄷ ㄹ ㅅㅈㅎㄷ ㅁ ㅂ ㅅㅈㅎㄷ ㄷㅎㄹ', "{0: 1, 2: 3, 4: 5}")
_test('ㄱ ㄴ ㅅㅈㅎㄷ ㄷ ㄹ ㅅㅈㅎㄷ ㄷ ㅁ ㅅㅈㅎㄷ ㄷㅎㄹ', "{0: 1, 2: 4}")
_test('(ㅂ ㅂ ㅂㅎㄷ) (ㅁㅈㅎㄱ ㄱ ㄴ ㄱㅇㄱㅎㄷㅎㄴ) ㄷㅎㄴ ㅎㅎㄴ', "b''")
_test('(ㄱ ㄴ ㅂ ㅂ ㅂㅎㄷㅎㄷ) (ㅁㅈㅎㄱ ㄱㅇㄱㅎㄴ) (ㅁㅈㅎㄱ ㄱㅇㄱㅎㄴ) ㄷㅎㄷ ㅎㅎㄴ', "b''")
_test('(ㅂ ㅂ ㅂㅎㄷ) (ㄳㄱ ㄴ ㄴ ㄱㅇㄱㅎㄷㅎㄴ) (ㅁㅈㅎㄱ ㄱ ㄴ ㄱㅇㄱㅎㄷㅎㄴ) ㄷㅎㄷ ㅎㅎㄴ', "b'\\x30'")
_test('(ㅂ ㅂ ㅂㅎㄷ) (ㄳㄱ ㄴ ㄴ ㄱㅇㄱㅎㄷㅎㄴ) (ㄴ ㅁㅈㅎㄴ ㄱ ㄴ ㄱㅇㄱㅎㄷㅎㄴ) ㄷㅎㄷ ㅎㅎㄴ',
"b'\\x30\\x31'")
def test_exponentiate(self):
_test = self._assert_execute
_test('ㄱ ㄴ ㅅㅎㄷ', "0")
_test('ㄴㄱ ㄴ ㅅㅎㄷ', "-1")
_test('ㄷ ㄴㄱ ㅅㅎㄷ', "0.5")
_test('ㄴㄱ ㄴㄱ ㅅㅎㄷ', "-1.0")
_test('ㄱ ㅄㅎㄴ ㄴ ㅅㅎㄷ', "0i")
_test('ㄱ ㄴ ㅄㅎㄷ ㄷ ㅅㅎㄷ', "-1+0i")
_test('ㄴ ㄴ ㅄㅎㄷ ㄷ ㅅㅎㄷ ㄱ ㄷ ㅄㅎㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', "True")
_test('(ㄴ ㄴ ㅄㅎㄷ ㄴㄱ ㅅㅎㄷ ㄷ ㄱㅎㄷ) ㄴ ㄴㄱ ㅄㅎㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', "True")
def test_floor_div(self):
_test = self._assert_execute
_test('ㄱ ㄴ ㄴㄶㄷ', "0")
_test('ㄴ ㄴ ㄴㄶㄷ', "1")
_test('ㄴㄱ ㄴ ㄴㄶㄷ', "-1")
_test('ㄷ ㄴㄱ ㄴㄶㄷ', "-2")
_test('ㄹ ㄷ ㄴㄶㄷ', "1")
_test('ㄺ ㄷ ㄴㄶㄷ', "-1")
_test('ㄹ ㄷㄱ ㄴㄶㄷ', "-1")
_test('ㄺ ㄷㄱ ㄴㄶㄷ', "1")
_test('ㄹ (ㄷ ㄴㄱ ㅅㅎㄷ) ㄴㄶㄷ', "6.0")
_test('(ㄷ ㄴㄱ ㅅㅎㄷ) ㄹ ㄴㄶㄷ', "0.0")
def test_modulo(self):
_test = self._assert_execute
_test('ㄱ ㄴ ㄴㅁㅎㄷ', "0")
_test('ㄴ ㄴ ㄴㅁㅎㄷ', "0")
_test('ㄴㄱ ㄴ ㄴㅁㅎㄷ', "0")
_test('ㄷ ㄴㄱ ㄴㅁㅎㄷ', "0")
_test('ㄹ ㄷ ㄴㅁㅎㄷ', "1")
_test('ㄺ ㄷ ㄴㅁㅎㄷ', "-1")
_test('ㄹ ㄷㄱ ㄴㅁㅎㄷ', "1")
_test('ㄺ ㄷㄱ ㄴㅁㅎㄷ', "-1")
_test('ㄹ (ㄷ ㄴㄱ ㅅㅎㄷ) ㄴㅁㅎㄷ', "0.0")
_test('(ㄷ ㄴㄱ ㅅㅎㄷ) ㄹ ㄴㅁㅎㄷ', "0.5")
| 2.890625 | 3 |
waapi/test/test_large_payload.py | kakyoism/waapi-client-python | 23 | 12797398 | <reponame>kakyoism/waapi-client-python<filename>waapi/test/test_large_payload.py
import unittest
from waapi import WaapiClient, connect, CannotConnectToWaapiException
class LargePayload(unittest.TestCase):
def test_large_rpc(self):
with WaapiClient() as client:
result = client.call("ak.wwise.core.object.get", {
"from": {
"name": ["GameParameter:a" + str(n) for n in range(5000)],
}
})
self.assertTrue(client.is_connected())
if __name__ == "__main__":
LargePayload().test_large_rpc() | 2.40625 | 2 |
scripts/generate_gan_data.py | ssundaram21/6.819FinalProjectRAMP | 2 | 12797399 | <reponame>ssundaram21/6.819FinalProjectRAMP
import pickle
import numpy as np
import pandas as pd
import tensorflow as tf
import PIL.Image
import imageio
# import tfutils
import matplotlib.pyplot as plt
import os
import sys
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
network_path = "/om/user/shobhita/src/chexpert/CheXpert GAN/"
output_data_path = "/om/user/shobhita/src/chexpert/gan_fake_data/"
real_data_path = "/om/user/shobhita/src/chexpert/data/"
names = ['No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity',
'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis',
'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture',
'Support Devices']
tf.InteractiveSession()
# Import pretrained Chexpert GAN.
with open(network_path + "network-final.pkl", 'rb') as file:
G, D, Gs = pickle.load(file)
real_labels = pd.read_csv(real_data_path + "CheXpert-v1.0-small/train_preprocessed.csv")
classes_to_generate = ["Lung Lesion", "Pleural Other", "Fracture"]
total = len(real_labels)
lesion = sum(real_labels["Lung Lesion"])
pleural = sum(real_labels["Pleural Other"])
fracture = sum(real_labels["Fracture"])
lesion_n, pleural_n, fracture_n = int(lesion*1.65), int(pleural*3.65), int(fracture*1.95)
total_gen = total + lesion_n + pleural_n + fracture_n
print("Lesion: {}/{} + {} --> {}/{}".format(lesion, lesion/total, lesion_n, lesion+lesion_n, (lesion+lesion_n)/(total+total_gen)))
print("Pleural: {}/{} + {} --> {}/{}".format(pleural, pleural/total, pleural_n, pleural+pleural_n, (pleural+pleural_n)/(total + total_gen)))
print("Fracture: {}/{} + {} --> {}/{}".format(fracture, fracture/total, fracture_n, fracture+fracture_n, (fracture+fracture_n)/(total + total_gen)))
sys.stdout.flush()
label_vectors = {}
for cat, n in zip(classes_to_generate, [lesion_n, pleural_n, fracture_n]):
relevant_labels = real_labels[real_labels[cat] == 1]
new_labels = relevant_labels.sample(n, replace=True)[names].to_numpy()
label_vectors[cat] = new_labels
for cat, arr in label_vectors.items():
print("{}: {}".format(cat, arr.shape))
labels_save = {}
for cat in classes_to_generate:
labels = label_vectors[cat]
batch = 1
used_labels = []
used_imgname = []
latents_raw = np.random.RandomState(1000).randn(labels.shape[0], *Gs.input_shapes[0][1:])
total_num = latents_raw.shape[0]
print("Generating {}".format(cat))
sys.stdout.flush()
for n in range(int(total_num / batch)):
if n % 1000 == 0:
print("{}/{}".format(n, total_num))
latent_vec = latents_raw[n * batch: (n + 1) * batch, :]
label_vec = labels[n * batch: (n + 1) * batch, :]
used_labels.append(label_vec)
images = Gs.run(latent_vec, label_vec)
images = np.clip(np.rint((images + 1.0) / 2.0 * 255.0), 0.0, 255.0).astype(np.uint8) # [-1,1] => [0,255]
images = images.transpose(0, 2, 3, 1) # NCHW => NHWC
save_images = np.squeeze(images, axis=-1)
data_dir = output_data_path
if not os.path.exists(data_dir):
os.makedirs(data_dir)
for idx in range(save_images.shape[0]):
image_idx = idx + batch * n
labels_save["{}_{}".format(cat, image_idx)] = labels[image_idx, :]
store_name = 'fake_{}_{}.png'.format(cat, image_idx)
used_imgname.append(store_name)
store_path = os.path.join(data_dir, store_name)
imageio.imwrite(store_path, save_images[idx])
print("Done with {}".format(cat))
print(len(labels))
print(len(used_labels))
print(len(used_imgname))
sys.stdout.flush()
with open(output_data_path + "gan_image_labels.pkl", "wb") as handle:
pickle.dump(labels_save, handle)
sys.stdout.flush()
print("Done :)") | 2.234375 | 2 |
swampymud/character.py | ufosc/MuddySwamp | 10 | 12797400 | <reponame>ufosc/MuddySwamp
"""Module defining the CharacterClass metaclass and Character class,
which serves as the basis for all in-game characters.
This module also defines the 'Filter', used for CharacterClass-based
permissions systems, and 'Command', a wrapper that converts methods into
commands that can be invoked by characters.
"""
import enum
import functools
import inspect
import weakref
import asyncio
import swampymud.inventory as inv
from swampymud import util
from swampymud.util.shadowdict import ShadowDict
class Filter:
"""Filter for screening out certain CharacterClasses and Characters
_classes - set of CharacterClasses tracked by the filter
    _include_chars - set of characters to be included
    _exclude_chars - set of characters to be excluded
_mode - Filter.WHITELIST or Filter.BLACKLIST
if WHITELIST is selected, only characters whose class is in
_classes are allowed through the filter.
if BLACKLIST is selected, only characters whose class is NOT
in _classes are allowed through the filter.
Note that _include_chars / _exclude_chars take precedence over
the _classes. That is, if a WHITELIST includes the class
Wizard, but Bill the Wizard is in _exclude_chars, Bill will not
be permitted through the filter.
"""
class _FilterMode(enum.Enum):
"""Enum representing whether a filter includes or excludes the
classes that it tracks"""
WHITELIST = True
BLACKLIST = False
WHITELIST = _FilterMode.WHITELIST
BLACKLIST = _FilterMode.BLACKLIST
def __init__(self, mode, classes=(),
include_chars=(), exclude_chars=()):
"""initialize a Filter with [mode]
if [mode] is True, the Filter will act as a whitelist
if [mode] is False, the Filter will act as a blacklist
[classes] are those classes to be whitelisted/blacklisted
[include_chars] are specific characters to be included
[exclude_chars] are specific characters to be excluded
"""
self._classes = set(classes)
for char in include_chars:
if char in exclude_chars:
raise ValueError("Cannot have character in both include"
" and exclude")
for char in exclude_chars:
if char in include_chars:
raise ValueError("Cannot have character in both include"
" and exclude")
# store characters in a WeakSet, so that the Filter will not
# prevent them from getting garbage collected
self._include_chars = weakref.WeakSet(include_chars)
self._exclude_chars = weakref.WeakSet(exclude_chars)
if isinstance(mode, self._FilterMode):
self._mode = mode
elif isinstance(mode, bool):
if mode:
self._mode = Filter.WHITELIST
else:
self._mode = Filter.BLACKLIST
else:
if mode.lower() == "whitelist":
self._mode = Filter.WHITELIST
elif mode.lower() == "blacklist":
self._mode = Filter.BLACKLIST
else:
raise ValueError("Unrecognized mode %s" % repr(mode))
def permits(self, other):
"""returns True if Character/CharacterClass is allowed in
the individual Character is evaluated first,
then the Character's class, then all the Character's
ancestor classes
"""
if isinstance(other, Character):
if other in self._include_chars:
return True
elif other in self._exclude_chars:
return False
# now try the Character's class
other = type(other)
if isinstance(other, CharacterClass):
# cycle through each ancestor
ancestors = filter(lambda x: isinstance(x, CharacterClass),
other.__mro__)
for char_class in ancestors:
if char_class in self._classes:
return self._mode.value
# "other" is neither a CharClass nor Character
else:
return False
# the character / ancestors cannot be found in the list
return not self._mode.value
def include(self, other):
"""Set the filter to return 'True' if [other] is supplied
to permit()"""
# check that other is a Character / CharacterClass
if isinstance(other, CharacterClass):
if self._mode is Filter.WHITELIST:
self._classes.add(other)
else:
if other in self._classes:
self._classes.remove(other)
elif isinstance(other, Character):
if other in self._exclude_chars:
self._exclude_chars.remove(other)
self._include_chars.add(other)
else:
raise ValueError("Expected Character/CharacterClass,"
" received %s" % type(other))
def exclude(self, other):
"""Set the filter to return 'False' if [other] is supplied
to permit()"""
# check that other is a Character / CharacterClass
if isinstance(other, CharacterClass):
if self._mode == Filter.WHITELIST:
if other in self._classes:
self._classes.remove(other)
else:
self._classes.add(other)
elif isinstance(other, Character):
if other in self._include_chars:
self._include_chars.remove(other)
self._exclude_chars.add(other)
else:
raise ValueError("Expected Character/CharacterClass,"
f" received {type(other)}")
def __repr__(self):
"""overriding repr()"""
return "Filter({!r}, {!r}, {!r}, {!r})".format(
self._mode.value,
set(self._classes),
set(self._include_chars), set(self._exclude_chars)
)
@staticmethod
def from_dict(filter_dict):
"""returns a Filter pythonic representation [filter_dict]"""
return Filter(**filter_dict)
def to_dict(self):
"""returns a pythonic representation of this Filter"""
data = {"mode" : self._mode.value}
if self._classes:
data["classes"] = list(self._classes)
if self._include_chars:
data["include_chars"] = list(self._include_chars)
if self._exclude_chars:
data["exclude_chars"] = list(self._exclude_chars)
return data
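# --- Usage sketch (added for illustration; not part of swampymud itself) ---
# The Wizard/Bill case from the docstring above, spelled out. `Wizard` is a
# hypothetical CharacterClass and `bill` a hypothetical Character; only the
# Filter calls are real API:
#
#   allow_wizards = Filter(Filter.WHITELIST, classes=[Wizard])
#   allow_wizards.exclude(bill)       # bill is a Wizard, but explicitly excluded
#   allow_wizards.permits(Wizard)     # True  - the class is whitelisted
#   allow_wizards.permits(bill)       # False - per-character exclusion wins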
class Command(functools.partial):
"""A subclass of functools.partial that supports equality.
The default implementation of functools.partial does not normally
support equality for mathematically sound reasons:
https://bugs.python.org/issue3564
With this class's equality operators, we aren't trying to solve an
undecidable problem, but just confirm that two partially-applied
functions have the same arguments and underlying functions.
Optional fields, "name", "label", and "field" are also provided.
These fields store player-relevant information that are NOT factored
into comparisons.
In addition, this class has a convenience method, '.specify' to
derive a new Command from an existing one by simply adding
additional arguments. All other information (base function, names,
etc.) will be propagated.
While you can update Command.keywords, avoid doing so.
All comparisons are based on the INITIAL keywords, so changing
keywords after initialization is unsupported.
"""
def __init__(self, *args, **kwargs):
"""initialize a Command like a functools.partial object"""
super().__init__()
# creating an immutable set of keywords for comparisons
self._keys = frozenset(self.keywords.items())
# propagate the name and doc from the base function
self.__name__ = self.func.__name__
self.__doc__ = self.func.__doc__
# try to clean the docstring, if one was provided
try:
self.__doc__ = inspect.cleandoc(self.__doc__)
except AttributeError:
pass
# initialize satellite data
self.name = None
self.label = None
# by default, add a filter that permits all (empty blacklist)
self.filter = Filter(Filter.BLACKLIST)
def __eq__(self, other):
"""Two commands are equal iff the base functions are equal,
the args are equal, and the (initial) keywords are equal"""
try:
return (self.func, self.args, self._keys) == \
(other.func, other.args, other._keys)
except AttributeError:
# other is not a Command
return False
def __hash__(self):
"""overriding hash"""
return hash((self.func, self.args, self._keys))
def specify(self, *newargs, **new_keywords) -> 'Command':
"""Derive a new version of this function by applying additional
arguments.
If a provided keyword argument conflicts with a prior argument,
the prior argument will be overriden.
"""
args = self.args + tuple(newargs)
keywords = self.keywords.copy()
keywords.update(new_keywords)
new_cmd = Command(self.func, *args, **keywords)
# propagate the name and source
new_cmd.name = self.name
new_cmd.label = self.label
# note that a new filter is not created, so any changes to the
# old NewCommand will change to the old Command, and visa versa
new_cmd.filter = self.filter
return new_cmd
def __str__(self):
"""returns the name of this command
if no name is provided, func.__name__ is used
"""
if self.name is None:
return self.func.__name__
return self.name
def help_entry(self) -> str:
"""return a help message for this command"""
if self.label is not None:
return f"{self} [from {self.label}]:\n{self.__doc__}"
return f"{self}:\n{self.__doc__}"
@staticmethod
def with_traits(name: str = None, label: str = None,
filter: Filter = None):
"""decorator to easily wrap a function additional traits
[name] = to invoke this Command, the Character must use [name]
instead of the function's name
[label] = the type of the command. (Affects help menu.)
[filter] = if provided, determine which Characters / Classes
are permitted to use this command. """
def decorator(func):
cmd = Command(func)
cmd.name = name
cmd.label = label
if filter is not None:
cmd.filter = filter
return cmd
return decorator
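# --- Usage sketch (added for illustration; not part of swampymud itself) ---
# How a function becomes a player command and how specify() pre-binds
# arguments. `greet` and `some_character` are hypothetical:
#
#   @Command.with_traits(name="wave", label="Social Commands")
#   def greet(char, args):
#       """Wave at everyone in the room."""
#       char.location.message(f"{char} waves.")
#
#   bound = greet.specify(some_character)  # what Character.__init__ does per command
#   bound(["wave"])                        # calls greet(some_character, ["wave"])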
class CharacterClass(type):
"""metaclass establishing basic Character behaviors
CharacterClasses include the following important attributes:
- classname: how the class appears to the players
- frequency: how often will new players spawn as this class
- command_label: how commands from this class appear in help menu
"""
def __init__(cls, name, bases, namespace):
# add the proper name, if not already provided
if "classname" not in namespace:
cls.classname = util.camel_to_space(name)
# add a frequency field, if not already provided
if "frequency" not in namespace:
cls.frequency = 1
# add a "command_label", if not already provided
# this field is used in creating help menus
if "command_label" not in namespace:
cls.command_label = f"{cls} Commands"
# commands that were implemented for this class
cls._local_commands = {}
for value in namespace.values():
if isinstance(value, Command):
value.label = cls.command_label
cls._local_commands[str(value)] = value
# all commands, with the most recent commands exposed
cls._commands = {}
for base in reversed(cls.__mro__):
if not isinstance(base, CharacterClass):
continue
cls._commands.update(base._local_commands)
cls._commands.update(cls._local_commands)
# calling the super init
super().__init__(name, bases, namespace)
def __str__(cls):
"""overriding str to return classname"""
return cls.classname
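# --- Usage sketch (added for illustration; not part of swampymud itself) ---
# Any Character subclass is built by this metaclass and picks up a spaced
# classname, a spawn frequency and a command label automatically. `Wizard`
# is hypothetical:
#
#   class Wizard(Character):
#       frequency = 0.5            # lower spawn weight than the default 1
#
#   str(Wizard)                    # 'Wizard' (util.camel_to_space of the name)
#   Wizard.command_label           # 'Wizard Commands'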
class Character(metaclass=CharacterClass):
"""Base class for all other CharacterClasses"""
# How this class appears to players
classname = "Default Character"
# Starting location for this class
starting_location = None
# Commands from this class will be labeled "Default Commands"
command_label = "Default Commands"
# Valid equip slots for characters of this class
equip_slots = []
def __init__(self, name=None):
super().__init__()
self._name = name
self.location = None
self.msgs = asyncio.Queue()
# build dict from Commands collected by CharacterClass
self.cmd_dict = ShadowDict()
for (name, cmd) in self._commands.items():
cmd = cmd.specify(self)
# add command only if filter permits it
if cmd.filter.permits(self):
self.cmd_dict[name] = cmd
            # because Commands are not bound properly like a normal
# method, we must manually bind the methods
# TODO: override getattribute__ to solve the super() issue?
if isinstance(getattr(self, cmd.func.__name__), Command):
setattr(self, cmd.func.__name__, cmd)
# set up inventory and equipping items
self.inv = inv.Inventory()
self.equip_dict = inv.EquipTarget.make_dict(*self.equip_slots)
# put character in default command parsing mode
self._parser = self._command_parser
def message(self, msg):
"""send a message to the controller of this character"""
        # place the message on the character's queue without blocking
self.msgs.put_nowait(msg)
def command(self, msg):
"""issue 'msg' to character.
character will parse 'msg' using its current parser."""
if msg:
self._parser(msg)
def update(self):
"""periodically called method that updates character state"""
print(f"[{self}] received update")
def spawn(self, spawn_location):
"""Send a greeting to the character and put them into a
name-selection mode.
[spawn_location]: where the character should spawn after a name
is submitted.
"""
self.message(f"Welcome to our SwampyMud! You are a {type(self)}")
self.message(f"What should we call you?")
# set player location to spawn_location, but do not MOVE them
# thus, player will not be available to attack
self.location = spawn_location
self._parser = self._join_parser
def despawn(self):
"""method executed when a player dies"""
self.message("You died.")
if self.location is not None:
self.location.message(f"{self} died.", exclude={self})
try:
self.location.characters.remove(self)
except ValueError:
pass
self.location = None
self._parser = self._dead_parser
# default user-input parsers
def _join_parser(self, new_name: str):
"""Parser for a newly joined player, used for selecting a valid
name"""
if len(new_name) < 2:
self.message("Names must have at least 2 characters.")
return
if not new_name.isalnum():
self.message("Names must be alphanumeric.")
return
# TODO: perform some kind of check to prevent players
# from having the same name?
self._name = new_name
# move the player to the actual location they should be in
loc = self.location
self.location = None
self.set_location(loc)
self._parser = self._command_parser
def _command_parser(self, line: str):
"""The default parser for a player. Parses"""
# command is always the first word
args = line.split()
cmd_name = args[0]
if not cmd_name in self.cmd_dict:
self.message("Command \'%s\' not recognized." % cmd_name)
return
cmd = self.cmd_dict[cmd_name]
cmd(args)
def _dead_parser(self, line: str):
"""Parser used when a character has died"""
self.message("You have died. Reconnect to this server to start"
" as a new character.")
# string-formatting methods
def __repr__(self):
"""return a representation of Character"""
if self._name is None:
return f"{type(self).__name__}()"
return f"{type(self).__name__}(name={self})"
def __str__(self):
"""return the Character's name"""
if self._name:
return self._name
return "[nameless character]"
def view(self):
"""return a longer, user-focused depiction of Character"""
if self._name is None:
return f"A nameless {type(self)}"
return f"{self._name} the {type(self)}"
#location manipulation methods
def set_location(self, new_location):
"""sets location, updating the previous and new locations as
necessary and gathering commands from any entities in the
location
"""
try:
self.location.characters.remove(self)
# remove commands from all the entities
# in the current location
for entity in self.location.entities:
entity.on_exit(self)
entity.remove_cmds(self)
except AttributeError:
# location was none
pass
self.location = new_location
self.location.add_char(self)
# add commands from all the entities
# in the current locations
for entity in new_location.entities:
entity.on_enter(self)
entity.add_cmds(self)
#inventory/item related methods
def add_item(self, item, amt=1):
"""add [item] to player's inventory"""
# if the item is an ItemStack, unpack it first
if isinstance(item, inv.ItemStack):
self.inv.add_item(item.copy(), item.amount)
self.inv.add_item(item, amt)
def equip(self, item, from_inv=True):
"""place [item] in this player's equip dict
[item]: item to Equip
[from_inv]: if True, [item] should be removed from inventory
first. If item is not found in inventory, the command fails.
if False, [item] is not removed from inventory and will not
be returned to inventory upon unequip.
"""
# duck test that the item is even equippable
try:
target = item.target
except AttributeError:
self.message(f"{item} cannot be equipped.")
return
if target in self.equip_dict:
# check remove_inv, if true, remove item from inventory
# this avoids duplication
if from_inv:
try:
self.inv.remove_item(item)
# item not found
except KeyError:
self.message(f"Cannot equip {item}-"
"not found in inventory.")
return
# check for an already equipped weapon, unequip it
if self.equip_dict[target] is not None:
self.unequip(target)
item.on_equip(self)
item.add_cmds(self)
self.equip_dict[item.target] = item, from_inv
# class doesn't have an equip target for this item, cannot equip
else:
self.message(f"Cannot equip item {item} to {target}.")
return
def unequip(self, target):
"""updates this character's equip_dict such that the [target]
is set to None and any item at that position is unequipped
[target]: an EquipTarget"""
# test if anything is even equipped
# also duck test to see if this character even has [target]
# in its equip slots
try:
if self.equip_dict[target] is None:
self.message(f"No item equipped on target {target}.")
return
except KeyError:
self.message(f"{type(self)} does not possess"
f" equip slot '{target}'.")
return
equipped, from_inv = self.equip_dict[target]
equipped.on_unequip(self)
equipped.remove_cmds(self)
self.equip_dict[target] = None
# if item was from character's inventory, return it
if from_inv:
self.inv.add_item(equipped)
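    # --- Usage sketch (added for illustration; not part of swampymud itself) ---
    # equip()/unequip() move items between the inventory and equip_dict, keyed
    # by EquipTarget. `sword` is a hypothetical equippable item whose target is
    # EquipTarget("right hand"), assuming the character's class lists
    # "right hand" in its equip_slots:
    #
    #   char.add_item(sword)
    #   char.equip(sword)                              # pulled from inv, equipped
    #   char.unequip(inv.EquipTarget("right hand"))    # returned to inv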
# default commands
@Command
def help(self, args):
"""Show relevant help information for a particular command.
usage: help [command]
If no command is supplied, a list of all commands is shown.
"""
if len(args) < 2:
# TODO: cache this or something
menu = self.help_menu()
self.message(menu)
else:
name = args[1]
try:
self.message(self.cmd_dict[name].help_entry())
except KeyError:
self.message(f"Command '{name}' not recognized.")
@Command
def look(self, args):
"""Gives a description of your current location.
usage: look
"""
# TODO: update to allow players to 'inspect' certain objects
self.message(self.location.view())
@Command
def say(self, args):
"""Send a message to all players in your current location.
usage: say [msg]
"""
msg = ' '.join(args[1:])
if msg and self.location is not None:
self.location.message(f"{self.view()}: {msg}")
@Command
def go(self, args):
"""Go to an accessible location.
usage: go [exit name]
"""
ex_name = " ".join(args[1:])
# Manually iterating over our location's list of exits
# Note! If writing your own method, just do
# util.find(location, ex_name, location.Exit, char=my_char)
# I'm only writing this to avoid a cyclic dependency.
for ex in self.location._exit_list:
if ex_name in ex.names:
found_exit = ex
break
else:
self.message(f"No exit with name '{ex_name}'.")
return
if found_exit.interact.permits(self):
old_location = self.location
new_location = found_exit.destination
new_location.message(f"{self} entered.")
self.set_location(new_location)
# TODO: only show the exit if a character can see it?
old_location.message(f"{self} left through exit "
f"'{ex_name}'.")
else:
if found_exit.perceive.permits(self):
self.message(f"Exit '{ex_name}' is inaccessible to you.")
# if the char can't see or interact with the exit,
# we lie to them and pretend like it doesn't exist
else:
self.message(f"No exit with name '{ex_name}'.")
@Command.with_traits(name="equip")
def cmd_equip(self, args):
"""Equip an equippable item from your inventory."""
if len(args) < 2:
self.message("Provide an item to equip.")
return
item_name = " ".join(args[1::]).lower()
found_items = util.find(self.inv, name=item_name)
if len(found_items) == 1:
self.equip(found_items[0][0])
elif len(found_items) > 1:
#TODO handle ambiguity
self.message(f"Ambigious item name. Results={found_items}")
else:
self.message(f"Could not find item '{item_name}'.")
@Command.with_traits(name="unequip")
def cmd_unequip(self, args):
"""Unequip an equipped item.
Usage: unequip [item]"""
if len(args) < 2:
self.message("Provide an item to equip.")
return
item_name = " ".join(args[1::]).lower()
# search through the items in the equip_dict
found_items = []
for _, equip_data in self.equip_dict.items():
if equip_data is None:
continue
item, _ = equip_data
if str(item).lower() == item_name:
found_items.append(item)
if len(found_items) == 1:
self.unequip(found_items[0].target)
elif len(found_items) > 1:
#TODO handle ambiguity
self.message(f"Ambigious item name. Results={found_items}")
else:
self.message(f"Could not find equipped item '{item_name}'.")
@Command
def pickup(self, args):
"""Pick up item from the environment."""
if len(args) < 2:
self.message("Provide an item to pick up.")
return
item_name = " ".join(args[1::]).lower()
# TODO: find a way to provide type=Item
found_items = util.find(self.location, name=item_name)
if len(found_items) == 1:
item = found_items[0][0]
self.location.inv.remove_item(item)
self.inv.add_item(item)
elif len(found_items) > 1:
#TODO handle ambiguity
self.message(f"Ambigious item name. Results={found_items}")
else:
self.message(f"Could not find item '{item_name}' to pick up.")
@Command
def drop(self, args):
"""Drop an item into the environment"""
if len(args) < 2:
self.message("Provide an item to drop.")
return
item_name = " ".join(args[1:]).lower()
found_items = util.find(self.inv, name=item_name)
if len(found_items) == 1:
item = found_items[0][0]
self.inv.remove_item(item)
self.location.inv.add_item(item)
elif len(found_items) > 1:
#TODO handle ambiguity
self.message(f"Ambigious item name. Results={found_items}")
else:
self.message(f"Could not find item '{item_name}' to drop.")
@Command.with_traits(name="inv")
def cmd_inv(self, args):
"""Show your inventory.
usage: inv"""
# create a string representation of the equipped items
equipped = []
for target, item in self.equip_dict.items():
if item is None:
equipped.append(f"{target}: none")
else:
equipped.append(f"{target}: {item[0]}")
equipped.sort()
self.message("\n".join(equipped))
inv_msg = self.inv.readable()
# only send a message if inv has items
if inv_msg:
self.message(inv_msg)
@Command.with_traits(name="use")
def cmd_use(self, args):
""" Use an item.
usage: use [item] [options for item]
Options may vary per item.
"""
# TODO: allow players to use accessible items in location?
if len(args) < 2:
self.message("Please specify an item.")
return
item_name = args[1]
found_items = util.find(self.inv, name=item_name)
if len(found_items) == 1:
item = found_items[0][0]
self.inv.remove_item(item)
item.on_use(self, args[2:])
# replace the item
self.inv.add_item(item)
elif len(found_items) > 1:
#TODO handle ambiguity
self.message(f"Ambigious item name. Results={found_items}")
else:
self.message(f"Could not find item '{item_name}' to use.")
# miscellaneous methods
def help_menu(self) -> str:
sources = {}
# walk the mro, to get the list of CharacterClasses in order
for cls in reversed(type(self).__mro__):
if isinstance(cls, CharacterClass):
sources[cls.command_label] = []
for name, cmd in self.cmd_dict.items():
try:
sources[cmd.label].append(name)
except KeyError:
sources[cmd.label] = [name]
# unpack the dictionary in reverse order
output = []
while sources:
source, names = sources.popitem()
output.append(f"---{source}---")
output.append(" ".join(names))
return "\n".join(output)
# serialization-related methods
@property
def symbol(self):
"""return a unique symbol for this Character"""
# failsafe to ensure that Character always has a symbol
# even if someone forgets to set self._symbol in the __init__
if not hasattr(self, "_symbol"):
symbol = "{}#{}".format(type(self).__name__,
util.to_base(id(self), 62))
setattr(self, "_symbol", symbol)
return self._symbol
@classmethod
def load(cls, data):
name = data["name"] if "name" in data else None
return cls(name)
def post_load(self, data):
pass
def save(self):
"""return a pythonic representation of this Character"""
return {"_type": type(self), "name": self._name}
def children(self):
"""pass"""
return []
#TODO: handle items here
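# --- End-to-end sketch (added for illustration; not part of swampymud itself) ---
# Driving a Character without the network layer: input lines go to command(),
# replies land on the asyncio msgs queue. `some_location` stands in for a
# swampymud Location object and is an assumption:
#
#   char = Character()
#   char.spawn(some_location)      # greets the player, asks for a name
#   char.command("Alice")          # sets the name, enters command parsing
#   char.command("look")
#   char.command("help say")
#   print(char.msgs.get_nowait())  # read back what the character was told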
| 2.59375 | 3 |
app/front/tree/home/api/indicators/route.py | jgphilpott/polyplot | 5 | 12797401 | <filename>app/front/tree/home/api/indicators/route.py
from ast import literal_eval
from flask import jsonify, request
from back.mongo.data.collect.indicators.mongo import find_indicators
def register_api_indicators_route(app):
@app.route("/api/indicators")
def api_indicators():
query = literal_eval(request.args.get("query")) if "query" in request.args else {"countries": {"$exists": True, "$ne": []}, "completeness": {"$gt": 0}}
filter = literal_eval(request.args.get("filter")) if "filter" in request.args else {"countries": 0}
sort = literal_eval(request.args.get("sort")) if "sort" in request.args else [("completeness", -1), ("name", 1)]
limit = literal_eval(request.args.get("limit")) if "limit" in request.args else 100
filter["_id"] = 0
data = find_indicators(query, filter, sort, limit)
return jsonify(data)
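# --- Request sketch (added for illustration; not part of the original file) ---
# query/filter/sort/limit arrive as query-string parameters and are parsed with
# literal_eval, so they must be Python literals. A hypothetical call:
#
#   GET /api/indicators?query={"code": "SP.POP.TOTL"}&limit=10
#
# becomes find_indicators({"code": "SP.POP.TOTL"}, {"countries": 0, "_id": 0},
# [("completeness", -1), ("name", 1)], 10); the field name in the query is an assumption.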
| 2.421875 | 2 |
mpunet/errors/deprecated_warnings.py | alexsosn/MultiPlanarUNet | 156 | 12797402 | from mpunet.logging import ScreenLogger
def warn_sparse_param(logger):
    logger = logger or ScreenLogger()
sparse_err = "mpunet 0.1.3 or higher requires integer targets" \
" as opposed to one-hot encoded targets. Setting the 'sparse'" \
" parameter no longer has any effect and may not be allowed" \
" in future versions."
logger.warn(sparse_err)
| 2.25 | 2 |
cisco_ucs_vlan_cli_interface.py | drew-russell/Cisco-UCS-VLAN-Management | 3 | 12797403 | """
A Python CLI interface that utilizes the Cisco UCS SDK to:
- Connect to a UCSM domain
- View and Add VLANs
- Add a VLAN to a vNIC
Please note that there is very little error handling present so
proceed accordingly.
"""
from UcsSdk import UcsHandle
from UcsSdk import UcsUtils
from UcsSdk.MoMeta.FabricVlan import FabricVlan
from UcsSdk.MoMeta.VnicLanConnTempl import VnicLanConnTempl
from UcsSdk.MoMeta.OrgOrg import OrgOrg
import sys
import warnings
'''
Supress the following warning message which does not affect funcationality:
/Library/Python/2.7/site-packages/UcsSdk/UcsBase.py:1064: UserWarning: [Warning]: AddManagedObject [Description]:Expected Naming Property Name for ClassId VnicLanConnTempl not found
warnings.warn(string)
'''
warnings.filterwarnings("ignore")
def ssl_workaround():
""" Workaround for SSL certification error that prevents proper
UCS domain login when using Python 2.7 or higher. Credit to user <NAME>
(ragupta4) on the Cisco UCS Communities """
is_verify_certificate = False
if not sys.version_info < (2, 6):
from functools import partial
import ssl
ssl.wrap_socket = partial(
ssl.wrap_socket, ssl_version=ssl.PROTOCOL_TLSv1)
if not sys.version_info < (2, 7, 9) and not is_verify_certificate:
ssl._create_default_https_context = ssl._create_unverified_context
def connect():
""" Establish a connection to the UCS Domain """
HANDLE.Login(IP_ADDRESS, USERNAME, PASSWORD)
def current_vlans():
""" Get a list of all current VLANs """
current_vlans.list = {}
obj = HANDLE.GetManagedObject(None, FabricVlan.ClassId())
if obj != None:
for mo in obj:
for prop in UcsUtils.GetUcsPropertyMetaAttributeList(mo.propMoMeta.name):
if str(prop) == "Name":
vlan_name = mo.getattr(prop)
if str(prop) == "Id":
vlan_id = mo.getattr(prop)
current_vlans.list.update({vlan_name: vlan_id})
def add_vlans():
""" Create new VLANs on UCS. """
print ''
add_vlans.confirm_new_vlan = raw_input(
'Would you like to add a new VLAN? (yes/no): ')
while add_vlans.confirm_new_vlan not in ['yes', 'y', 'no', 'n']:
print ''
print '*** Error: Please enter either "yes" or "no". ***'
print ''
add_vlans.confirm_new_vlan = raw_input(
'Would you like to add a new VLAN? (yes/no): ')
if add_vlans.confirm_new_vlan not in ['no' or 'n']:
print
add_vlans.vlan_name = raw_input('Enter the VLAN Name: ')
vlan_id = raw_input('Enter the VLAN ID: ')
obj = HANDLE.GetManagedObject(None, None, {"Dn": "fabric/lan"})
HANDLE.AddManagedObject(obj, "fabricVlan", {"DefaultNet": "no", "PubNwName": "", "Dn": "fabric/lan/net-{}".format(add_vlans.vlan_name), "PolicyOwner": "local",
"CompressionType": "included", "Name": "{}".format(add_vlans.vlan_name), "Sharing": "none", "McastPolicyName": "", "Id": "{}".format(vlan_id)})
current_vlans()
for key, value in current_vlans.list.items():
if add_vlans.vlan_name in str(key):
print ''
print 'The following VLAN has been created: '
print ''
print '- ' + key + ' (' + value + ')'
def current_vnic_templates():
""" Get a list of current vNICs in UCS """
current_vnic_templates.list = []
obj = HANDLE.GetManagedObject(None, VnicLanConnTempl.ClassId())
if obj != None:
for mo in obj:
for prop in UcsUtils.GetUcsPropertyMetaAttributeList(mo.propMoMeta.name):
if str(prop) == "Name":
vnic_template_name = mo.getattr(prop)
current_vnic_templates.list.append(vnic_template_name)
def current_orgs():
""" Get a list of the current organizations in UCS which will be used in
add_vlan_to_vnic """
current_orgs.list = []
obj = HANDLE.GetManagedObject(None, OrgOrg.ClassId())
if obj != None:
for mo in obj:
for prop in UcsUtils.GetUcsPropertyMetaAttributeList(mo.propMoMeta.name):
if str(prop) == "Dn":
org_name = mo.getattr(prop)
current_orgs.list.append(org_name)
current_orgs.list.remove('org-root')
def add_vlan_to_vnic():
""" Add a VLAN to a vNIC template """
print
add_vlan_to_vnic.vnic_name = raw_input('vNIC Template Name: ')
print ''
obj = HANDLE.GetManagedObject(None, VnicLanConnTempl.ClassId(), {
VnicLanConnTempl.RN: "lan-conn-templ-{}".format(add_vlan_to_vnic.vnic_name)})
if obj != None:
for mo in obj:
for prop in UcsUtils.GetUcsPropertyMetaAttributeList(mo.propMoMeta.name):
if str(prop) == "Dn":
dn = mo.getattr(prop)
                    # default to the root org unless a more specific org appears in the Dn
                    organization = "org-root"
                    for org in current_orgs.list:
                        if org in mo.getattr(prop):
                            organization = org
                            break
if str(prop) == "IdentPoolName":
ident_pool_name = mo.getattr(prop)
if str(prop) == "QosPolicyName":
qos_policy_name = mo.getattr(prop)
if str(prop) == "Descr":
descr = mo.getattr(prop)
if str(prop) == "PolicyOwner":
policy_owner = mo.getattr(prop)
if str(prop) == "NwCtrlPolicyName":
nw_ctrl_policy_name = mo.getattr(prop)
if str(prop) == "TemplType":
templ_type = mo.getattr(prop)
if str(prop) == "StatsPolicyName":
stats_policy_name = mo.getattr(prop)
if str(prop) == "Mtu":
mtu = mo.getattr(prop)
if str(prop) == "PinToGroupName":
pin_to_group_name = mo.getattr(prop)
if str(prop) == "SwitchId":
switch_id = mo.getattr(prop)
HANDLE.StartTransaction()
vnic_obj = HANDLE.GetManagedObject(
None, None, {"Dn": "{}".format(organization)})
mo = HANDLE.AddManagedObject(vnic_obj, "vnicLanConnTempl",
{"IdentPoolName": "{}".format(ident_pool_name),
"Dn": "{}".format(dn),
"QosPolicyName": "{}".format(qos_policy_name),
"Descr": "{}".format(descr),
"PolicyOwner": "{}".format(policy_owner),
"NwCtrlPolicyName": "{}".format(nw_ctrl_policy_name),
"TemplType": "{}".format(templ_type),
"StatsPolicyName": "{}".format(stats_policy_name),
"Mtu": "{}".format(mtu),
"PinToGroupName": "{}".format(pin_to_group_name),
"SwitchId": "{}".format(switch_id)}, True)
mo_1 = HANDLE.AddManagedObject(mo, "vnicEtherIf", {
"DefaultNet": "no",
"Name": "{}".format(add_vlans.vlan_name),
"Dn": "{}/if-{}".format(dn, add_vlans.vlan_name)}, True)
HANDLE.CompleteTransaction()
ssl_workaround()
IP_ADDRESS = ""
USERNAME = ""
PASSWORD = ""
HANDLE = UcsHandle()
connect()
print ''
print "Cisco UCS Manager VLAN Management"
print ''
print 'Current VLANs:'
current_vlans()
print ''
for name, ID in current_vlans.list.iteritems():
print '- ' + name + ' (' + ID + ')'
add_vlans()
if add_vlans.confirm_new_vlan not in ['no', 'n']:
print ''
print 'Current vNIC Templates: '
print ''
current_vnic_templates()
for name in current_vnic_templates.list:
print '- ' + name
print ''
confirm_add_vlan = raw_input(
"Would you like to add the " + '"' + add_vlans.vlan_name + '"' + " VLAN to a vNIC template? (yes/no): ")
while confirm_add_vlan not in ['yes', 'y', 'no', 'n']:
print ''
print '*** Error: Please enter either "yes" or "no". ***'
print ''
        confirm_add_vlan = raw_input(
"Would you like to add the " + '"' + add_vlans.vlan_name + '"' + " VLAN to a vNIC template? (yes/no): ")
    if confirm_add_vlan not in ['no', 'n']:
current_orgs()
add_vlan_to_vnic()
print ("The " + '"' + add_vlans.vlan_name + '"' + " VLAN has been added to " '"' +
add_vlan_to_vnic.vnic_name + '"' + " vNIC template.")
print
else:
print
HANDLE.Logout()
| 2.5 | 2 |
gridvm/network/protocol/packet/__init__.py | GeorgeTG/gridvm | 0 | 12797404 | <gh_stars>0
from .ptype import PacketType
from .header import PacketHeader
from .packet import Packet
from .factory import *
| 1.164063 | 1 |
setup.py | sauzher/Products.LDAPMultiPlugins | 0 | 12797405 | <filename>setup.py
import os
from setuptools import setup
from setuptools import find_packages
NAME = 'LDAPMultiPlugins'
here = os.path.abspath(os.path.dirname(__file__))
package = os.path.join(here, 'Products', NAME)
def _read(name):
f = open(os.path.join(package, name))
return f.read()
_boundary = '\n' + ('-' * 60) + '\n\n'
setup(name='Products.%s' % NAME,
version=_read('VERSION.txt').strip(),
description='LDAP-backed plugins for the Zope2 PluggableAuthService',
long_description=( _read('README.txt')
+ _boundary
+ _read('CHANGES.txt')
+ _boundary
+ "Download\n========"
),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Zope2",
"Intended Audience :: Developers",
"License :: OSI Approved :: Zope Public License",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP :: Site Management",
"Topic :: Software Development",
"Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP",
],
keywords='web application server zope zope2 ldap',
author="<NAME> and contributors",
author_email="<EMAIL>",
url="http://pypi.python.org/pypi/Products.%s" % NAME,
license="ZPL 2.1 (http://www.zope.org/Resources/License/ZPL-2.1)",
packages=find_packages(),
include_package_data=True,
namespace_packages=['Products'],
zip_safe=False,
install_requires=[
#Zope >= 2.9,
'setuptools',
'python-ldap >= 2.0.6',
'Products.LDAPUserFolder >= 2.9',
'Products.PluggableAuthService >= 1.4.0',
],
extras_require={
'exportimport': [
# Zope >= 2.10.0
'Products.GenericSetup >= 1.4.0'
]
},
entry_points="""
[zope2.initialize]
Products.%s = Products.%s:initialize
""" % (NAME, NAME),
)
| 1.71875 | 2 |
main.py | stspbu/code-change-miner | 1 | 12797406 | import ast
import os
import pickle
import sys
import stackimpact
import datetime
import argparse
import multiprocessing
from log import logger
from patterns import Miner
from patterns.models import Fragment, Pattern
from vcs.traverse import GitAnalyzer, RepoInfo, Method
import pyflowgraph
import changegraph
import settings
class RunModes:
BUILD_PY_FLOW_GRAPH = 'pfg'
BUILD_CHANGE_GRAPH = 'cg'
COLLECT_CHANGE_GRAPHS = 'collect-cgs'
MINE_PATTERNS = 'patterns'
ALL = [BUILD_PY_FLOW_GRAPH, BUILD_CHANGE_GRAPH, COLLECT_CHANGE_GRAPHS, MINE_PATTERNS]
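# Illustrative invocations, assuming this file is the entry point; the flags mirror the
# argparse definitions in main() below and the file names are hypothetical:
#   python main.py pfg -i example.py -o pyflowgraph.dot
#   python main.py cg -s before.py -d after.py -o changegraph.dot
#   python main.py collect-cgs
#   python main.py patterns -s before.py -d after.py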
def main():
logger.info('------------------------------ Starting ------------------------------')
if settings.get('use_stackimpact', required=False):
_ = stackimpact.start(
agent_key=settings.get('stackimpact_agent_key'),
app_name='CodeChangesMiner',
debug=True,
app_version=str(datetime.datetime.now())
)
sys.setrecursionlimit(2**31-1)
multiprocessing.set_start_method('spawn', force=True)
parser = argparse.ArgumentParser()
parser.add_argument('mode', help=f'One of {RunModes.ALL}', type=str)
args, _ = parser.parse_known_args()
current_mode = args.mode
if current_mode == RunModes.BUILD_PY_FLOW_GRAPH:
parser.add_argument('-i', '--input', help='Path to source code file', type=str, required=True)
parser.add_argument('-o', '--output', help='Path to output file', type=str, default='pyflowgraph.dot')
parser.add_argument('--no-closure', action='store_true')
parser.add_argument('--show-deps', action='store_true')
parser.add_argument('--hide-op-kinds', action='store_true')
parser.add_argument('--show-data-keys', action='store_true')
args = parser.parse_args()
fg = pyflowgraph.build_from_file(
args.input, show_dependencies=args.show_deps, build_closure=not args.no_closure)
pyflowgraph.export_graph_image(
fg, args.output, show_op_kinds=not args.hide_op_kinds, show_data_keys=args.show_data_keys)
elif current_mode == RunModes.BUILD_CHANGE_GRAPH:
parser.add_argument('-s', '--src', help='Path to source code before changes', type=str, required=True)
parser.add_argument('-d', '--dest', help='Path to source code after changes', type=str, required=True)
parser.add_argument('-o', '--output', help='Path to output file', type=str, default='changegraph.dot')
args = parser.parse_args()
fg = changegraph.build_from_files(args.src, args.dest)
changegraph.export_graph_image(fg, args.output)
elif current_mode == RunModes.COLLECT_CHANGE_GRAPHS:
GitAnalyzer().build_change_graphs()
elif current_mode == RunModes.MINE_PATTERNS:
parser.add_argument('-s', '--src', help='Path to source code before changes', type=str, nargs='+')
parser.add_argument('-d', '--dest', help='Path to source code after changes', type=str, nargs='+')
parser.add_argument('--fake-mining', action='store_true')
args = parser.parse_args()
if args.src or args.dest or args.fake_mining:
if not args.src or len(args.src) != len(args.dest):
raise ValueError('src and dest have different size or unset')
change_graphs = []
for old_path, new_path in zip(args.src, args.dest):
methods = []
for n, path in enumerate([old_path, new_path]):
with open(path, 'r+') as f:
src = f.read()
methods.append(Method(path, 'test_name', ast.parse(src, mode='exec').body[0], src))
mock_commit_dtm = datetime.datetime.now(tz=datetime.timezone.utc)
repo_info = RepoInfo(
'mock repo path', 'mock repo name', 'mock repo url', 'mock hash', mock_commit_dtm,
'mock old file path', 'mock new file path', methods[0], methods[1])
cg = changegraph.build_from_files(old_path, new_path, repo_info=repo_info)
change_graphs.append(cg)
miner = Miner()
if args.fake_mining:
for cg in change_graphs:
fragment = Fragment()
fragment.graph = cg
fragment.nodes = cg.nodes
pattern = Pattern([fragment])
miner.add_pattern(pattern)
else:
miner.mine_patterns(change_graphs)
miner.print_patterns()
else:
storage_dir = settings.get('change_graphs_storage_dir')
file_names = os.listdir(storage_dir)
logger.warning(f'Found {len(file_names)} files in storage directory')
change_graphs = []
for file_num, file_name in enumerate(file_names):
file_path = os.path.join(storage_dir, file_name)
try:
with open(file_path, 'rb') as f:
graphs = pickle.load(f)
for graph in graphs:
change_graphs.append(pickle.loads(graph))
except:
logger.warning(f'Incorrect file {file_path}')
if file_num % 1000 == 0:
logger.warning(f'Loaded [{1+file_num}/{len(file_names)}] files')
logger.warning('Pattern mining has started')
miner = Miner()
try:
miner.mine_patterns(change_graphs)
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt: mined patterns will be stored before exit')
miner.print_patterns()
else:
raise ValueError
if __name__ == '__main__':
main()
| 2.109375 | 2 |
distribution/monty.py | proprefenetre/distributions | 0 | 12797407 | <filename>distribution/monty.py
from distribution import Distribution, DistGroup
# Monty hall problem
# you have chosen door A. Do you switch to win a car?
# D:
# Monty opens door B _and_ there is no car there
# H:
# 1: the car is behind door A:
# p(open A): 0 (we picked A)
# p(open B): .5
# p(open C): .5
hypo_1 = Distribution(name='1')
hypo_1.update({'A': 0, 'B': 0.5, 'C': 0.5})
# 2: the car is behind door B:
# p(open A): 0
# p(open B): 0
# p(open C): 1
hypo_2 = Distribution(name='2')
hypo_2.update({'A': 0, 'B': 0, 'C': 1})
# 3: the car is behind door C:
# p(open A): 0
# p(open B): 1
# p(open C): 0
hypo_3 = Distribution(name='3')
hypo_3.update({'A': 0, 'B': 1, 'C': 0})
# Priors
doors = DistGroup([hypo_1, hypo_2, hypo_3])
print('{:^5} {:<12} {:<12} {:<12} {:<12}'.format(' ', 'P(H)', 'P(D|H)', 'P(H)*P(D|H)', 'P(H|D)'))
for d in doors:
prior = doors.P(d.name)
likelihood = d.P('B')
print('{:^5} {:<12.5f} {:<12.5f} {:<12.5f} {:<12.5f}'.format(d.name,
prior,
likelihood,
prior * likelihood,
prior * likelihood / doors.normalizer('B')))
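# Sanity check worked by hand (assuming DistGroup assigns each hypothesis a uniform 1/3 prior):
#   P(D) = 1/3 * 0.5 + 1/3 * 0 + 1/3 * 1 = 0.5
#   P(car behind A | Monty opens B) = (1/3 * 0.5) / 0.5 = 1/3
#   P(car behind C | Monty opens B) = (1/3 * 1.0) / 0.5 = 2/3
# so the table above should show that switching from door A to door C doubles the chance of winning.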
| 3.1875 | 3 |
morphium/ia.py | pudo/morphium | 1 | 12797408 | <reponame>pudo/morphium
import os
import logging
import boto
from boto.s3.connection import OrdinaryCallingFormat
import mimetypes
from datetime import datetime
from morphium.util import env, TAG_LATEST
log = logging.getLogger(__name__)
config = {}
class InternetArchive(object):
"""A scraper archive on the internet archive. This is called when a
scraper has generated a file which needs to be backed up to a
bucket."""
def __init__(self, item=None, prefix=None):
self.tag = datetime.utcnow().date().isoformat()
self.item = item or env('ia_item')
self.prefix = prefix
self.access_key = env('ia_access_key_id')
self.secret_key = env('ia_secret_access_key')
@property
def bucket(self):
if not hasattr(self, '_bucket'):
config = self.item is not None
config = config and self.access_key is not None
config = config and self.secret_key is not None
if not config:
log.warning("No Internet Archive config, skipping upload.")
self._client = None
return None
conn = boto.connect_s3(self.access_key, self.secret_key,
host='s3.us.archive.org',
is_secure=False,
calling_format=OrdinaryCallingFormat())
if not conn.lookup(self.item, validate=False):
conn.create_bucket(self.item)
self._bucket = conn.get_bucket(self.item)
return self._bucket
def upload_file(self, source_path, file_name=None, mime_type=None):
"""Upload a file to the given bucket."""
if self.bucket is None:
return
if file_name is None:
file_name = os.path.basename(source_path)
if mime_type is None:
mime_type, _ = mimetypes.guess_type(file_name)
mime_type = mime_type or 'application/octet-stream'
date_name = os.path.join(self.tag, file_name)
copy_name = os.path.join(TAG_LATEST, file_name)
for key_name in (date_name, copy_name):
if self.prefix is not None:
key_name = os.path.join(self.prefix, key_name)
log.info("Uploading [%s]: %s", self.item, key_name)
key = self.bucket.get_key(key_name)
if key is None:
key = self.bucket.new_key(key_name)
key.content_type = mime_type
key.set_contents_from_filename(source_path,
policy='public-read')
return key.generate_url(84600, query_auth=False)
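# Illustrative usage sketch: the item name, prefix and file path below are hypothetical, and
# the ia_item / ia_access_key_id / ia_secret_access_key values are expected to come from the
# env() lookups used above. Each file is written twice, under a dated key and a TAG_LATEST copy.
#   archive = InternetArchive(item='my-scraper-data', prefix='exports')
#   url = archive.upload_file('/tmp/report.csv')
#   print(url)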
| 2.265625 | 2 |
SCRAPERS/pricegrabber.py | tgtads/yahooFinanceEventStudy | 0 | 12797409 | #!/usr/bin/python3
# DESCRIPTION
# An efficient python script that reads a line separated list of stock symbols
# with optional start and end dates for range and saves the relevant daily
# volume and adjusted closing prices from Yahoo Finance.
# a logfile is also created to summarise the outcome in terms of available data
# please note that yahoo will return a different payload than expected if
# the start or end dates requested do not match global calendar dates,
# such as 2015-06-31
# I leave it to the user to check for this.
# for usage, use -h
import argparse
import urllib.request
import re
import csv
import os
from collections import defaultdict
# read a csv file and return dictionary objects
def get_csv_dict(path):
dictObjs = []
with open(path) as fileObj:
# assuming the first line is a header
header = csv.reader(fileObj, delimiter=",", quotechar='"').__next__()
for line in csv.DictReader(fileObj, fieldnames=header):
dictObjs.append(line)
return dictObjs
# why not convert to a list of objects rather than create and append?
# if it does not exist, it must be created
def init_folder(path):
if os.path.exists(path):
if os.path.isdir(path):
return True
else:
print("File [%s] will not be overwritten" % path)
return False
else:
try:
os.makedirs(path)
return True
except FileExistsError as e:
print("File Error [%s] with [%s]" % (e, path))
return False
# forming urls specifically for the yahoo service
def form_url(symbol, start="", end="", frequency="d"):
# check format, adjust month number, format to string
# or leave blank if does not conform
if re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", start):
dateInts = [int(d) for d in re.split("-", start)]
dateInts[1] -= 1
startDForm = "&c=%d&a=%d&b=%d" % tuple(dateInts)
else:
startDForm = ""
if re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", end):
dateInts = [int(d) for d in re.split("-", end)]
dateInts[1] -= 1
endDForm = "&f=%d&d=%d&e=%d" % tuple(dateInts)
else:
endDForm = ""
url = ("http://real-chart.finance.yahoo.com/table.csv" +
"?s=" + symbol + endDForm + "&g=" + frequency + startDForm +
"&ignore=.csv")
return url
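# Example (the ticker and dates are hypothetical; the mapping follows the format strings above):
#   form_url("IBM", "2015-01-05", "2015-02-06") returns
#   "http://real-chart.finance.yahoo.com/table.csv?s=IBM&f=2015&d=1&e=6&g=d&c=2015&a=0&b=5&ignore=.csv"
# i.e. a/b/c are the zero-based start month, start day and start year, and d/e/f the
# zero-based end month, end day and end year.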
# cleanly return the results of a web request
def req(url):
try:
return urllib.request.urlopen(url)
except urllib.request.URLError as e:
print("HTTP Error [%s] with [%s]" % (e.code, url))
# return the http object contents in usable format
def read_decode(httpObj):
body = httpObj.read()
httpObj.close()
try:
return body.decode('utf-8') # required, but a bottleneck
except UnicodeDecodeError as e:
print("Decode Error [%s]" % e)
# reform provided payload items for legibility and remove irrelevant variables
def reform_payload(items):
# reversing the headed list into continual order
items.append(items[0])
items.reverse()
items.pop()
# rename the header fields
items[0] = re.sub("Date", "date", items[0])
items[0] = re.sub("Volume", "v", items[0])
items[0] = re.sub("Adj Close", "p", items[0])
# determine if the date format requires reformatting
reformDate = True if (re.search("^[0-9]{2}/[0-9]{2}/[0-9]{4},.*",
items[1])) else False
# for each line, split by comma, extract only the desired elements
for i in range(len(items)):
items[i] = re.sub(",[^,]*,[^,]*,[^,]*,[^,]*", "", items[i])
if reformDate:
items[i] = ("%s-%s-%s%s" % (items[i][6:10],
items[i][3:5],
items[i][0:2],
items[i][10:]))
return items
# write list items en masse to a file
def write_items(path, items, mode="at"):
with open(path, mode) as fileObj:
try:
for i in items:
fileObj.write("%s\n" % i)
finally:
fileObj.close()
# write a line of text to a file
def writeln(path, text, mode="at"):
with open(path, mode) as fileObj:
try:
fileObj.write("%s\n" % text)
return True
except:
print("File error: could not write [%s] to [%s]" % (text, path))
return False
finally:
fileObj.close()
# find unique items and preserve order. By <NAME>
def unique(seq):
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
# transform a nested dictionary into (unique key), (value) pairs
def reform_items(items):
s = []
for i in items:
s.append((i['symbol'], i['tradingDate']))
d = defaultdict(list)
for k, v in s:
d[k].append(v)
return d
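# For illustration (hypothetical rows):
#   reform_items([{'symbol': 'IBM', 'tradingDate': '2015-01-05'},
#                 {'symbol': 'IBM', 'tradingDate': '2015-02-06'}])
# returns defaultdict(list, {'IBM': ['2015-01-05', '2015-02-06']})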
# main section =========================================================
# start with parsing the arguments
argparser = argparse.ArgumentParser()
argparser.add_argument("items", type=str,
help=("CSV format items of stock symbols " +
"and dates of interest, YYYY-MM-DD format"))
argparser.add_argument("folder", type=str,
help=("The path to a folder to which stock " +
"price files are saved."))
argparser.add_argument("log", type=str,
help=("The path to a machine-readable logfile."))
argparser.add_argument("-s", "--startDate", type=str,
help=("Initial date sought for the range of " +
"time series data, YYYY-MM-DD format"))
argparser.add_argument("-e", "--endDate", type=str,
help=("Final date sought for the range of time " +
"series data, YYYY-MM-DD format"))
args = argparser.parse_args()
items = get_csv_dict(args.items)
initFolderSuccess = init_folder(args.folder)
initLogSuccess = writeln(path=args.log, mode="wt",
text="symbol,tradingDate,position,datapoints")
startDate = str(args.startDate) if args.startDate else ""
endDate = str(args.endDate) if args.endDate else ""
if items and initFolderSuccess and initLogSuccess:
uniqueSymbols = unique(list(i['symbol'] for i in items))
rItems = reform_items(items)
for symbol in uniqueSymbols:
print("Accessing %s" % symbol)
# get the raw payload
httpObj = req(form_url(symbol=symbol,
start=startDate,
end=endDate))
if httpObj:
# transform it to list items and check the number of rows
nData = 0
payload = re.split("\n", read_decode(httpObj))
if payload:
if payload[-1] == "":
payload.pop() # workaround for final \n on split
nData = len(payload) - 1
# write the reformed payload
rPayload = reform_payload(payload)
write_items(path=("%s/%s.csv" % (args.folder, symbol)),
items=rPayload,
mode="wt")
# get position of each tradingDate and write it to logfile
for tradingDate in rItems[symbol]:
position = ""
if rPayload:
pattern = re.compile(tradingDate)
for pos in range(len(rPayload)):
if not position:
if pattern.match(rPayload[pos]):
position = str(pos)
# perhaps it might be quicker to make a list of the results
# and use write_items instead?
writeln(path=args.log, mode="at",
text=("%s,%s,%s,%s" % (symbol,
tradingDate,
position,
str(nData))))
| 3.234375 | 3 |
tests/test_warnings.py | flashdagger/conmon | 0 | 12797410 | <reponame>flashdagger/conmon
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
import pytest
from conmon.warnings import Regex
output = [
"src/main/src/Em_FilteringQmFu.c: In function "
"\u2018Em_FilteringQmFu_processSensorSignals\u2019:",
"src/main/src/Em_FilteringQmFu.c:266:5: warning: implicit declaration of function "
"\u2018memset\u2019 [-Wimplicit-function-declaration]",
" memset(&reicevedSignals, 0, sizeof(reicevedSignals));",
" ^~~~~~",
"C:\\source_subfolder\\source\\common\\x86\\seaintegral.asm:92: warning: improperly calling "
"multi-line macro `SETUP_STACK_POINTER' with 0 parameters [-w+macro-params-legacy]",
"some text",
"In file included from C:\\conan\\data\\source_subfolder\\zutil.c:10:",
"C:\\conan\\data\\source_subfolder/gzguts.h(146,52): warning: extension used "
"[-Wlanguage-extension-token]",
"ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int));",
" ^",
"/build/source_subfolder/bzip2.c: In function ‘applySavedFileAttrToOutputFile’:",
"/build/source_subfolder/bzip2.c:1073:11: warning: ignoring return value of ‘fchown’, declared "
"with attribute warn_unused_result [-Wunused-result]",
" 1073 | (void) fchown ( fd, fileMetaInfo.st_uid, fileMetaInfo.st_gid );",
" | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~",
"/source_subfolder/src/constexp.y:35.1-25: warning: deprecated directive: ‘%name-prefix "
'"constexpYY"’, use ‘%define api.prefix {constexpYY}’ [-Wdeprecated]',
' 35 | %name-prefix "constexpYY"',
" | ^~~~~~~~~~~~~~~~~~~~~~~~~" " | %define api.prefix {constexpYY}",
"/source_subfolder/src/constexp.y: warning: fix-its can be applied. Rerun with option "
"'--update'. [-Wother]",
"/source_subfolder/common/socket_utils.cc(43): warning C4312: 'reinterpret_cast': conversion "
"from 'int' to 'HANDLE' of greater size",
"C:\\source_subfolder\\bzlib.c(1418,10): warning C4996: 'strcat': This function or variable "
"may be unsafe. Consider using strcat_s instead. To disable deprecation, use "
"_CRT_SECURE_NO_WARNINGS. See online help for details.",
' strcat(mode2,"b"); /* binary mode */',
" ^",
"Makefile.config:565: No sys/sdt.h found, no SDT events are defined, please install "
"systemtap-sdt-devel or systemtap-sdt-dev",
"CMake Warning:",
" Manually-specified variables were not used by the project:",
"",
" CMAKE_EXPORT_NO_PACKAGE_REGISTRY",
"",
"",
"libjpeg/1.2.3: WARN: package is corrupted",
"WARN: libmysqlclient/8.0.25: requirement openssl/1.1.1m "
"overridden by poco/1.11.1 to openssl/1.1.1l",
"In file included from ../../src/include/c.h:54,",
" from ../../src/include/postgres_fe.h:25,",
" from archive.c:19:",
"../../src/include/pg_config.h:772:24: warning: ISO C does not support \u2018__int128\u2019 "
"types [-Wpedantic]",
" 772 | #define PG_INT128_TYPE __int128",
" | ^~~~~~~~",
"configure: WARNING:",
"*** Without Bison you will not be able to build PostgreSQL from Git nor",
"*** change any of the parser definition files. You can obtain Bison from",
"*** a GNU mirror site. (If you are using the official distribution of",
"*** PostgreSQL then you do not need to worry about this, because the Bison",
"*** output is pre-generated.)",
"end",
"CMake Warning at cmake/ldap.cmake:158 (MESSAGE):",
" Could not find LDAP",
"Call Stack (most recent call first):",
" CMakeListsOriginal.txt:1351 (MYSQL_CHECK_LDAP)",
" CMakeLists.txt:7 (include)",
"",
"",
"CMake Warning at libmysql/authentication_ldap/CMakeLists.txt:30 (MESSAGE):",
" Skipping the LDAP client authentication plugin",
"",
"",
"In file included from /package/include/glib-2.0/gobject/gobject.h:24,",
" from /package/include/glib-2.0/gobject/gbinding.h:29,",
" from /package/include/glib-2.0/glib-object.h:22,",
" from ../source_subfolder/atk/atkobject.h:27,",
" from ../source_subfolder/atk/atk.h:25,",
" from ../source_subfolder/atk/atktext.c:22:",
"../source_subfolder/atk/atktext.c: In function \u2018atk_text_range_get_type_once\u2019:",
"../source_subfolder/atk/atktext.c:1640:52: warning: ISO C prohibits argument conversion to "
"union type [-Wpedantic]",
" 1640 | G_DEFINE_BOXED_TYPE (AtkTextRange, atk_text_range, atk_text_range_copy,",
" | ^~~~~~~~~~~~~~~~~~~",
"CMake Warning:",
" Manually-specified variables were not used by the project:",
"",
" CMAKE_EXPORT_NO_PACKAGE_REGISTRY",
" CMAKE_INSTALL_BINDIR",
" CMAKE_INSTALL_DATAROOTDIR",
" CMAKE_INSTALL_INCLUDEDIR",
" CMAKE_INSTALL_LIBDIR",
" CMAKE_INSTALL_LIBEXECDIR",
" CMAKE_INSTALL_OLDINCLUDEDIR",
" MAKE_INSTALL_SBINDIR",
"",
"",
"source_subfolder/src/tramp.c:215:52: warning: format \u2018%ld\u2019 expects argument of type "
"\u2018long int *\u2019, but argument 8 has type \u2018long unsigned int *\u2019 [-Wformat=]",
' 215 | nfields = sscanf (line, "%lx-%lx %9s %lx %9s %ld %s",',
" | ~~^",
" | |",
" | long int *",
" | %ld",
" 216 | &start, &end, perm, &offset, dev, &inode, file);",
" | ~~~~~~",
" | |",
" | long unsigned int *",
"In file included from ../../src/include/postgres.h:47,",
" from rmtree.c:15:",
"rmtree.c: In function \u2018rmtree\u2019:",
"In file included from ../../src/include/c.h:54,",
" from ../../src/include/postgres.h:46,",
" from stringinfo.c:20:",
"../../src/include/pg_config.h:772:24: warning: ISO C does not support \u2018__int128\u2019 "
"types [-Wpedantic]",
"C:\\src\\bzlib.c(161) : note: index 'blockSize100k' range checked by comparison on this line",
"ebcdic.c:284: warning: ISO C forbids an empty translation unit [-Wpedantic]",
" 284 | #endif",
" | ",
"WARNING: this is important",
"warning: Boost.Build engine (b2) is 4.8.0",
"./src/graph.cc: In member function \u2018void Edge::Dump(const char*) const\u2019:",
"./src/graph.cc:409:16: warning: format \u2018%p\u2019 expects argument of type "
"\u2018void*\u2019, but argument 2 has type \u2018const Edge*\u2019 [-Wformat=]",
' 409 | printf("] 0x%p\\n", this);',
" | ~^",
" | |",
" | void*",
"ninja/1.9.0 (test package): WARN: This conanfile has no build step",
"src/port/pg_crc32c_sse42_choose.c(41,10): warning : passing 'unsigned int [4]' to "
"parameter of type 'int *' converts between pointers to integer types with different sign "
"[-Wpointer-sign] [C:\\conan\\source_subfolder\\libpgport.vcxproj]",
"NMAKE : fatal error U1077: 'C:\\Users\\marcel\\applications\\LLVM\\bin\\clang-cl.EXE' : "
"return code '0x1'",
"clang-cl: warning: /: 'linker' input unused [-Wunused-command-line-argument]",
"In file included from crypto\\asn1\\a_sign.c:22:",
"In file included from include\\crypto/evp.h:11:",
"In file included from include\\internal/refcount.h:21:",
"In file included from C:\\Users\\LLVM\\lib\\clang\\13.0.1\\include\\stdatomic.h:17:",
"C:\\Program Files (x86)\\Microsoft Visual Studio\\include\\stdatomic.h(15,2): "
"error: <stdatomic.h> is not yet supported when compiling as C",
"#error <stdatomic.h> is not yet supported when compiling as C",
" ^",
"C:\\conan\\source_subfolder\\Crypto\\src\\OpenSSLInitializer.cpp(35,10): "
"warning: OpenSSL 1.1.1l 24 Aug 2021 [-W#pragma-messages]",
" #pragma message (OPENSSL_VERSION_TEXT POCO_INTERNAL_OPENSSL_BUILD)",
" ^",
"source_subfolder/meson.build:1559:2: ERROR: Problem encountered: "
"Could not determine size of size_t.",
"",
]
dataset = [
pytest.param(
[
{
"context": "src/main/src/Em_FilteringQmFu.c: In function "
"‘Em_FilteringQmFu_processSensorSignals’:\n",
"file": "src/main/src/Em_FilteringQmFu.c",
"line": "266",
"column": "5",
"severity": "warning",
"info": "implicit declaration of function ‘memset’",
"category": "-Wimplicit-function-declaration",
"project": None,
"hint": ""
" memset(&reicevedSignals, 0, sizeof(reicevedSignals));\n"
" ^~~~~~",
},
{
"context": "",
"file": "C:\\source_subfolder\\source\\common\\x86\\seaintegral.asm",
"line": "92",
"column": None,
"severity": "warning",
"info": ""
"improperly calling multi-line macro `SETUP_STACK_POINTER' with 0 parameters",
"category": "-w+macro-params-legacy",
"project": None,
"hint": None,
},
{
"context": "In file included from C:\\conan\\data\\source_subfolder\\zutil.c:10:\n",
"file": "C:\\conan\\data\\source_subfolder/gzguts.h",
"line": "146",
"column": "52",
"severity": "warning",
"info": "extension used",
"category": "-Wlanguage-extension-token",
"project": None,
"hint": ""
"ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int));\n"
" ^",
},
{
"context": "/build/source_subfolder/bzip2.c: In function "
"‘applySavedFileAttrToOutputFile’:\n",
"file": "/build/source_subfolder/bzip2.c",
"line": "1073",
"column": "11",
"severity": "warning",
"info": ""
"ignoring return value of ‘fchown’, declared with attribute warn_unused_result",
"category": "-Wunused-result",
"project": None,
"hint": ""
" 1073 | (void) fchown ( fd, fileMetaInfo.st_uid, fileMetaInfo.st_gid );\n"
" | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~",
},
{
"context": "",
"file": "/source_subfolder/src/constexp.y",
"line": "35",
"column": "1-25",
"severity": "warning",
"info": 'deprecated directive: ‘%name-prefix "constexpYY"’, use ‘%define '
"api.prefix {constexpYY}’",
"category": "-Wdeprecated",
"project": None,
"hint": ""
' 35 | %name-prefix "constexpYY"\n'
" | ^~~~~~~~~~~~~~~~~~~~~~~~~ | %define api.prefix "
"{constexpYY}",
},
{
"context": "",
"file": "/source_subfolder/src/constexp.y",
"line": None,
"column": None,
"severity": "warning",
"info": "fix-its can be applied. Rerun with option '--update'.",
"category": "-Wother",
"project": None,
"hint": None,
},
{
"context": ""
"In file included from ../../src/include/c.h:54,\n"
" from ../../src/include/postgres_fe.h:25,\n"
" from archive.c:19:\n",
"file": "../../src/include/pg_config.h",
"line": "772",
"column": "24",
"severity": "warning",
"info": "ISO C does not support ‘__int128’ types",
"category": "-Wpedantic",
"project": None,
"hint": ""
" 772 | #define PG_INT128_TYPE __int128\n"
" | ^~~~~~~~",
},
{
"context": ""
"In file included from /package/include/glib-2.0/gobject/gobject.h:24,\n"
" from /package/include/glib-2.0/gobject/gbinding.h:29,\n"
" from /package/include/glib-2.0/glib-object.h:22,\n"
" from ../source_subfolder/atk/atkobject.h:27,\n"
" from ../source_subfolder/atk/atk.h:25,\n"
" from ../source_subfolder/atk/atktext.c:22:\n"
"../source_subfolder/atk/atktext.c: In function "
"\u2018atk_text_range_get_type_once\u2019:\n",
"file": "../source_subfolder/atk/atktext.c",
"line": "1640",
"column": "52",
"severity": "warning",
"info": "ISO C prohibits argument conversion to union type",
"category": "-Wpedantic",
"project": None,
"hint": ""
" 1640 | G_DEFINE_BOXED_TYPE (AtkTextRange, atk_text_range, atk_text_range_copy,\n"
" | ^~~~~~~~~~~~~~~~~~~",
},
{
"context": "",
"file": "source_subfolder/src/tramp.c",
"line": "215",
"column": "52",
"severity": "warning",
"info": "format ‘%ld’ expects argument of type ‘long int *’, but argument 8 "
"has type ‘long unsigned int *’",
"category": "-Wformat=",
"project": None,
"hint": ""
' 215 | nfields = sscanf (line, "%lx-%lx %9s %lx %9s %ld %s",\n'
" | ~~^\n"
" | |\n"
" | long int *\n"
" | %ld\n"
" 216 | &start, &end, perm, &offset, dev, &inode, file);\n"
" | ~~~~~~\n"
" | |\n"
" | long unsigned int *",
},
{
"context": ""
"In file included from ../../src/include/postgres.h:47,\n"
" from rmtree.c:15:\n"
"rmtree.c: In function ‘rmtree’:\n"
"In file included from ../../src/include/c.h:54,\n"
" from ../../src/include/postgres.h:46,\n"
" from stringinfo.c:20:\n",
"file": "../../src/include/pg_config.h",
"line": "772",
"column": "24",
"severity": "warning",
"info": "ISO C does not support ‘__int128’ types",
"category": "-Wpedantic",
"project": None,
"hint": None,
},
{
"context": "",
"file": "C:\\src\\bzlib.c",
"line": "161",
"column": None,
"severity": "note",
"category": None,
"project": None,
"info": "index 'blockSize100k' range checked by comparison on this line",
"hint": None,
},
{
"context": "",
"file": "ebcdic.c",
"line": "284",
"column": None,
"severity": "warning",
"info": "ISO C forbids an empty translation unit",
"category": "-Wpedantic",
"project": None,
"hint": " 284 | #endif\n | ",
},
{
"context": "./src/graph.cc: In member function "
"\u2018void Edge::Dump(const char*) const\u2019:\n",
"file": "./src/graph.cc",
"line": "409",
"column": "16",
"severity": "warning",
"info": ""
"format ‘%p’ expects argument of type ‘void*’, "
"but argument 2 has type ‘const Edge*’",
"category": "-Wformat=",
"project": None,
"hint": ""
' 409 | printf("] 0x%p\\n", this);\n'
" | ~^\n"
" | |\n"
" | void*",
},
{
"context": "",
"file": "src/port/pg_crc32c_sse42_choose.c",
"line": "41",
"column": "10",
"severity": "warning",
"info": "passing 'unsigned int [4]' to parameter of type 'int *' converts "
"between pointers to integer types with different sign",
"category": "-Wpointer-sign",
"project": "C:\\conan\\source_subfolder\\libpgport.vcxproj",
"hint": None,
},
{
"context": "",
"file": "clang-cl",
"severity": "warning",
"info": "/: 'linker' input unused",
"category": "-Wunused-command-line-argument",
"line": None,
"column": None,
"project": None,
"hint": None,
},
{
"context": "In file included from crypto\\asn1\\a_sign.c:22:\n"
"In file included from include\\crypto/evp.h:11:\n"
"In file included from include\\internal/refcount.h:21:\n"
"In file included from "
"C:\\Users\\LLVM\\lib\\clang\\13.0.1\\include\\stdatomic.h:17:\n",
"file": "C:\\Program Files (x86)\\Microsoft Visual Studio\\include\\stdatomic.h",
"line": "15",
"column": "2",
"severity": "error",
"category": None,
"info": "<stdatomic.h> is not yet supported when compiling as C",
"hint": "#error <stdatomic.h> is not yet supported when compiling as C\n"
" ^",
"project": None,
},
{
"context": "",
"file": "C:\\conan\\source_subfolder\\Crypto\\src\\OpenSSLInitializer.cpp",
"line": "35",
"column": "10",
"severity": "warning",
"info": "OpenSSL 1.1.1l 24 Aug 2021",
"category": "-W#pragma-messages",
"project": None,
"hint": " #pragma message (OPENSSL_VERSION_TEXT "
"POCO_INTERNAL_OPENSSL_BUILD)\n"
" ^",
},
{
"context": "",
"file": "source_subfolder/meson.build",
"line": "1559",
"column": "2",
"severity": "ERROR",
"category": None,
"info": "Problem encountered: Could not determine size of size_t.",
"project": None,
"hint": None,
},
],
id="gnu",
),
pytest.param(
[
{
"file": "/source_subfolder/common/socket_utils.cc",
"line": "43",
"column": None,
"severity": "warning",
"info": "'reinterpret_cast': conversion from 'int' to 'HANDLE' of greater size",
"category": "C4312",
"project": None,
"hint": None,
},
{
"file": "C:\\source_subfolder\\bzlib.c",
"line": "1418",
"column": "10",
"category": "C4996",
"severity": "warning",
"info": "'strcat': This function or variable may be unsafe. Consider using "
"strcat_s instead. To disable deprecation, use "
"_CRT_SECURE_NO_WARNINGS. See online help for details.",
"project": None,
"hint": ' strcat(mode2,"b"); /* binary mode */\n ^',
},
{
"file": "NMAKE",
"line": None,
"column": None,
"severity": "fatal error",
"category": "U1077",
"info": "'C:\\Users\\marcel\\applications\\LLVM\\bin\\clang-cl.EXE' : return "
"code '0x1'",
"project": None,
"hint": None,
},
],
id="msvc",
),
pytest.param(
[
{
"context": None,
"severity": "Warning",
"file": None,
"line": None,
"function": None,
"info": ""
" Manually-specified variables were not used by the project:\n"
"\n"
" CMAKE_EXPORT_NO_PACKAGE_REGISTRY",
},
{
"file": "cmake/ldap.cmake",
"line": "158",
"severity": "Warning",
"function": "MESSAGE",
"info": " Could not find LDAP",
"context": ""
"Call Stack (most recent call first):\n"
" CMakeListsOriginal.txt:1351 (MYSQL_CHECK_LDAP)\n"
" CMakeLists.txt:7 (include)",
},
{
"file": "libmysql/authentication_ldap/CMakeLists.txt",
"line": "30",
"severity": "Warning",
"function": "MESSAGE",
"context": None,
"info": " Skipping the LDAP client authentication plugin",
},
{
"context": None,
"severity": "Warning",
"file": None,
"line": None,
"function": None,
"info": " Manually-specified variables were not used by the project:\n"
"\n"
" CMAKE_EXPORT_NO_PACKAGE_REGISTRY\n"
" CMAKE_INSTALL_BINDIR\n"
" CMAKE_INSTALL_DATAROOTDIR\n"
" CMAKE_INSTALL_INCLUDEDIR\n"
" CMAKE_INSTALL_LIBDIR\n"
" CMAKE_INSTALL_LIBEXECDIR\n"
" CMAKE_INSTALL_OLDINCLUDEDIR\n"
" MAKE_INSTALL_SBINDIR",
},
],
id="cmake",
),
pytest.param(
[
{
"from": "Makefile.config",
"info": "No sys/sdt.h found, no SDT events are defined, please install "
"systemtap-sdt-devel or systemtap-sdt-dev",
"line": "565",
"severity": None,
},
{
"from": "configure",
"line": None,
"severity": "WARNING",
"info": ""
"\n*** Without Bison you will not be able to build PostgreSQL from Git nor"
"\n*** change any of the parser definition files. You can obtain Bison from"
"\n*** a GNU mirror site. (If you are using the official distribution of"
"\n*** PostgreSQL then you do not need to worry about this, because the Bison"
"\n*** output is pre-generated.)",
},
],
id="autotools",
),
pytest.param(
[
{
"ref": "libjpeg/1.2.3",
"name": "libjpeg",
"version": "1.2.3",
"user": None,
"channel": None,
"info": "package is corrupted",
"severity": "WARN",
"severity_l": None,
},
{
"ref": "libmysqlclient/8.0.25",
"name": "libmysqlclient",
"version": "8.0.25",
"user": None,
"channel": None,
"severity_l": "WARN",
"severity": None,
"info": "requirement openssl/1.1.1m overridden by poco/1.11.1 to openssl/1.1.1l",
},
{
"channel": None,
"info": "this is important",
"name": None,
"ref": None,
"severity": None,
"severity_l": "WARNING",
"user": None,
"version": None,
},
{
"severity_l": None,
"ref": "ninja/1.9.0",
"name": "ninja",
"version": "1.9.0",
"channel": None,
"user": None,
"info": "This conanfile has no build step",
"severity": "WARN",
},
],
id="conan",
),
pytest.param(
[
{
"severity": "warning",
"info": "Boost.Build engine (b2) is 4.8.0",
},
],
id="build",
),
]
@pytest.mark.parametrize("expected", dataset)
def test_warnings_regex(expected, request):
compiler = request.node.callspec.id
matches = list(
match.groupdict()
for match in re.finditer(Regex.get(compiler), "\n".join(output))
)
assert matches == expected
| 1.890625 | 2 |
src/cybersource/signature.py | thelabnyc/django-oscar-cybersource | 3 | 12797411 | from django.core.exceptions import SuspiciousOperation
import hashlib
import hmac
import base64
class SecureAcceptanceSigner(object):
def __init__(self, secret_key):
self.secret_key = secret_key
def sign(self, data, signed_fields):
key = self.secret_key.encode("utf-8")
msg_raw = self._build_message(data, signed_fields).encode("utf-8")
msg_hmac = hmac.new(key, msg_raw, hashlib.sha256)
return base64.b64encode(msg_hmac.digest())
def verify_request(self, request):
# Ensure the signature is valid and that this request can be trusted
signed_field_names = request.POST.get("signed_field_names")
if not signed_field_names:
raise SuspiciousOperation("Request has no fields to verify")
signed_field_names = signed_field_names.split(",")
signature_given = request.POST["signature"].encode("utf-8")
signature_calc = self.sign(request.POST, signed_field_names)
return signature_given == signature_calc
def _build_message(self, data, signed_fields):
parts = []
for field in signed_fields:
parts.append("%s=%s" % (field, data.get(field, "")))
return ",".join(parts)
| 2.34375 | 2 |
Chapter 1- Arrays and Strings/problem2/method1.py | lorderikir/Cracking-the-Coding-Interview | 6 | 12797412 | <reponame>lorderikir/Cracking-the-Coding-Interview
def permutation(stringA, stringB):
listA = list(stringA)
listB = list(stringB)
listA.sort()
listB.sort()
if(listA != listB):
return False
else:
return True
| 3.546875 | 4 |
web/ancv_html_scraper.py | silvos590/ancv_restaurant_scraper | 1 | 12797413 | from os import makedirs, path
from lxml import html
import requests
import urllib
import getopt, sys, os
import re
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
#old_address = lambda n,city: f'https://guide.ancv.com/recherche/liste/cv?page={n}&rows=30&f%5B0%5D=im_field_ptl_activite_reference%3A6339&f%5B1%5D=im_field_ptl_activite_reference%3A6344&localisation={city}'
address = lambda city: f'https://leguide.ancv.com/ptl/recherche/list?location={city}&filters%5Bdomaine_activite_principale%5D%5BRestauration%5D=Restauration'
# Write the list of sorted items in file
def store(set_items, output):
if output == None:
print('output name is mandatory')
exit(1)
else:
with open(output,"w") as file:
for t in set_items:
str = ''.join(t)
file.writelines(str + '\n')
def getTotalNumberOfRestaurants(browser, city):
# Get the total number of restaurants
page = requests.get(address(city))
browser.get(address(city))
if page.status_code != 200:
print(f'cannot connect to ancv website')
sys.exit(1)
tree = html.fromstring(page.content)
total_resto_number = tree.xpath('//*[@id="spanNbResult"]/text()')
if total_resto_number == None or len(total_resto_number) == 0:
return 0
else:
print(f'Total number of restaurants: {total_resto_number[0]}')
return int(total_resto_number[0])
def restoLookup(city):
print('Start...')
total_resto = 0
resto_set = set()
# Set option to do not open the browser
options = Options()
options.add_argument('--headless')
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
browser = webdriver.Chrome(options=options)
# total restaurants
total_resto = getTotalNumberOfRestaurants(browser, city)
if total_resto == 0:
print(f'no restaurant found')
return
# collect all the restaurants name
restaurants = []
# With the latest version of the site, the list of restaurants is loaded dinamically
# when the user scrolls the page, this made their website much more usable.
# The infinite scroll can be normally stop when the scrolled more than remain scrollHeight
# for some reason in this website thescrollHeight attribute is not updated after each scroll.
# The workaround was to stop the loop we found all the restaurants.
# I will add a safety timer to avoid infinite loop.
time.sleep(2) # Allow 2 seconds for the web page to open
scroll_pause_time = 4 # set pause time between scrolls
screen_height = browser.execute_script("return window.screen.height;") # get the screen height of the web
i = 1
while True:
# scroll one screen height each time
browser.execute_script("window.scrollTo(0, {screen_height}*{i}*10);".format(screen_height=screen_height, i=i))
i += 1
time.sleep(scroll_pause_time)
# update scroll height each time after scrolled, as the scroll height can change after we scrolled the page
#scroll_height = browser.execute_script("return document.body.scrollHeight;")
restaurants = browser.find_elements_by_xpath('//*[@id="ptl-list-content"]/div/div/div[2]/p[2]')
print(f'resto found till now: {len(restaurants)}')
# Break the loop when the height we need to scroll to is larger than the total scroll height
#if (screen_height) * i > scroll_height:
# Warning: stopping when we found all the restaturants
if len(restaurants) >= total_resto:
break
if len(restaurants) == 0:
print(f'no restaurant found')
return
else:
print(f'restaurants {len(restaurants)} found')
# Add restaurants to the set
for r in restaurants:
print(f'Restaurant name: {r.text}')
t = r.text.replace("\'", "")
resto_set.add(t)
print('Removing duplicates and sorting the results...')
sorted_set = sorted(resto_set)
print('Done')
print(f'Restaurants found: {len(sorted_set)}')
return sorted_set
def usage():
print('Usage: ./ancv_html_scraper.py -c <city> -o <output-file>')
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:v:c:s", ["help", "output=", "city=", "silent-mode"])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
output = None
city = None
verbose = False
silent_mode = False
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
output = a
elif o in ("-c", "--city"):
city = a
elif o in ("-s", "--silent-mode"):
silent_mode = True
else:
assert False, "unhandled option"
if silent_mode == True:
f = open(os.devnull, 'w')
sys.stdout = f
if city == None :
print('city is a mandatory parameter')
exit(1)
if output == None :
output = 'restaurants_cv.txt'
restaurants = restoLookup(city)
store(restaurants, output)
if __name__ == "__main__":
main()
| 2.859375 | 3 |
Backup.py | paringandhi10/AND_lab | 0 | 12797414 | <reponame>paringandhi10/AND_lab
import getpass
import telnetlib
print "******You are now Backing up CISCO and ARISTA router******"
user = raw_input ("Enter your username of CISCO and ARISTA:")
password = getpass.getpass()
HOST =("192.168.13.144","192.168.13.145","192.168.13.146")
for i in HOST:
| 2.34375 | 2 |
index.py | crystal-ctrl/engineering_project | 5 | 12797415 | import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import plotly.express as px
import pandas as pd
import numpy as np
from app import app
from app import server
from apps import state, county
cases = pd.read_csv('data/cases.csv')
cases['report_date']=pd.to_datetime(cases.report_date)
top_layout = html.Div([
#links to other pages
html.Div([
html.Nav(className = "nav nav-pills", children=[
html.A('State', className="nav-item nav-link btn", href='/apps/state',
style={"font-size": "2rem",
"box-shadow": "4px 4px 2px #e3e3e3",
"padding": "5px",
'marginTop':'-15px'}),
html.H5(""),
html.A('County', className="nav-item nav-link active btn", href='/apps/county',
style={"font-size": "2rem",
"box-shadow": "4px 4px 2px #e3e3e3",
"padding": "5px",
'marginTop':'-15px'})
],style = {'marginTop':'-15px'}),
], className='one-third column', id = 'links', style={'textAlign':'center'}),
#title
html.Div([
html.Div([
html.H2('VA x COVID',
style={'marginBottom': '10','marginTop':'-15px'}),
html.H3('Virginia COVID-19 Dashboard',
style={'marginTop':'-15px'})
], style={'textAlign':'center'})
], className='one-third column', id='title'),
# last update date
html.Div([
html.H6('Last Updated: ',
style={'marginTop':'-15px'}),
html.H6(str(cases['report_date'].iloc[-1].strftime('%B %d, %Y')) + ' 13:00 (EST)')
], className='one-third column', id = 'title1', style={'textAlign':'center'})
], id='header',className='row flex-display', style={'margin-bottom': '10px','marginTop':'-15px'})
app.layout = html.Div([
dcc.Location(id='url',refresh=False),
top_layout,
html.Div(id='page-content',children=[])
], id='mainContainer', style={'display': 'flex','flex-direction':'column'})
@app.callback(Output('page-content', 'children'),
[Input('url','pathname')])
def display_page(pathname):
if pathname == '/apps/state':
return state.layout
if pathname == '/apps/county':
return county.layout
else:
return state.layout
if __name__ == '__main__':
app.run_server(debug=True)
| 2.40625 | 2 |
ThreeJson.py | The-Fonz/freecad-parametric-generator | 11 | 12797416 | #
# Adapted from https://github.com/dcowden/cadquery/blob/master/cadquery/freecad_impl/exporters.py
# Objects that represent
# three.js JSON object notation
# https://github.com/mrdoob/three.js/wiki/JSON-Model-format-3
#
JSON_TEMPLATE= """\
{
"metadata" :
{
"formatVersion" : 3,
"generatedBy" : "ParametricParts",
"vertices" : %(nVertices)d,
"faces" : %(nFaces)d,
"normals" : 0,
"colors" : 0,
"uvs" : 0,
"materials" : 1,
"morphTargets" : 0
},
"scale" : 1.0,
"materials": [ {
"DbgColor" : 15658734,
"DbgIndex" : 0,
"DbgName" : "Material",
"colorAmbient" : [0.0, 0.0, 0.0],
"colorDiffuse" : [0.6400000190734865, 0.10179081114814892, 0.126246120426746],
"colorSpecular" : [0.5, 0.5, 0.5],
"shading" : "Lambert",
"specularCoef" : 50,
"transparency" : 1.0,
"vertexColors" : false
}],
"vertices": %(vertices)s,
"morphTargets": [],
"normals": [],
"colors": [],
"uvs": [[]],
"faces": %(faces)s
}
"""
def tessToJson( vert, face, nvert, nface):
'''Specify compatible lists of vertices and faces,
and get a three.js JSON object back. Note: list of face indices
must be compatible, i.e. lead with 0 for each row of 3 indices
to create a triangle. Spec:
https://github.com/mrdoob/three.js/wiki/JSON-Model-format-3'''
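    # Illustrative call (hypothetical single triangle; note the leading 0 in each face row):
    #   vert = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0]  # flat x, y, z triplets
    #   face = [0, 0, 1, 2]                                    # 0 marks a plain triangle, then 3 vertex indices
    #   tessToJson(vert, face, nvert=3, nface=1)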
return JSON_TEMPLATE % {
'vertices' : str(vert),
'faces' : str(face),
'nVertices': nvert,
'nFaces' : nface
}; | 2.109375 | 2 |
setup.py | dimastark/pyndler | 0 | 12797417 | #!/usr/bin/env python
"""
Bundle Python packages into a single script
A utility needed to build multiple files into a single script.
It can be useful for distribution or for easy writing of code
and bundle it (for example for www.codingame.com)
"""
import re
from distutils.core import setup
description, long_description = re.split('\n{2}', __doc__)
setup(
name='pyndler',
version='1.0.0',
description=description,
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/dimastark/pyndler',
tests_require=['pytest'],
scripts=["scripts/pyndler"],
packages=['pyndler'],
)
| 1.875 | 2 |
admin.py | dev-easyshares/company | 0 | 12797418 | <reponame>dev-easyshares/company
from django.contrib import admin
from django.conf import settings
from django.views.decorators.cache import never_cache
from django.contrib.admin.options import TO_FIELD_VAR
from django.contrib.admin.utils import unquote
from django.urls import reverse, resolve
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from mighty.admin.models import BaseAdmin
from mighty.applications.address import fields as address_fields
from mighty.applications.address.admin import AddressAdminInline
from company import models, fields, translates as _
from company.apps import CompanyConfig as conf
class CompanyAdmin(BaseAdmin):
fieldsets = (
(None, {"classes": ("wide",), "fields": ('denomination', 'is_type', 'since', 'site', 'effective', 'secretary', 'resume', 'share_kind')}),
('comex & purpose', {"classes": ("wide",), "fields": ('purpose', 'instance_comex', 'matrix_skills')}),
('market', {"classes": ("wide",), "fields": (
'capital_socnomtotal',
'capital_division',
'current',
'share_capital',
'turnover',
'floating',
'icb',
'market',
'dowjones',
'nasdaq',
'gaia'
)}),
('rules', {"classes": ("wide",), "fields": (
'duration_mandate',
'settle_internal',
'age_limit_pdg',
'age_limit_dg',
'stock_min_rule',
'stock_min_status'
)}),
('sieges', {"classes": ("wide",), "fields": (
'siege_fr',
)}))
list_display = ('denomination', 'since', 'siege_fr')
search_fields = ("denomination", "company_fr__siret")
change_list_template = "admin/company_change_list.html"
change_form_template = "admin/company_change_form.html"
change_form_logs_template = "admin/company_change_form_logs.html"
readonly_fields = ('siege_fr',)
search_template = None
def __init__(self, model, admin_site):
super().__init__(model, admin_site)
if conf.named_id:
self.readonly_fields += ('named_id',)
self.add_field('Informations', ('named_id',))
def render_change_form(self, request, context, add=False, change=False, form_url="", obj=None):
response = super().render_change_form(request, context, add, change, form_url, obj)
if hasattr(self.model, "changelog_model"):
response.template_name = self.change_form_logs_template
return response
def country_choice_view(self, request, object_id=None, extra_context=None):
current_url = resolve(request.path_info).url_name
opts = self.model._meta
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
obj = self.get_object(request, unquote(object_id), to_field) if object_id else None
context = {
**self.admin_site.each_context(request),
"current_url": current_url,
"title": "%s (%s)" % (_.countries, obj) if obj else _.countries,
"object_name": str(opts.verbose_name),
"object": obj,
"opts": opts,
"app_label": opts.app_label,
"media": self.media
}
request.current_app = self.admin_site.name
defaults = {
"extra_context": context,
"template_name": self.search_template or "admin/company_country_choice.html",
}
from company.views import ChoiceCountry
return ChoiceCountry.as_view(**defaults)(request)
@never_cache
def country_search_view(self, request, country, object_id=None, extra_context=None):
current_url = resolve(request.path_info).url_name
opts = self.model._meta
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
parent_object = self.get_object(request, unquote(object_id), to_field) if object_id else None
context = {
**self.admin_site.each_context(request),
"parent_object": parent_object,
"app_path": request.get_full_path(),
"username": request.user.get_username(),
"current_url": current_url,
"country": country,
"title": _.search,
"opts": opts,
"app_label": opts.app_label,
"media": self.media
}
defaults = {
"extra_context": context,
"country": country,
"parent_object": parent_object if parent_object else None,
"success_url": current_url,
"template_name": self.search_template or "admin/company_country_search.html",
}
from company.views import SearchByCountry
return SearchByCountry.as_view(**defaults)(request=request)
@never_cache
def country_add_view(self, request, country, object_id=None, extra_context=None):
current_url = resolve(request.path_info).url_name
opts = self.model._meta
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
parent_object = self.get_object(request, unquote(object_id), to_field) if object_id else None
context = {
**self.admin_site.each_context(request),
"parent_object": parent_object,
"app_path": request.get_full_path(),
"username": request.user.get_username(),
"current_url": current_url,
"country": country,
"title": _.search,
"opts": opts,
"app_label": opts.app_label,
"media": self.media
}
defaults = {
"extra_context": context,
"admin": True,
"country": country,
"parent_object": parent_object if parent_object else None,
"template_name": self.search_template or "admin/company_country_add.html",
}
from company.views import AddByCountry
return AddByCountry.as_view(**defaults)(request)
def get_urls(self):
from django.urls import path, include
urls = super().get_urls()
info = self.model._meta.app_label, self.model._meta.model_name
my_urls = [
path("choices/", include([
path("", self.country_choice_view, name="%s_%s_country_choice" % info),
path("<str:country>/search/", include([
path("", self.country_search_view, name="%s_%s_country_search" % info),
path("add/", self.country_add_view, name="%s_%s_country_add" % info),
]))
])),
path("<path:object_id>/choices/", include([
path("", self.country_choice_view, name="%s_%s_country_choice_extend" % info),
path("<str:country>/search/", include([
path("", self.country_search_view, name="%s_%s_country_search_extend" % info),
path("add/", self.country_add_view, name="%s_%s_country_add_extend" % info),
]))
]))
]
return my_urls + urls
#####################
# FR
#####################
class CompanyFRAdminInline(admin.StackedInline):
fields = fields.country + fields.fr
extra = 0
max_num = 0
class CompanyAddressFRAdminInline(AddressAdminInline):
fields = address_fields + ('is_siege', 'is_active')
class BaloAdmin(BaseAdmin):
fieldsets = ((None, {"classes": ("wide",), "fields": fields.balo}),)
#####################
# US
##################### | 1.734375 | 2 |
tmeval/corpora/generate/uci.py | arlenk/topic-model-evaluation | 0 | 12797419 | <filename>tmeval/corpora/generate/uci.py<gh_stars>0
"""
Datasets from UCI
https://archive.ics.uci.edu/ml/datasets.html
"""
import urllib
import os
from itertools import islice
import copy
from gensim.models import VocabTransform
from gensim.corpora.dictionary import Dictionary
from gensim.corpora import MmCorpus, UciCorpus
from gensim.interfaces import CorpusABC
def generate_mmcorpus_files(corpus_name: str,
target_path: str,
training_pct: float = .8):
"""
Output training and validation MM corpus files
:param corpus_name:
:param target_path:
:param training_pct:
:return:
"""
corpus = download_corpus(corpus_name, target_path)
print("downloaded {} corpus [num_docs={}, num_terms={}]".format(corpus_name,
corpus.num_docs,
corpus.num_terms))
print("dropping top/bottom words in dictionary")
corpus, dictionary = filter_corpus(corpus)
# output mm files
num_documents = len(corpus)
num_documents_training = int(training_pct * num_documents)
num_documents_validation = num_documents - num_documents_training
num_vocab = len(dictionary)
print("outputting")
print(" num documents: {:,.0f}".format(num_documents))
print(" num training: {:,.0f}".format(num_documents_training))
print(" num validation: {:,.0f}".format(num_documents_validation))
print(" vocab size: {:,.0f}".format(num_vocab))
# output same dictionary for training and validation
output_prefix = os.path.join(target_path, "{}.filtered".format(corpus_name))
dictionary_file = output_prefix + ".dictionary"
dictionary.save(dictionary_file)
# training data
output_file = output_prefix + ".training.mm"
training_corpus = islice(corpus, num_documents_training)
MmCorpus.serialize(output_file, training_corpus, dictionary)
# validation
output_file = output_prefix + ".validation.mm"
validation_corpus = islice(corpus, num_documents_training, num_documents)
MmCorpus.serialize(output_file, validation_corpus, dictionary)
def download_corpus(corpus_name: str,
target_path: str) -> UciCorpus:
"""
Download corpus from UCI website
:param corpus_name:
:param target_path:
:return:
"""
url_root = "https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/"
target_path = os.path.join(target_path, "raw", "uci")
if not os.path.exists(target_path):
print("creating target path: {}".format(target_path))
os.makedirs(target_path)
vocab_file = os.path.join(target_path, "vocab.{}.txt".format(corpus_name))
print("downloading {} vocab file to: {}".format(corpus_name, vocab_file))
urllib.request.urlretrieve(url_root + "vocab.{}.txt".format(corpus_name),
filename=vocab_file)
docword_file = os.path.join(target_path, "docword.{}.txt.gz".format(corpus_name))
print("downloading {} bag of words to: {}".format(corpus_name, docword_file))
urllib.request.urlretrieve(url_root + "docword.{}.txt.gz".format(corpus_name),
filename=docword_file)
corpus = UciCorpus(docword_file, vocab_file)
return corpus
def download_dictionary(corpus_name: str,
target_path: str) -> Dictionary:
"""
Download dictionary only for a corpus from UCI website
:param corpus_name: name of UCI corpus
:param target_path: output directory for dictionary file
:return: gensim Dictionary
"""
url_root = "https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/"
target_path = os.path.join(target_path, "uci", "raw")
if not os.path.exists(target_path):
print("creating target path: {}".format(target_path))
os.makedirs(target_path)
vocab_file = os.path.join(target_path, "vocab.{}.txt".format(corpus_name))
print("downloading {} vocab file to: {}".format(corpus_name, vocab_file))
urllib.request.urlretrieve(url_root + "vocab.{}.txt".format(corpus_name),
filename=vocab_file)
dictionary = Dictionary()
with open(vocab_file) as f:
for line in f:
dictionary.add_documents([[line.strip()]])
dictionary.compactify()
return dictionary
def filter_corpus(corpus: UciCorpus) -> (CorpusABC, Dictionary):
"""
Filter extreme (frequent and infrequent) words from dictionary
:param corpus:
:return: (filtered corpus, filtered dictionary)
"""
# filter dictionary first
original_dict = corpus.create_dictionary()
filtered_dict = copy.deepcopy(original_dict)
filtered_dict.filter_extremes(no_below=20, no_above=.1)
# now transform the corpus
old2new = {original_dict.token2id[token]: new_id for new_id, token in filtered_dict.iteritems()}
vt = VocabTransform(old2new)
return vt[corpus], filtered_dict
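

# Minimal usage sketch (hedged): the corpus name and target path below are
# placeholders; the name must be one of the UCI bag-of-words datasets
# (e.g. "kos", "nips", "enron", "nytimes") and downloading requires network
# access and disk space.
if __name__ == "__main__":
    generate_mmcorpus_files("kos", "/tmp/uci", training_pct=0.8)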
| 2.953125 | 3 |
fusion_tests/fsnTicketsByAddress.py | ReDeFinance/web3fsnpy | 4 | 12797420 | #!/usr/bin/env python3
from datetime import datetime
#web3fusion
from web3fsnpy import Fsn
linkToChain = {
'network' : 'mainnet', # One of 'testnet', or 'mainnet'
'provider' : 'WebSocket', # One of 'WebSocket', 'HTTP', or 'IPC'
'gateway' : 'wss://mainnetpublicgateway1.fusionnetwork.io:10001',
#'gateway' : 'wss://testnetpublicgateway1.fusionnetwork.io:10001',
}
web3fsn = Fsn(linkToChain)
pub_key = "0x3333333333333333333333333333333333333333"
Tckts = web3fsn.ticketsByAddress(pub_key)
#print(Tckts)
print('Total number of tickets: ',len(Tckts))
print('\nor using totalNumberOfTicketsByAddress: ',web3fsn.totalNumberOfTicketsByAddress(pub_key),'\n')
for a in Tckts:
tck = Tckts[a]
st = datetime.fromtimestamp(tck.StartTime).strftime('%c')
ex = datetime.fromtimestamp(tck.ExpireTime).strftime('%c')
print('Block Height: ',tck.Height,' Start Time: ',st,' Expiry Time: ',ex)
| 2.28125 | 2 |
link_provider/py/provide_raw_dataset_links.py | WiSig-dataset/wisig-process-raw | 0 | 12797421 | <gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[2]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import matplotlib.pyplot as plt
import numpy as np
import pickle
import os
import scipy.optimize
# In[3]:
import os
GPU = ""
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=GPU
# In[9]:
with open('raw_info_dct.pkl','rb') as f:
dct = pickle.load(f)
# In[7]:
print('https://drive.google.com/u/0/uc?export=download&id={}'.format('1bQbswsPLZy5lSe5Lu_IpRoaV4wtlcLBH'))
# In[17]:
def provide_links(day_list=['2021_03_01','2021_03_08','2021_03_15','2021_03_23'],rx_list=None,tx_groups=range(10)):
total_size = 0
with open('raw_info_dct.pkl','rb') as f:
dct = pickle.load(f)
for day in day_list:
print('Day {} links'.format(day))
print('----------------------')
if rx_list is None:
rx_list=dct[day].keys()
for rx in rx_list:
print('Rx {} links'.format(rx))
if rx in dct[day].keys():
for tx_g in tx_groups:
if tx_g in dct[day][rx].keys():
lnk = dct[day][rx][tx_g][1]
total_size= total_size + dct[day][rx][tx_g][2]
print('https://drive.google.com/u/0/uc?export=download&id={}'.format(lnk))
print('')
print('')
print('')
print('Total download size is {} GB'.format(total_size/1e9))
# In[22]:
provide_links()
# In[ ]:
| 2.28125 | 2 |
src/coolbeans/tools/folding.py | runarp/coolbeans | 5 | 12797422 | import re
import datetime
class DateFoldStreamProxy:
old_date = None
date_re = re.compile(r"(?P<year>\d\d\d\d)-(?P<month>\d\d)-(?P<day>\d\d).*")
def __init__(self, stream):
self.stream = stream
def close(self):
self.stream.close()
def render_month(self, date: datetime.date):
return f"* {date.strftime('%B %Y')}\n"
def render_date(self, date: datetime.date):
return f"** {date.strftime('%Y-%m-%d - %A')}\n"
def write(self, content):
match = self.date_re.match(content)
if match:
g = dict((k, int(v)) for k, v in match.groupdict().items())
new_date = datetime.date(**g)
old_date = self.old_date
self.old_date = new_date
if not old_date or new_date.month != old_date.month:
self.stream.write(self.render_month(new_date))
if not old_date or new_date.day != old_date.day:
self.stream.write(self.render_date(new_date))
# Now write the Original Content
content = re.sub(r'\s+\n', r'\n', content, 999)
self.stream.write(content)
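

# Minimal usage sketch (hedged): wraps a stream and emits "* <Month Year>" /
# "** <date>" headings whenever a written line starts with a new YYYY-MM-DD
# date; the directive lines below are made up for illustration.
if __name__ == "__main__":
    import sys
    proxy = DateFoldStreamProxy(sys.stdout)
    proxy.write('2021-03-01 * "Coffee"  Expenses:Food  3.50 USD\n')
    proxy.write('2021-03-02 * "Lunch"   Expenses:Food  9.00 USD\n')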
| 3.015625 | 3 |
create_dataset.py | Jarema/ml-cluster-strings | 0 | 12797423 |
import extract_features as ef
import numpy as np
with open('./input.csv','r',encoding='utf-8') as input_file:
with open('./dataset.csv','w',encoding='utf-8') as dataset:
for line in input_file:
r = line.split(',')
x = r[0].strip()
y = r[1].strip()
example = ef.extractFeatures(x)
result = '{0},{1}\n'.format(
np.array2string(example, separator=','),
y
)
result = result.replace('[','')
result = result.replace(']','')
result = result.replace(' ','')
dataset.write(result)
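
# Hedged note on the expected input: input.csv is assumed to hold one
# "<string>,<label>" pair per line, and extract_features.extractFeatures() is
# assumed to return a 1-D numpy array of numeric features for each string,
# for example:
#
#     hello world,english
#     bonjour le monde,french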
| 2.875 | 3 |
orm.py | macTracyHuang/cs50w_project1 | 0 | 12797424 | import os
from flask import Flask
from application import get_app
from models import *
from flask_session import Session
app = get_app()
session = Session(app)
# Tell Flask what SQLAlchemy databas to use.
# app.config["SQLALCHEMY_DATABASE_URI"] = "postgresql://postgres:a1234567@localhost"
# app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL")
# app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# Link the Flask app with the database (no Flask app is actually being run yet).
db.init_app(app)
def main():
    """Create tables based on each table definition in `models`."""
db.drop_all()
db.create_all()
session.app.session_interface.db.create_all()
if __name__ == "__main__":
# Allows for command line interaction with Flask application
with app.app_context():
main()
| 2.734375 | 3 |
yolo_labels/LabelBox/reader.py | AIForMobility/datasets | 0 | 12797425 | <filename>yolo_labels/LabelBox/reader.py
import pandas as pd
from typing import Union
import json
import os
from yolo_labels.shared import LabelReader
class LabelBoxLabelReader(LabelReader):
def __init__(self, input_path: str, label_id_mapper: dict, separator: str = ',', images_folder: str = '',
ignore_unmapped_labels: bool = True):
self.input_path = input_path
self.separator = separator
self.images_folder = images_folder
self.ignore_unmapped_labels = ignore_unmapped_labels
super(LabelBoxLabelReader, self).__init__(input_path, label_id_mapper)
self.data = self.read_source_file() # type: pd.DataFrame
def read_source_file(self, *args, **kwargs) -> pd.DataFrame:
return pd.read_csv(self.input_path, sep=self.separator, index_col='ID')
def next_labels(self) -> tuple:
for index, row in self.data.iterrows():
objects = json.loads(row['Label'])['objects']
objects_to_be_yielded = []
for obj in objects:
labels = self.extract_object_labels(obj)
if labels is None:
continue
objects_to_be_yielded += [labels]
yield os.path.join(self.images_folder, row['External ID']), objects_to_be_yielded
def extract_object_labels(self, obj) -> Union[tuple, None]:
label_id = self.get_label_id(obj['title'])
if label_id is None:
return None
bbox = obj['bbox']
x, y, h, w = bbox['top'], bbox['left'], bbox['height'], bbox['width']
# example of labels: [('provider', ['evo']),
# ('parking_place', ['sidewalk', 'reserved_parking_space']), ('is_well_parked', ['yes'])]
# labels = [self.get_classification_values(cls) for cls in obj['classifications']]
return x, y, h, w, label_id
def get_classification_values(self, classification: dict) -> tuple:
title = classification['title']
if 'answers' in classification: # self.has_key(classification, 'answers')
return title, self.get_object_labels(classification['answers'])
elif 'answer' in classification:
return title, self.get_object_label(classification['answer'])
return title, []
@classmethod
def get_object_label(cls, classification: dict) -> list:
return [classification['value']]
@classmethod
def get_object_labels(cls, classifications: list) -> list:
return [cls['value'] for cls in classifications]
@classmethod
def has_key(cls, obj: dict, key: str):
try:
_ = obj[key]
return True
except KeyError:
return False
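

# Minimal usage sketch (hedged): the CSV path, images folder and label map are
# placeholders, and the CSV is assumed to follow the LabelBox export format
# expected above (an "External ID" column plus a "Label" column holding JSON
# with an "objects" list).
if __name__ == "__main__":
    reader = LabelBoxLabelReader(
        input_path="export.csv",
        label_id_mapper={"car": 0, "scooter": 1},
        images_folder="images",
    )
    for image_path, objects in reader.next_labels():
        print(image_path, objects)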
| 2.859375 | 3 |
tests/test_documentation.py | datosgobar/pydatajson | 13 | 12797426 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests del modulo pydatajson."""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import with_statement
import os.path
import unittest
import nose
from .context import pydatajson
from pydatajson.documentation import field_to_markdown
from pydatajson.documentation import distribution_to_markdown
from pydatajson.documentation import dataset_to_markdown
class DocumentationTestCase(unittest.TestCase):
SAMPLES_DIR = os.path.join("tests", "samples")
RESULTS_DIR = os.path.join("tests", "results")
def test_field_to_markdown(self):
field = {
"title": "procedimiento_id",
"type": "integer",
"description": "Identificador único del procedimiento"
}
result = field_to_markdown(field)
expected = "**procedimiento_id** (integer): " \
"Identificador único del procedimiento"
self.assertEqual(result, expected)
# elimino campo de type
field.pop("type")
result = field_to_markdown(field)
expected = "**procedimiento_id**: " \
"Identificador único del procedimiento"
self.assertEqual(result, expected)
# elimino campo de description
field.pop("description")
result = field_to_markdown(field)
expected = "**procedimiento_id**"
self.assertEqual(result, expected)
def test_distribution_to_markdown(self):
distribution = {"title": "Convocatorias abiertas durante el año 2015",
"description": "Listado de las convocatorias abiertas "
"durante el año 2015 en el sistema de "
"contrataciones electrónicas",
"field": [{"title": "procedimiento_id",
"type": "integer",
"description": "Identificador único del "
"procedimiento"},
{"title": "unidad_operativa_"
"contrataciones_id",
"type": "integer",
"description": "Identificador único de la "
"unidad operativa de "
"contrataciones"}]}
result = distribution_to_markdown(distribution)
expected = '\n### Convocatorias abiertas durante el año 2015\n\n' \
'Listado de las convocatorias abiertas durante el año ' \
'2015 en el sistema de contrataciones electrónicas\n\n' \
'#### Campos del recurso\n\n' \
'- **procedimiento_id** (integer): Identificador único ' \
'del procedimiento\n' \
'- **unidad_operativa_contrataciones_id** (integer): ' \
'Identificador único de la unidad operativa de' \
' contrataciones\n'
self.assertEqual(result, expected)
# elimino campos no obligatiorios
distribution.pop("field")
distribution.pop("description")
result = distribution_to_markdown(distribution)
expected = '\n### Convocatorias abiertas durante el año 2015\n\n' \
'\n\n#### Campos del recurso'
print(result)
self.assertEqual(result.strip(), expected.strip())
def test_dataset_to_markdown(self):
dataset = {"title": "Sistema de contrataciones electrónicas",
"description":
"Datos correspondientes al Sistema de "
"Contrataciones Electrónicas (Argentina Compra)",
"distribution": [{"title": "Convocatorias abiertas durante "
"el año 2015",
"description":
"Listado de las convocatorias "
"abiertas durante el año 2015 en el "
"sistema de contrataciones "
"electrónicas",
"field": [{"title": "procedimiento_id",
"type": "integer",
"description":
"Identificador único del "
"procedimiento"},
{"title":
"unidad_operativa_"
"contrataciones_id",
"type": "integer",
"description":
"Identificador único de la"
" unidad operativa de "
"contrataciones"}]},
{"title":
"Convocatorias abiertas durante el "
"año 2016",
"description":
"Listado de las convocatorias "
"abiertas durante el año 2016 en el "
"sistema de contrataciones "
"electrónicas",
}]}
result = dataset_to_markdown(dataset)
expected = '\n# Sistema de contrataciones electrónicas\n\n'\
'Datos correspondientes al Sistema de Contrataciones ' \
'Electrónicas (Argentina Compra)\n\n'\
'## Recursos del dataset\n\n\n'\
'### Convocatorias abiertas durante el año 2015\n\n'\
'Listado de las convocatorias abiertas durante el año ' \
'2015 en el sistema de contrataciones electrónicas\n\n'\
'#### Campos del recurso\n\n'\
'- **procedimiento_id** (integer): Identificador ' \
'único del procedimiento\n'\
'- **unidad_operativa_contrataciones_id** (integer): ' \
'Identificador único de la unidad operativa de ' \
'contrataciones\n\n'\
'### Convocatorias abiertas durante el año 2016\n\n'\
'Listado de las convocatorias abiertas durante el año ' \
'2016 en el sistema de contrataciones electrónicas\n\n'\
'#### Campos del recurso'
self.assertEqual.__self__.maxDiff = None
self.assertEqual(result.strip(), expected.strip())
if __name__ == '__main__':
nose.run(defaultTest=__name__)
| 2.5625 | 3 |
imapEmailClient.py | fjh1997/Socket-SMTP-POP3-IMAP | 1 | 12797427 | import socket, ssl, base64, sys, time, math
print "Connecting.."
username = ""
password = ""
# Choose a mail server (e.g. Google mail server) and call it mailserver
mailserver = ('imap.163.com', 143)
# Create socket called clientSocket and establish a TCP connection with mailserver
#Fill in start
#sslSocket = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM),
#ssl_version = ssl.PROTOCOL_SSLv23)
sslSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sslSocket.connect(mailserver)
recv = sslSocket.recv(1024)
if recv.find('* OK') != -1:
print "Connected.\n"
else:
print "Problem connecting.\n"
#################################
#Done connecting
#################################
#################################
#Global Variables
#################################
currentFolder = "None" #Stores the currently selected mail box
#################################
#imap Methods
#################################
def imap_login():#login
print("Login information:")
username = "<EMAIL>"
password = "password"
print("Attempting to login.\n")
sslSocket.send('A001 LOGIN ' + username + ' ' + password + '\r\n')
recv = sslSocket.recv(1024)
if recv.find("completed") != -1:
print "Successfully logged in to: " + username + "\n"
return 1
else:
print "Login failed!\n"
return -1
#List
def imap_list(dir = "", type = "*"):#return list of mailboxes with optional parameters to change what is listed
sslSocket.send('A101 List ' + "\"\"" +' '+ type + '\r\n')
recv = recv_all()
return recv
#Search
def imap_search(x):
sslSocket.send('A201 SEARCH ' + x + '\r\n')
recv = recv_all()
return recv
#Examine
def imap_examine(x):
sslSocket.send('A900 EXAMINE "' + x + '"\r\n')
recv = sslSocket.recv(1024)
return recv
#Fetch
def imap_fetch(x):
sslSocket.send('A301 FETCH ' + x + '\r\n')
recv = recv_all()
return recv
#Create
def imap_create(x):
sslSocket.send('A401 CREATE "' + x + '"\r\n')
recv = sslSocket.recv(1024)
return recv
#Delete
def imap_delete(x):
print 'A501 DELETE "' + x + '"\r\n'
sslSocket.send('A501 DELETE "' + x + '"\r\n')
recv = sslSocket.recv(1024)
print recv
return recv
#UID Copy
def imap_uid_copy(x):
    sslSocket.send('A300 UID COPY ' + x + '\r\n')
recv = recv_all()
return recv
#UID Fetch
def imap_uid_fetch(x):
sslSocket.send('A999 UID FETCH ' + x + '\r\n')
recv = recv_all()
return recv
#UID Store
def imap_uid_store(x):
print 'A003 UID STORE ' + x + '\r\n'
sslSocket.send('A003 UID STORE ' + x + '\r\n')
recv = sslSocket.recv(1024)
print recv
return recv
#UID Search
def imap_uid_search(x):
sslSocket.send('A999 UID SEARCH ' + x + '\r\n')
recv = recv_all()
return recv
#Expunge
def imap_expunge():
sslSocket.send('A202 EXPUNGE\r\n')
recv = sslSocket.recv(1024)
print recv
#Select
def imap_select(x):
sslSocket.send('A142 SELECT "' + x + '"\r\n')
recv = recv_all()
return recv
#################################
#Extra Methods
#################################
#Receive using a timeout
def recv_all(timeout = 2):#can either pass a different timeout or use the default set in the parameter
global sslSocket
#set the socket to non-blocking
sslSocket.setblocking(0)
#array of the data received
data_str = ''
#time this method started
begin = time.time()
#loop till the timeout has passed
while True:
#If some data was receive and it passed the timeout, then stop
if data_str and time.time()-begin > timeout:
break
#If no data has been retrieved and it has passed the timeout by 2, then there is probably nothing to get, so stop
elif time.time()-begin > timeout*2:
break
#try and get the data using a try/except to catch errors with getting data from the server
try:
data = sslSocket.recv(1024)
if data: #if there is some data that was received then store that data
data_str += data
begin=time.time()#reset the begin time so that it doesn't timeout while still getting data
else:
time.sleep(0.1)#give a slight pause before trying to read again
except:
pass #just let it reloop if there was an error thrown
#set the socket back to blocking since only this method is designed for non=blocking
sslSocket.setblocking(1)
return data_str
#Returns a list containing the mailbox names
#The parameter is the string returned from the imap_list command
def get_mailbox_list_array(mailbox_string):
temp_list = mailbox_string.split('\r\n')#Split the returned string by the new line indicators
del temp_list[len(temp_list)-1]#The last element is an empty string, so it isn't needed
mail_list = []#Will hold the list of mailbox names to be returned
for x in temp_list:
if x.find('/" "') != -1 and x.find('\Noselect') == -1 and x.find('[Gmail]/All Mail') == -1:#The line has a mail box (AND) doesn't contain the /Noselect flag (And) isn't the all mail folder
pos = x.find('/" "')+4
mail_list.append(x[pos:-1]) #Store the substring of the line containing the mailbox name
return mail_list
#Print the mailbox names in a list
def print_mailboxes_list(mailbox_list):
print ""
print "---------------------------------"
print "Mail Boxes:"
index = 0
while index < len(mailbox_list):
print str(index) + ":" + mailbox_list[index]
index = index + 1
print "---------------------------------"
#Print the mailbox names in a list with additional information
def print_mailboxes_list_with_info(mailbox_list, info):
print ""
print "---------------------------------"
print "Mail Boxes:"
index = 0
while index < len(mailbox_list):
print str(index) + ":" + mailbox_list[index] + " " + info[index]
index = index + 1
print "---------------------------------"
#Print the email list in the format (box number or uid number): Subject: This is the email subject
def print_mail_list_with_subject(nums, subs, start = 1, end = -1):#nums = the numbers of the emails, subs = the subjects of the emails
if end == -1:#If the point to stop printing in the list wasn't specified in the params, set it to the end of the list
end = len(subs)
while start <= len(nums) and start <= end:#Print the specified elements of the lists
print str(nums[start-1]) + ": " + str(subs[start-1])
start += 1
#Get how many emails each mailbox contains
def get_mailboxes_info_array(mail_list):
mail_info_list = [] #Holds the list of info for the emails
for x in mail_list:
recv = imap_examine(x)
tmp = recv.split(" ")
amtExist = 'N/A'
amtRecent = 'N/A'
try:
amtExist = tmp[17]
amtRecent = tmp[19]
except:
pass
mail_info_list.append('(Emails: ' + amtExist + ' Recent: ' + amtRecent + ')')#Add the formated string to the list
return mail_info_list
#Return a string of the numbers returned from the search command
def get_mail_numbers_from_search(recv):
pos1 = recv.find('SEARCH')+7
pos2 = recv.find('\r')
r = recv[pos1:pos2]
tmp = r.split(' ')
temp_list = []
for t in tmp:
try:
temp_list.append(int(t))
except:
pass
return temp_list
#Return the text of the email body
def format_email_body(recv):
first = 0
last = len(recv)-1
if recv.find('}') != -1:#Find the first }
first = recv.find('}') + 3
for index in reversed(range(len(recv))): #Loop from the end til it find the A
if recv[index] == 'A':
last = index - 5
break
return recv[first:last]
#Return true if the mail box has child boxes
def has_children(strg, mailbox_list):
for s in mailbox_list:
if s.find(strg) != -1 and strg != s:
return True
return False
def delete_all_children_with_child(strg, mailbox_list):
for s in mailbox_list:
if s.find(strg) != -1:
imap_delete(s)
def filter_list_of_subjects(l):
li = []
if l.find('\nSubject: ') != -1:
tmp = l.find('\nSubject: ')
l = l[tmp:]
while l.find('\nSubject: ') != -1:
pos1 = l.find('\nSubject: ')+1
pos2 = l.find('\r')
new = l[pos1:pos2]
if new != '':
li.append(new)
l = l[pos2+1:]
return li
def email_is_read(mail_type, ch):#0 == norm #1 == UID
recv = ''
if mail_type == 0:
recv = imap_search(str(ch) + ' SEEN')
else:
recv = imap_uid_search(str(ch) + ' SEEN')
pos1 = recv.find('SEARCH')+7
pos2 = recv.find('\r')
r = recv[pos1:pos2]
if r != '':
return True
else:
return False
#################################
#Mail Client Methods
#################################
def view_mailboxes():
mailbox_string = imap_list()
mail_list = get_mailbox_list_array(mailbox_string)
mailboxes_info = get_mailboxes_info_array(mail_list)
print_mailboxes_list_with_info(mail_list, mailboxes_info)
def examine_mailbox():
global currentFolder
mailbox_string = imap_list()
mail_list = get_mailbox_list_array(mailbox_string)
mailboxes_info = get_mailboxes_info_array(mail_list)
print_mailboxes_list_with_info(mail_list, mailboxes_info)
choice = raw_input("Which mail box do you want to open: ")
try:
if int(choice) < len(mail_list):
currentFolder = mail_list[int(choice)]
print "\nSelected " + currentFolder
else:
print "\nNot a valid mail box."
except:
print "\nNot a valid mail box."
def get_mail_boxnum():
get_mail(0)
def get_mail_uid():
get_mail(1)
def get_mail(mail_type):#0 == search #1 == UID search
imap_select(currentFolder)
email_list_length = 10
if mail_type == 0:
recv = imap_search('ALL')
else:
recv = imap_uid_search('ALL')
mail_nums = get_mail_numbers_from_search(recv)
string_list = ''
for x in range(len(mail_nums)):
string_list += str(mail_nums[x]) + ','
if string_list:
string_list = string_list[:-1]
if mail_type == 0:
subject_list = imap_fetch(string_list + " BODY[HEADER.FIELDS (SUBJECT)]")
else:
subject_list = imap_uid_fetch(string_list + " BODY[HEADER.FIELDS (SUBJECT)]")
mailboxes_info = filter_list_of_subjects(subject_list)
max_pages = int(len(mail_nums)/email_list_length)
if (len(mail_nums)%email_list_length) > 0:
max_pages += 1
current_page = 0
again = 1
while again == 1:
again = 1
start = current_page * email_list_length + 1
end = start + email_list_length - 1
if len(mail_nums) > 0:
print '\n-------------------------------\nEmails~ Page: ' + str(current_page+1) + '/' + str(max_pages)
print_mail_list_with_subject(mail_nums, mailboxes_info, start, end)
print "\nTo view more mail type, NEXT or PREV"
choice = raw_input("Which email do you want to open? ")
if choice == 'NEXT' or choice == 'next':
again = 1
if current_page < max_pages-1:
current_page+=1
elif choice == 'PREV' or choice == 'prev':
again = 1
if current_page > 0:
current_page-=1
else:
again = 0
else:
print "Mail box is empty."
again = 0
if len(mail_nums) > 0:
try:
ch = int(choice)
if mail_type == 0:
recv_body = imap_fetch(str(ch) + " BODY[1]")
recv_header = imap_fetch(str(ch) + " BODY[HEADER.FIELDS (DATE SUBJECT FROM TO)]")
recv_uid = imap_fetch(str(ch) + " (UID)")
else:
recv_body = imap_uid_fetch(str(ch) + " BODY[1]")
recv_header = imap_uid_fetch(str(ch) + " BODY[HEADER.FIELDS (DATE SUBJECT FROM TO)]")
recv_uid = imap_uid_fetch(str(ch) + " (UID)")
print "\n==============================================================================="
pos = recv_uid.find('(UID')+5
pos2 = recv_uid.find(')')
msg_uid = recv_uid[pos:pos2]
print "Email UID: " + msg_uid
pos = recv_header.find("Date: ")
pos2 = recv_header.find('OK Success\r\n')-11
print recv_header[pos:pos2]
print "--------------------------------------------------------------------"
print format_email_body(recv_body)
print "===============================================================================\n"
email_read = email_is_read(mail_type, ch)#0 == norm #1 == UID #false = unread true = read
print email_read
if email_read:
print "\nMark as unread = 1, Delete = 2, Leave as is = 3"
choice = raw_input("Choice: ")
if choice == "1":
#mark as unread
recv = imap_uid_store(msg_uid + ' -FLAGS (\seen)')
elif choice == "2":
#delete
recv = imap_uid_store(msg_uid + ' +FLAGS (\deleted)')
print recv
imap_expunge()
else:
print "\nMark as read = 1, Delete = 2, Leave as is = 3"
choice = raw_input("Choice: ")
if choice == "1":
#mark as read
recv = imap_uid_store(msg_uid + ' +FLAGS (\seen)')
elif choice == "2":
#delete
recv = imap_uid_store(msg_uid + ' +FLAGS (\deleted)')
print recv
imap_expunge()
except:
print "Email not available."
def create_dir():
name = raw_input("\nName of new mailbox to create:")
recv = imap_create(name)
if recv.find("OK Success") != -1:
print "Created " + name + "!"
else:
print "Failed to create."
def delete_dir():
mailbox_string = imap_list("/", "*")
mail_list = get_mailbox_list_array(mailbox_string)
print_mailboxes_list(mail_list)
choice = raw_input("Enter the number for the box to delete: ")
try:
ch_num = int(choice)
        if ch_num >= len(mail_list):
            cmd = ""
else:
name = mail_list[ch_num]
if has_children(name, mail_list):
decision = raw_input("Are you sure you want to delete " + name + " and all children.(1=yes, 2=no)")
if decision == "1":
delete_all_children_with_child(name, mail_list)
cmd = "all"
else:
cmd = ""
else:
cmd = name
except:
cmd = ""
print "Checking: " + cmd
if cmd == "":
print "\nNo Box chosen"
if cmd == "all":
print "Deleted"
else:
imap_delete(cmd)
if recv.find("OK Success") != -1:
print "Deleted " + name + "!"
else:
print "Failed to delete."
def search_mail_search():
search_mail(0)
def search_mail_uid_search():
search_mail(1)
def search_mail(search_type):#0 == search #1 == UID search
imap_examine(currentFolder)
options = ["All", "Unread", "Old", "Drafts", "Text", "Date"]
print "\n------------------------------"
print "Search by:"
inc = 0
while inc < len(options):
print str(inc) + ":" + options[inc]
inc = inc + 1
print "------------------------------"
choice = raw_input("Choice: ")
cmd = ""
#all
if choice == "0":
cmd = 'ALL'
#unread
elif choice == "1":
cmd = 'UNSEEN'
#old
elif choice == "2":
cmd = 'OLD'
#drafts
elif choice == "3":
cmd = 'DRAFT'
#text
elif choice == "4":
search_text = raw_input("Search for text: ")
cmd = 'TEXT "' + search_text + '"'
#date
elif choice == "5":
when_ch = raw_input("(Before date = 1)(On date = 2)(Since date = 3):")
date_ch = raw_input("Date(dd-mm-yyyy)(ex. 1-Sep-2013):")
date_opt = "ON"
if when_ch == "1":
date_opt = "BEFORE"
elif when_ch == "2":
date_opt = "ON"
elif when_ch == "3":
date_opt = "SINCE"
cmd = date_opt + ' ' + date_ch
if search_type == 0:
recv = imap_search(cmd)
else:
recv = imap_uid_search(cmd)
mail_nums = get_mail_numbers_from_search(recv)
if len(mail_nums) > 0:
string_list = ''
for x in range(len(mail_nums)):
string_list += str(mail_nums[x]) + ','
if string_list:
string_list = string_list[:-1]
if search_type == 0:
subject_list = imap_fetch(string_list + " BODY[HEADER.FIELDS (SUBJECT)]")
else:
subject_list = imap_uid_fetch(string_list + " BODY[HEADER.FIELDS (SUBJECT)]")
mailboxes_info = filter_list_of_subjects(subject_list)
print_mail_list_with_subject(mail_nums, mailboxes_info)
else:
print "Mail box is empty."
def copy_mail():
imap_examine(currentFolder)
recv = imap_uid_search('ALL')
mail_nums = get_mail_numbers_from_search(recv)
if len(mail_nums) > 0:
print '\n-------------------------------\nEmails:'
string_list = ''
for x in range(len(mail_nums)):
string_list += str(mail_nums[x]) + ','
if string_list:
string_list = string_list[:-1]
subject_list = imap_uid_fetch(string_list + " BODY[HEADER.FIELDS (SUBJECT)]")
mailboxes_info = filter_list_of_subjects(subject_list)
print_mail_list_with_subject(mail_nums, mailboxes_info)
else:
print "Mail box is empty."
choice = raw_input('\nWhich email do you want to copy: ')
choice_dest = raw_input('To which folder: ')
try:
        if int(choice) in mail_nums:
            print imap_uid_copy(choice + ' "' + choice_dest + '"')
else:
print "Not a valid email."
except:
print "Invalid input."
def testing():
imap_expunge()
#Done running
def close_mail():
sslSocket.close() #close the socket
print "Done."
#Main Method
def main():
log_success = imap_login()
if log_success == -1:
close_mail()
return 0
choice = ""
#The main loop
while choice != "0":
print "\n"
print "---------------------------------------------"
print "-- Currently Selected Mailbox: " + currentFolder
print "---------------------------------------------"
print "What would you like to do? (0 to quit.)"
print "1: View all mail boxes"
print "2: Examine a mail box"
print "3: Search selected mail box"
print "4: Search using message unique id"
print "5: Get mail from selected mail box"
print "6: Get mail from selected mail box using unique id"
print "7: Create a mail box"
print "8: Delete a mail box"
print "9: Copy email from selected mail box to another"
choice = raw_input("Choice: ")
if choice == "1":
view_mailboxes()
elif choice == "2":
examine_mailbox()
elif choice == "3":
if currentFolder == "None":
print "\nNo mail box selected."
else:
search_mail_search()
elif choice == "4":
if currentFolder == "None":
print "\nNo mail box selected."
else:
search_mail_uid_search()
elif choice == "5":
if currentFolder == "None":
print "\nNo mail box selected."
else:
get_mail_boxnum()
elif choice == "6":
if currentFolder == "None":
print "\nNo mail box selected."
else:
get_mail_uid()
elif choice == "7":
create_dir()
elif choice == "8":
delete_dir()
elif choice == "9":
copy_mail()
elif choice == "10":
testing()
if choice == "0":
close_mail()
if __name__ == '__main__':
main()
| 3.4375 | 3 |
backend/server.py | Longi94/rl-loadout | 17 | 12797428 | <reponame>Longi94/rl-loadout
import logging
from datetime import timedelta
from flask import jsonify
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from werkzeug.exceptions import HTTPException, default_exceptions
import connexion
from utils.network import log_endpoints
from utils.network.exc import HttpException
from config import config
from database import database
from logging_config import logging_config
from blueprints import blueprints
from _version import __version__
log = logging.getLogger(__name__)
connexion_app = connexion.App(__name__, arguments={
'server_host': config.get('server', 'host'),
'version': __version__
})
connexion_app.add_api('api_swagger.yml')
app = connexion_app.app
for blueprint in blueprints:
app.register_blueprint(blueprint)
app.config['JWT_SECRET_KEY'] = config.get('server', 'jwt_secret')
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(days=1)
jwt = JWTManager(app)
CORS(app)
@app.teardown_request
def teardown_request(exception):
if exception:
database.Session.rollback()
database.Session.remove()
@app.errorhandler(HttpException)
def handle_http_exception(e: HttpException):
return jsonify({
'status': e.code,
'detail': e.message
}), e.code
@app.errorhandler(Exception)
def handle_error(e):
code = 500
if isinstance(e, HTTPException):
code = e.code
return jsonify({
'status': code,
'detail': str(e)
}), code
if __name__ == '__main__':
logging_config()
port = int(config.get('server', 'port'))
log.info(f'Running rl-loadout {__version__} on port {port}')
log_endpoints(log, app)
for ex in default_exceptions:
app.register_error_handler(ex, handle_error)
connexion_app.run(host='0.0.0.0', port=port)
| 1.875 | 2 |
source-code-from-author-book/Listings-for-Second-Edition/listing_8_29.py | robrac/algorithms-exercises-with-python | 0 | 12797429 | def mismatchLinks(pattern):
augPattern = "0"+pattern
links = {}
links[1] = 0
for k in range(2,len(augPattern)):
s = links[k-1]
stop = False
while s>=1 and not stop:
if augPattern[s] == augPattern[k-1]:
stop = True
else:
s = links[s]
links[k] = s+1
return links
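

# Minimal usage sketch (hedged): the pattern is arbitrary. For "ACATA" the
# links computed over the augmented pattern "0ACATA" (1-indexed) work out to
# {1: 0, 2: 1, 3: 1, 4: 2, 5: 1, 6: 2}.
if __name__ == "__main__":
    print(mismatchLinks("ACATA"))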
| 3.25 | 3 |
src/pretalx/person/exporters.py | MaxRink/pretalx | 0 | 12797430 | <reponame>MaxRink/pretalx
import csv
import io
from django.utils.translation import ugettext_lazy as _
from pretalx.common.exporter import BaseExporter
from pretalx.submission.models import SubmissionStates
class CSVSpeakerExporter(BaseExporter):
public = False
icon = 'fa-users'
identifier = 'speakers.csv'
verbose_name = _('Speaker CSV')
def render(self, **kwargs):
content = io.StringIO()
writer = csv.DictWriter(content, fieldnames=['name', 'email', 'confirmed'])
writer.writeheader()
for speaker in self.event.submitters:
accepted_talks = speaker.submissions.filter(
event=self.event, state=SubmissionStates.ACCEPTED
).exists()
confirmed_talks = speaker.submissions.filter(
event=self.event, state=SubmissionStates.CONFIRMED
).exists()
if not accepted_talks and not confirmed_talks:
continue
writer.writerow(
{
'name': speaker.get_display_name(),
'email': speaker.email,
'confirmed': str(bool(confirmed_talks)),
}
)
return (f'{self.event.slug}-speakers.csv', 'text/plain', content.getvalue())
| 2.125 | 2 |
djay/blueprints/command/context.py | aleontiev/django-cli | 24 | 12797431 | import click
import inflection
@click.command()
@click.argument("name")
@click.option("--doc")
def get_context(name, doc):
"""Generate a command with given name.
The command can be run immediately after generation.
For example:
dj generate command bar
dj run manage.py bar
"""
name = inflection.underscore(name)
return {"name": name, "doc": doc or name}
| 2.578125 | 3 |
tests/reader/test_iso19139.py | cehbrecht/md-ingestion | 4 | 12797432 | <reponame>cehbrecht/md-ingestion<gh_stars>1-10
import os
import pytest
from mdingestion.reader import ISO19139Reader
from tests.common import TESTDATA_DIR
def test_envidat_iso19139():
point_file = os.path.join(
TESTDATA_DIR, 'envidat-iso19139', 'SET_1', 'xml', 'bbox_2ea750c6-4354-5f0a-9b67-2275d922d06f.xml')
reader = ISO19139Reader()
doc = reader.read(point_file)
assert 'Number of avalanche fatalities' in doc.title[0]
assert 'Avalanche Warning Service SLF' in doc.creator[0]
assert 'WSL Institute for Snow' in doc.publisher[0]
assert '2018' == doc.publication_year
assert ['AVALANCHE ACCIDENT STATISTICS', 'AVALANCHE ACCIDENTS', 'AVALANCHE FATALITIES'] == doc.keywords
# assert "POLYGON ((45.81802 10.49203, 45.81802 47.80838, 5.95587 47.80838, 5.95587 10.49203, 45.81802 10.49203))" == doc.spatial_coverage # noqa
# assert "{'type': 'Polygon', 'coordinates': (((45.81802, 10.49203), (45.81802, 47.80838), (5.95587, 47.80838), (5.95587, 10.49203), (45.81802, 10.49203)),)}" == doc.spatial # noqa
# assert '2018-12-31T00:00:00Z' == doc.temporal_coverage_begin_date
# assert '2018-12-31T00:00:00Z' == doc.temporal_coverage_end_date
def test_boundingbox():
point_file = os.path.join(
TESTDATA_DIR, 'deims', 'raw', '8708dd68-f413-5414-80fb-da439a4224f9.xml')
reader = ISO19139Reader()
doc = reader.read(point_file)
# <gmd:westBoundLongitude>
# <gco:Decimal>34.611499754704</gco:Decimal>
# </gmd:westBoundLongitude>
# <gmd:eastBoundLongitude>
# <gco:Decimal>35.343095815055</gco:Decimal>
# </gmd:eastBoundLongitude>
# <gmd:southBoundLatitude>
# <gco:Decimal>29.491402811787</gco:Decimal>
# </gmd:southBoundLatitude>
# <gmd:northBoundLatitude>
# <gco:Decimal>30.968572510749</gco:Decimal>
# </gmd:northBoundLatitude>
assert '(34.611W, 29.491S, 35.343E, 30.969N)' == doc.spatial_coverage
@pytest.mark.xfail(reason='missing in reader')
def test_iso19139_temporal_coverage():
point_file = os.path.join(
TESTDATA_DIR, 'envidat-iso19139', 'SET_1', 'xml', 'bbox_2ea750c6-4354-5f0a-9b67-2275d922d06f.xml')
reader = ISO19139Reader()
doc = reader.read(point_file)
# assert "POLYGON ((45.81802 10.49203, 45.81802 47.80838, 5.95587 47.80838, 5.95587 10.49203, 45.81802 10.49203))" == doc.spatial_coverage # noqa
# assert "{'type': 'Polygon', 'coordinates': (((45.81802, 10.49203), (45.81802, 47.80838), (5.95587, 47.80838), (5.95587, 10.49203), (45.81802, 10.49203)),)}" == doc.spatial # noqa
assert '2018-12-31T00:00:00Z' == doc.temporal_coverage_begin_date
assert '2018-12-31T00:00:00Z' == doc.temporal_coverage_end_date
| 2.015625 | 2 |
printerc/printerc.py | lopezpdvn/printer73x | 2 | 12797433 | #!/usr/bin/env python
# coding=utf-8
# Author: <NAME>
# Contact: <EMAIL>
'''Numerical controller for the printer73x system.
**printerc** is an interactive command line interface for the numerical control
of the **printer73x** system. printerc drives printerm through mcircuit;
printerc stablishes a serial connection with mcircuit, and mcircuit is coupled
to printerm through the motors. mcircuit (actually MM12, its subsystem) loads
in its memory the routines that correspond to the translations across the
:math:`X`, :math:`Y` and :math:`Z` axis, and then printerc execute these
routines in order to produce the trajectory of the tool printing the image.
'''
# Standard library imports.
from __future__ import division
from pprint import pprint
import sys
import os
import atexit
import gc
import time
# Related third party imports.
import serial
import IPython
import numpy as np # remove
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Same as **printer73x**.
__version__ = '0.09'
# GLOBAL CONSTANT names. *if main* section at bottom sets global names too.
# ==========================================================================
# ==========================================================================
LOGF = 'log.rst'
'''Path to the log file.'''
INTRO_MSG = '''\
**printerc**
Welcome!
'''
'''Welcome message for command line interface.'''
# MM12
# ==========================================================================
TRANSITIONS_PER_STEP = 2
'''The board sets the microstepping format with the jumpers *MS1* and *MS2*.
Use the following table to set this constant:
============ ============ ===============
MS1 MS2 TRANSITIONS_PER_STEP
============ ============ ===============
connected connected 1
disconnected connected 2
connected disconnected 4
disconnected disconnected 8
============ ============ ===============
.. note:: Both stepper motor driver boards must have the same jumper
configuration.
'''
STEPS_PER_PIXEL = 90
'''Number of steps the stepper motor needs to translate 1 pixel across the
:math:`X` or :math:`Y` axes.'''
TRANSITIONS_PER_PIXEL = STEPS_PER_PIXEL * TRANSITIONS_PER_STEP
'''Number of low-to-high transitions the stepper motors need to translate 1
pixel across the :math:`X` or :math:`Y` axes.'''
SRV_SIGNAL_CHANNEL_TARGET_OFF = 940
''':math:`Z` axis servo motor pulse width in units of quarter-:math:`\\mu s`
that enables printing (moves the tool down).'''
SRV_SIGNAL_CHANNEL_TARGET_ON = 2175
SRV_SIGNAL_CHANNEL_TARGET_ON = 1580
''':math:`Z` axis servo motor pulse width in units of quarter-:math:`\\mu s`
that disables printing (moves the tool up).'''
STEPPER_CHANNELS_TARGET_ON = 6800
'''Target value in units of quarter-:math:`\\mu s` that drives the stepper
channels high.'''
STEPPER_CHANNELS_TARGET_OFF = 5600
'''Target value in units of quarter-:math:`\\mu s` that drives the stepper
channels low.'''
MM12_AXES_CHANNELS = {
'X' : {
'dir_channel' : 0,
'dir_positive' : STEPPER_CHANNELS_TARGET_OFF,
'dir_negative' : STEPPER_CHANNELS_TARGET_ON,
'step_channel': 1,
},
'Y' : {
'dir_channel' : 2,
'dir_positive' : STEPPER_CHANNELS_TARGET_ON,
'dir_negative' : STEPPER_CHANNELS_TARGET_OFF,
'step_channel': 3,
},
'Z' : {
'channel' : 4,
'on' : SRV_SIGNAL_CHANNEL_TARGET_OFF,
'off' : SRV_SIGNAL_CHANNEL_TARGET_ON,
},
}
'''Configuration of the MM12 channels for the servo and stepper motor outputs.'''
SUB_STEPPER_PIXEL_TEMPLATE = '''sub {name}
{{{{ntransitions}}}}
{dir} {dir_channel} servo # set direction
begin
dup
while
{off} {step_channel} servo
{{delay}} delay
{on} {step_channel} servo
{{delay}} delay
1 minus
repeat
quit
'''
'''Template for the MM12 script subroutines that drive a stepper motor in units
of pixels.'''
SUB_STEPPER_PULSE_TEMPLATE = '''sub {name}
{dir} {dir_channel} servo # set direction
{off} {step_channel} servo
{{delay}} delay
{on} {step_channel} servo
{{delay}} delay
quit
'''
'''Template for the MM12 script subroutines that drive a stepper motor in units
of low-to-high transitions, for a precise but slow translation.'''
SUB_SERVO_TEMPLATE = '''sub {name}
{position} {channel} servo
begin
get_moving_state
while
# wait until is is no longer moving.
repeat
75 delay
quit
'''
'''Template for the MM12 script subroutine that drives the servo motor.'''
MM12_SCRIPT_INIT = '''\
{{servo_acceleration}} {servo_channel} acceleration
{{servo_speed}} {servo_channel} speed
'''.format(servo_channel=MM12_AXES_CHANNELS['Z']['channel'])
'''MM12 script initialization.'''
MM12_SUBROUTINES = {
'X-p' : {
'subroutine_id' : 0,
'subroutine_body' :
SUB_STEPPER_PULSE_TEMPLATE.format(
name='x_neg_pulse', dir=MM12_AXES_CHANNELS['X']['dir_negative'],
dir_channel=MM12_AXES_CHANNELS['X']['dir_channel'],
off=STEPPER_CHANNELS_TARGET_OFF,
step_channel=MM12_AXES_CHANNELS['X']['step_channel'],
on=STEPPER_CHANNELS_TARGET_ON),
},
'X+p' : {
'subroutine_id' : 1,
'subroutine_body' :
SUB_STEPPER_PULSE_TEMPLATE.format(
name='x_pos_pulse', dir=MM12_AXES_CHANNELS['X']['dir_positive'],
dir_channel=MM12_AXES_CHANNELS['X']['dir_channel'],
off=STEPPER_CHANNELS_TARGET_OFF,
step_channel=MM12_AXES_CHANNELS['X']['step_channel'],
on=STEPPER_CHANNELS_TARGET_ON),
},
'X-P' : {
'subroutine_id' : 2,
'subroutine_body' :
SUB_STEPPER_PIXEL_TEMPLATE.format(
name='x_neg_pixel', dir=MM12_AXES_CHANNELS['X']['dir_negative'],
dir_channel=MM12_AXES_CHANNELS['X']['dir_channel'],
off=STEPPER_CHANNELS_TARGET_OFF,
step_channel=MM12_AXES_CHANNELS['X']['step_channel'],
on=STEPPER_CHANNELS_TARGET_ON ),
},
'X+P' : {
'subroutine_id' : 3,
'subroutine_body' :
SUB_STEPPER_PIXEL_TEMPLATE.format(
name='x_pos_pixel', dir=MM12_AXES_CHANNELS['X']['dir_positive'],
dir_channel=MM12_AXES_CHANNELS['X']['dir_channel'],
off=STEPPER_CHANNELS_TARGET_OFF,
step_channel=MM12_AXES_CHANNELS['X']['step_channel'],
on=STEPPER_CHANNELS_TARGET_ON ),
},
'Y-p' : {
'subroutine_id' : 4,
'subroutine_body' :
SUB_STEPPER_PULSE_TEMPLATE.format(
name='y_neg_pulse', dir=MM12_AXES_CHANNELS['Y']['dir_negative'],
dir_channel=MM12_AXES_CHANNELS['Y']['dir_channel'],
off=STEPPER_CHANNELS_TARGET_OFF,
step_channel=MM12_AXES_CHANNELS['Y']['step_channel'],
on=STEPPER_CHANNELS_TARGET_ON),
},
'Y+p' : {
'subroutine_id' : 5,
'subroutine_body' :
SUB_STEPPER_PULSE_TEMPLATE.format(
name='y_pos_pulse', dir=MM12_AXES_CHANNELS['Y']['dir_positive'],
dir_channel=MM12_AXES_CHANNELS['Y']['dir_channel'],
off=STEPPER_CHANNELS_TARGET_OFF,
step_channel=MM12_AXES_CHANNELS['Y']['step_channel'],
on=STEPPER_CHANNELS_TARGET_ON),
},
'Y-P' : {
'subroutine_id' : 6,
'subroutine_body' :
SUB_STEPPER_PIXEL_TEMPLATE.format(
name='y_neg_pixel', dir=MM12_AXES_CHANNELS['Y']['dir_negative'],
dir_channel=MM12_AXES_CHANNELS['Y']['dir_channel'],
off=STEPPER_CHANNELS_TARGET_OFF,
step_channel=MM12_AXES_CHANNELS['Y']['step_channel'],
on=STEPPER_CHANNELS_TARGET_ON ),
},
'Y+P' : {
'subroutine_id' : 7,
'subroutine_body' :
SUB_STEPPER_PIXEL_TEMPLATE.format(
name='y_pos_pixel', dir=MM12_AXES_CHANNELS['Y']['dir_positive'],
dir_channel=MM12_AXES_CHANNELS['Y']['dir_channel'],
off=STEPPER_CHANNELS_TARGET_OFF,
step_channel=MM12_AXES_CHANNELS['Y']['step_channel'],
on=STEPPER_CHANNELS_TARGET_ON ),
},
'Z-' : {
'subroutine_id' : 8,
'subroutine_body' :
SUB_SERVO_TEMPLATE.format(
name='z_position_off',
channel=MM12_AXES_CHANNELS['Z']['channel'],
position=MM12_AXES_CHANNELS['Z']['off']*4)
},
'Z+' : {
'subroutine_id' : 9,
'subroutine_body' :
SUB_SERVO_TEMPLATE.format(
name='z_position_on',
channel=MM12_AXES_CHANNELS['Z']['channel'],
position=MM12_AXES_CHANNELS['Z']['on']*4)
},
}
'''Structure that builds and identifies the MM12 script subroutines.'''
MM12_SCRIPT_RUNNING = '\x00'
'''Byte value that the MM12 returns when the script is running.'''
MM12_SCRIPT_STOPPED = '\x01'
'''Byte value that the MM12 returns when the script is stopped.'''
# ==========================================================================
# ==========================================================================
# ==========================================================================
class Getch:
"""Gets a single character from standard input. Does not echo to the
screen.
References
==========
.. [GETCHRECIPE] http://code.activestate.com/recipes/134892/
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
class _GetchUnix:
'''Unix implementation of class ``Getch``.'''
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
'''Windows implementation of class ``Getch``.'''
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
def on_exit():
'''Actions to do on exit.'''
try:
print >>logf, 'Closing ``{0}`` *command port*'.format(sp.port)
sp.close()
except NameError:
pass
print >>logf, 'END'
logf.close()
print '\nThanks for using ``printerc``!\n'
# Invoke the garbage collector.
gc.collect()
def scan_serial_ports():
'''Scan system for available physical or virtual serial ports.
Returns
-------
available : list of tuples
Each element of the list is a ``(num, name)`` tuple with the number and
name of the port.
Notes
-----
Directly copied from example from `pyserial
<http://sourceforge.net/projects/pyserial/files/package>`_ project.
'''
available = []
for i in range(256):
try:
s = serial.Serial(i)
available.append( (i, s.portstr))
s.close()
except serial.SerialException:
pass
return available
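
# Hedged usage sketch: list candidate ports before calling connect_printerm(),
# e.g.
#
#     for num, name in scan_serial_ports():
#         print num, name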
def mm12_script_status():
'''Indicate whether the MM12 script is running or stopped.
Returns
-------
script_status : {``MM12_SCRIPT_RUNNING``, ``MM12_SCRIPT_STOPPED``}
'''
assert sp.write('\xae') == 1
return sp.read(1)
def translate(adm, confirm=False):
'''Translate the printerm tool across the :math:`XYZ` space.
printer73x can only perform translations across a single axis at a time.
Parameters
----------
adm: str
*adm* stands for Axis, Direction, Mode. Use the following table to
select the kind of translation you want to perform (where :math:`n` is
the number of pulses for the printerm tool to translate a pixel unit
across the respective axis).
======= ================================================================
*adm* translation
======= ================================================================
``X-p`` send 1 single pulse for negative translation across :math:`X`.
``X+p`` send 1 single pulse for positive translation across :math:`X`.
``X-P`` send :math:`n` pulses for negative translation across :math:`X`.
``X+P`` send :math:`n` pulses for positive translation across :math:`X`.
``Y-p`` send 1 single pulse for negative translation across :math:`Y`.
``Y+p`` send 1 single pulse for positive translation across :math:`Y`.
``Y-P`` send :math:`n` pulses for negative translation across :math:`Y`.
``Y+P`` send :math:`n` pulses for positive translation across :math:`Y`.
``Z-`` move the tool to the off position (:math:`Z`).
``Z+`` move the tool to the on position (:math:`Z`).
======= ================================================================
confirm: boolean, optional
If ``True``, the user must confirm the translation by pressing Enter
(default is ``False``).
'''
# Start until script is not running.
while mm12_script_status() == MM12_SCRIPT_RUNNING:
pass
subroutine_id = chr( MM12_SUBROUTINES[adm]['subroutine_id'])
str2write = ''.join(['\xa7', subroutine_id])
if confirm:
raw_input()
assert sp.write(str2write) == 2
#if 'Z' in adm:
#time.sleep(0.1)
def build_mm12_script(fpath, ntransitions=TRANSITIONS_PER_PIXEL, delay=1,
servo_acceleration=0, servo_speed=100):
'''Build a script to be loaded on the MM12.
Parameters
----------
fpath : str-like
Path location where to save the script file.
ntransitions : int, optional
Number of low-to-high transitions to perform in the subroutines that
performs translation in units of pixels through the stepper motor
(default is ``TRANSITIONS_PER_PIXEL``).
delay : int, optional
Delay (in milliseconds) between each transition in the subroutines that
perform translation through the stepper motors (default is 1).
servo_acceleration : int, optional
Sets the acceleration of the servo signal channel in units of (0.25
us)/(10 ms)/(80 ms) (default is 0).
servo_speed : int, optional
Sets the speed of the servo signal channel in units of (0.25 us)/(10
ms) (default is 100).
'''
def get_subroutine_key_by_id(subroutine_id):
for key, value in MM12_SUBROUTINES.items():
            if subroutine_id == value['subroutine_id']:
return key
for intarg in (ntransitions, delay, servo_acceleration, servo_speed):
assert isinstance(intarg, int)
with open(fpath, 'w') as f:
print >>f, MM12_SCRIPT_INIT.format(servo_acceleration=servo_acceleration,
servo_speed=servo_speed)
for i in range(len( MM12_SUBROUTINES)):
subroutine_key = get_subroutine_key_by_id(i)
subroutine_body = MM12_SUBROUTINES[subroutine_key]['subroutine_body']
if 'Z' not in subroutine_key:
subroutine_body = subroutine_body.format(delay=delay)
if 'P' in subroutine_key:
subroutine_body = subroutine_body.format(ntransitions=ntransitions)
print >>f, subroutine_body
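
# Hedged example workflow (the port name and file path are placeholders):
#
#     build_mm12_script('printerm.mm12', ntransitions=TRANSITIONS_PER_PIXEL,
#                       delay=1, servo_acceleration=0, servo_speed=100)
#     # load the generated script onto the MM12 with its configuration tool,
#     # then connect and drive the machine:
#     connect_printerm('/dev/ttyACM0')
#     translate('Z-')                      # tool up
#     translate('X+P')                     # one pixel in +X
#     print_image_better_better(confirm=False)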
def prepare_img(imgpath, invert=False, show=False):
'''Perform any necessary processing for the input image to be reproduced by
printerm.
Parameters
----------
imgpath : str-like
Path to the image file. Must be PNG, 8-bit grayscale, non-interlaced.
invert : boolean, optional
Invert the image if ``True`` (default is ``False``).
show : boolean, optional
Show the image if ``True`` (default is ``False``).
Notes
-----
This function sets the following global names:
**img** : array of booleans
2-d array representation of the image.
**b** : int
Image's height, number of rows in the array representation.
**w** : int
Image's width, number of columns in the array representation.
'''
global img, b, w
print 'Loading ``{0}``...'.format(imgpath)
img = mpimg.imread(fname=imgpath, format='png')
b, w = img.shape
npixels = b * w
nprints = nnotprints = 0
assert (b > 0) and (w > 0)
print 'Processing the image...'
# only total black and white, no grays.
for i in range(b):
for j in range(w):
if img[i][j] < 0.9:
img[i][j] = 0.0
else:
img[i][j] = 1.0
if invert:
print 'Inverting image...'
for i in range(b):
for j in range(w):
if img[i][j] > 0.0:
img[i][j] = 0.0
else:
img[i][j] = 1.0
# Check for pixel with and without color.
for i in range(b):
for j in range(w):
if img[i][j] > 0.0:
nnotprints += 1
else:
nprints += 1
assert (nnotprints + nprints) == npixels
# If ``nprints == 0`` then no pixel will be printed.
assert nprints > 0
print 'Loaded ``{0}`` with {1} pixels, {2} of which have color'.format(
imgpath, npixels, nprints)
plt.close('all')
if show:
plt.imshow(img, cmap=cm.gray)
plt.show()
def connect_printerm(commandport_id):
'''Connect printerc with printerm through the MM12 command port.
Parameters
----------
commandport_id : str or int
Serial device name or port number number of the MM12 serial command
port.
'''
global sp
sp = serial.Serial(port=commandport_id)
assert sp.isOpen()
print >>logf, '``{0}`` just opened *command port* ``{1}``'.format(PN, sp.port)
msg = '``{0}`` is now connected to ``printerm`` through ``{1}``'.format(
PN, sp.port)
for f in (logf, sys.stdout):
print >>f, msg
def manual_translation_mode(precise=True):
'''Manually translate the printerm tool across the :math:`XY` plane.
Parameters
----------
precise : boolean, optional
If ``True``, perform translation in units of single low-to-high transitions
sent to the stepper motor drivers (how much the tool is translated
depends on the microstep format selected through the XMS1, XMS2, YMS1,
YMS2 jumpers in mcircuit). If ``False`` perform translation in units
of pixels (default is True).
'''
if precise:
keys2translation = {
'h' : 'X-p',
'l' : 'X+p',
'j' : 'Y+p',
'k' : 'Y-p',
'i' : 'Z+',
'o' : 'Z-',
}
else:
keys2translation = {
'h' : 'X-P',
'l' : 'X+P',
'j' : 'Y+P',
'k' : 'Y-P',
'i' : 'Z+',
'o' : 'Z-',
}
getch = Getch()
while True:
ch = getch()
if ch not in keys2translation.keys():
break
while mm12_script_status() == MM12_SCRIPT_RUNNING:
pass
translate(keys2translation[ch], confirm=False)
def print_pixel(confirm=False):
if confirm:
raw_input()
translate('Z+', confirm=False)
translate('Z-', confirm=False)
def print_image(confirm=False):
def report_position(x, y):
print 'At row {0}, column {1}'.format(y, x)
def print_img_pixel(x, y, confirm=False):
if img[y][x] == 0.0:
print_pixel(confirm)
try:
msg = 'Preparing to print an image with {0} rows and {1} columns'.format(b,
w)
print msg
x = y = 0 # We are at HOME position.
while True:
print 'Printing across the row {0}'.format(y)
while True:
report_position(x, y)
print_img_pixel(x, y, confirm)
if x == w - 1:
break
translate('X+P', confirm)
x += 1
print 'Returning to a_{{{0}, 0}}'.format(y)
while True:
if x == 0:
break
translate('X-P', confirm)
x -= 1
if y == b - 1:
break
translate('Y+P', confirm)
y += 1
print 'Returning to a_{{0, 0}}'.format(y)
while True:
if y == 0:
break
translate('Y-P', confirm)
y -= 1
print 'The image has been printed'
except KeyboardInterrupt:
sp.flush()
print 'Operation interrupted, flushing command port'
def print_image_better(confirm=False):
def report_position(x, y):
print 'At row {0}, column {1}'.format(y, x)
def print_img_pixel(x, y, confirm=False):
if img[y][x] == 0.0:
print_pixel(confirm)
def color_in_this_row(row):
for pixel in row:
if pixel == 0.0:
return True
return False
try:
msg = 'Preparing to print an image with {0} rows and {1} columns'.format(b,
w)
print msg
x = y = 0 # We are at HOME position.
while True:
print 'Printing across the row {0}'.format(y)
while True:
report_position(x, y)
print_img_pixel(x, y, confirm)
if x == w - 1:
break
translate('X+P', confirm)
x += 1
if y == b - 1:
break
translate('Y+P', confirm)
y += 1
print 'Printing across the row {0}'.format(y)
while True:
report_position(x, y)
print_img_pixel(x, y, confirm)
if x == 0:
break
translate('X-P', confirm)
x -= 1
if y == b - 1:
break
translate('Y+P', confirm)
y += 1
print 'Returning to a_{{0, 0}}'.format(y)
while True:
if y == 0:
break
translate('Y-P', confirm)
y -= 1
while True:
if x == 0:
break
translate('X-P', confirm)
x -= 1
print 'The image has been printed'
except KeyboardInterrupt:
sp.flush()
print 'Operation interrupted, flushing command port'
def print_image_better_better(confirm=False):
'''Automatically print the input image.
Parameters
----------
confirm : boolean, optional
Wait for confirmation before any translation (default is ``False``).
Notes
-----
Let :math:`\mathbf{A}` be the matrix representation of the input image.
:math:`a_{y,x}` as an element of :math:`\mathbf{A}`, represents a pixel.
Comparing :math:`\mathbf{A}` with the input image from a front perspective:
#. :math:`a_{0,0}` corresponds to the upper left corner of the input image.
#. :math:`a_{b-1,w-1}` corresponds to the lower right corner of the input
image.
Starting from the HOME position the printerm tool visit every element of
the row from :math:`a_{0,0}` to :math:`a_{0,w-1}` then moves to the next
row (:math:`a_{1,w-1}`) and visits every element of the row from
:math:`a_{1,w-1}` to :math:`a_{1,0}`, and so on until there are no more
rows to visit. In any position, if the corresponding pixel is black then
the tool prints it.
'''
def report_position(x, y):
print 'At row {0}, column {1}'.format(y, x)
def print_img_pixel(x, y, confirm=False):
if img[y][x] == 0.0:
print_pixel(confirm)
def color_in_this_row(row):
for pixel in row:
if pixel == 0.0:
return True
return False
try:
msg = 'Preparing to print an image with {0} rows and {1} columns'.format(b,
w)
print msg
x = y = z = 0 # We are at HOME position.
while True:
# To the right.
print 'Printing across the row {0}'.format(y)
while True:
report_position(x, y)
if img[y][x] == 0.0:
translate('Z+', confirm)
try:
if img[y][x+1] != 0.0:
translate('Z-', confirm)
except IndexError as e:
pass
if x == w - 1:
translate('Z-')
break
translate('X+P', confirm)
x += 1
if y == b - 1:
translate('Z-')
break
translate('Y+P', confirm)
y += 1
# To the left.
print 'Printing across the row {0}'.format(y)
while True:
report_position(x, y)
if img[y][x] == 0.0:
translate('Z+', confirm)
if img[y][x-1] != 0.0:
translate('Z-', confirm)
if x == 0:
translate('Z-')
break
translate('X-P', confirm)
x -= 1
if y == b - 1:
translate('Z-')
break
translate('Y+P', confirm)
y += 1
        print 'Returning to a_{0, 0}'
while True:
if y == 0:
break
translate('Y-P', confirm)
y -= 1
while True:
if x == 0:
break
translate('X-P', confirm)
x -= 1
print 'The image has been printed'
except KeyboardInterrupt:
sp.flush()
print 'Operation interrupted, flushing command port'
if __name__ == "__main__":
# program name from file name.
PN = os.path.splitext(sys.argv[0])[0]
logf = open(LOGF, 'w')
print >>logf, 'START'
atexit.register(on_exit)
    IPython.Shell.IPShellEmbed()(INTRO_MSG)
| 3.125 | 3 |
utils.py | wiaderwek/musebert | 24 | 12797434 | <reponame>wiaderwek/musebert<filename>utils.py
from torch.optim.lr_scheduler import LambdaLR
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps,
num_training_steps, final_lr_factor,
last_epoch=-1):
"""
Copied and **modified** from: https://github.com/huggingface/transformers
    Create a schedule with a learning rate that increases linearly from 0 to
    the initial lr set in the optimizer during a warmup period, and then
    decreases linearly towards `initial lr * final_lr_factor`.
:param optimizer: The optimizer for which to schedule the learning rate.
:param num_warmup_steps: The number of steps for the warmup phase.
:param num_training_steps: The total number of training steps.
:param final_lr_factor: Final lr = initial lr * final_lr_factor
:param last_epoch: the index of the last epoch when resuming training.
(defaults to -1)
:return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
final_lr_factor, float(num_training_steps - current_step) /
float(max(1, num_training_steps - num_warmup_steps))
)
return LambdaLR(optimizer, lr_lambda, last_epoch)
def augment_note_matrix(nmat, length, shift):
"""Pitch shift a note matrix in R_base format."""
aug_nmat = nmat.copy()
aug_nmat[0: length, 1] += shift
return aug_nmat
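if __name__ == '__main__':
    # Minimal usage sketch: the optimizer, step counts and note-matrix values
    # below are illustrative only and assume torch and numpy are installed.
    import numpy as np
    import torch
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=1e-3)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=10, num_training_steps=100,
        final_lr_factor=0.1)
    for _ in range(30):
        optimizer.step()
        scheduler.step()
    print('lr after 30 steps:', scheduler.get_last_lr())
    # Shift column 1 (the pitch column in R_base format) of the first two rows.
    nmat = np.array([[0, 60, 1], [1, 62, 1], [2, 64, 1]])
    print(augment_note_matrix(nmat, length=2, shift=1))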
| 2.921875 | 3 |
tests/test_examplegen.py | matth79/multispecies-whale-detection | 1 | 12797435 | <gh_stars>1-10
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from typing import Optional, Tuple
import unittest
import apache_beam as beam
from dateutil import tz
from multispecies_whale_detection import examplegen
def relative_endpoints_to_file_annotation_fixture(
begin_seconds: float,
end_seconds: float) -> Optional[examplegen.ClipAnnotation]:
"""Template testing FileAnnotations relative to a fixture clip.
The fixture clip covers the time interval [10s, 20s] relative to the file.
Args:
begin_seconds: Beginning of the test FileAnnotation relative to the file.
end_seconds: Ending of the test FileAnnotation relative to the file.
Returns:
Annotation over the restriction of the given time interval to the fixture
clip or None if they don't overlap.
"""
start_relative_to_file = datetime.timedelta(seconds=20)
clip_metadata = examplegen.ClipMetadata(
filename='test.wav',
sample_rate=10000,
duration=datetime.timedelta(seconds=10),
index_in_file=2,
start_relative_to_file=start_relative_to_file,
start_utc=None,
)
annotation = examplegen.FileAnnotation(
begin=datetime.timedelta(seconds=begin_seconds),
end=datetime.timedelta(seconds=end_seconds),
label='Oo',
)
return annotation.make_relative(clip_metadata)
def relative_endpoints_to_utc_annotation_fixture(
begin_seconds: float,
end_seconds: float) -> Optional[examplegen.ClipAnnotation]:
"""Template testing UTCAnnotations relative to a fixture clip.
This is a modification of the FileAnnotation version to convert identical
offset arguments (relative to the start of the file) to the corresponding UTC
times, based on a hard-coded fixture file_start_utc.
The fixture clip covers the time interval [10s, 20s] relative to the file.
Args:
begin_seconds: Beginning of the test UTCAnnotation relative to the file.
end_seconds: Ending of the test UTCAnnotation relative to the file.
Returns:
Annotation over the restriction of the given time interval to the fixture
clip or None if they don't overlap.
"""
file_start_utc = datetime.datetime(2012, 2, 3, 11, 45, 20, tzinfo=tz.UTC)
start_relative_to_file = datetime.timedelta(seconds=20)
clip_metadata = examplegen.ClipMetadata(
filename='test.wav',
sample_rate=10000,
duration=datetime.timedelta(seconds=10),
index_in_file=2,
start_relative_to_file=start_relative_to_file,
start_utc=file_start_utc + start_relative_to_file,
)
annotation = examplegen.UTCAnnotation(
begin=file_start_utc + datetime.timedelta(seconds=begin_seconds),
end=file_start_utc + datetime.timedelta(seconds=end_seconds),
label='Oo',
)
return annotation.make_relative(clip_metadata)
class TestAnnotations(unittest.TestCase):
def test_parse_annotation_rel_file(self):
annotation = examplegen.Annotation.parse_csv_row({
'label': 'Mn',
'begin': '1.2',
'end': '1.8',
})
expected = examplegen.FileAnnotation(
label='Mn',
begin=datetime.timedelta(seconds=1.2),
end=datetime.timedelta(seconds=1.8),
)
self.assertEqual(expected, annotation)
def test_parse_annotation_utc(self):
annotation = examplegen.Annotation.parse_csv_row({
'label': 'Mn',
'begin_utc': '2008-05-06 11:24:41.268000',
'end_utc': '2008-05-06 11:24:42.472000',
})
expected = examplegen.UTCAnnotation(
label='Mn',
begin=datetime.datetime(2008, 5, 6, 11, 24, 41, 268000, tzinfo=tz.UTC),
end=datetime.datetime(2008, 5, 6, 11, 24, 42, 472000, tzinfo=tz.UTC),
)
self.assertEqual(expected, annotation)
def test_file_endpoints_override_utc_endpoints(self):
annotation = examplegen.Annotation.parse_csv_row({
'label': 'Mn',
'begin': '1.2',
'end': '1.8',
'begin_utc': '2008-05-06 11:24:41.268000',
'end_utc': '2008-05-06 11:24:42.472000',
})
expected = examplegen.FileAnnotation(
label='Mn',
begin=datetime.timedelta(seconds=1.2),
end=datetime.timedelta(seconds=1.8),
)
self.assertEqual(expected, annotation)
def test_parse_annotation_missing_fields(self):
with self.assertRaises(ValueError):
examplegen.Annotation.parse_csv_row({'label': 'Mn'})
def test_round_trip_file_annotation(self):
annotation = examplegen.FileAnnotation(
label='Mn',
begin=datetime.timedelta(seconds=2.1),
end=datetime.timedelta(seconds=3.1),
)
coder = beam.coders.registry.get_coder(examplegen.Annotation)
encoded = coder.encode(annotation)
self.assertEqual(annotation, coder.decode(encoded))
def test_file_annotation_relative_endpoints_within_clip(self):
clip_annotation = relative_endpoints_to_file_annotation_fixture(22, 23.2)
self.assertEqual(datetime.timedelta(seconds=2), clip_annotation.begin)
self.assertEqual(datetime.timedelta(seconds=3.2), clip_annotation.end)
def test_file_annotation_relative_endpoints_before_clip(self):
self.assertIsNone(relative_endpoints_to_file_annotation_fixture(1.5, 2.5))
def test_file_annotation_relative_endpoints_after_clip(self):
self.assertIsNone(relative_endpoints_to_file_annotation_fixture(42, 45))
def test_file_annotation_relative_endpoints_overlap_begin(self):
clip_annotation = relative_endpoints_to_file_annotation_fixture(19.5, 22.1)
self.assertEqual(datetime.timedelta(seconds=0), clip_annotation.begin)
self.assertEqual(datetime.timedelta(seconds=2.1), clip_annotation.end)
def test_round_trip_utc_annotation(self):
begin = datetime.datetime(2012, 2, 3, 11, 45, 15, tzinfo=tz.UTC)
end = begin + datetime.timedelta(seconds=1.7)
annotation = examplegen.UTCAnnotation(label='Mn', begin=begin, end=end)
coder = beam.coders.registry.get_coder(examplegen.Annotation)
encoded = coder.encode(annotation)
self.assertEqual(annotation, coder.decode(encoded))
def test_utc_annotation_relative_endpoints_within_clip(self):
clip_annotation = relative_endpoints_to_utc_annotation_fixture(22, 23.2)
self.assertEqual(datetime.timedelta(seconds=2), clip_annotation.begin)
self.assertEqual(datetime.timedelta(seconds=3.2), clip_annotation.end)
def test_utc_annotation_relative_endpoints_before_clip(self):
self.assertIsNone(relative_endpoints_to_utc_annotation_fixture(1.5, 2.5))
def test_utc_annotation_relative_endpoints_after_clip(self):
self.assertIsNone(relative_endpoints_to_utc_annotation_fixture(42, 45))
def test_utc_annotation_relative_endpoints_overlap_begin(self):
clip_annotation = relative_endpoints_to_utc_annotation_fixture(19.5, 22.1)
self.assertEqual(datetime.timedelta(seconds=0), clip_annotation.begin)
self.assertEqual(datetime.timedelta(seconds=2.1), clip_annotation.end)
if __name__ == '__main__':
unittest.main()
| 1.984375 | 2 |
evogtk/widgets.py | R3v1L/evogtk | 0 | 12797436 | # -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2008 EVO Sistemas Libres <<EMAIL>>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
# widgets
# EVOGTK Custom Widgets
###############################################################################
# EVOGTK base widgets
from gui.widgetlib.cairocanvas import CairoCanvas
from gui.widgetlib.cairoscroller import CairoScroller
from gui.widgetlib.floatingwindow import FloatingWindow
from gui.widgetlib.srceditor import SourceEditor
from gui.widgetlib.trayicon import TrayIcon
from gui.widgetlib.trayicon import AppIndicator
from gui.widgetlib.webbrowser import WebBrowser
from gui.widgetlib.datepicker import DatePicker
from gui.widgetlib.regexpentry import RegExpEntry
from gui.widgetlib.colorpicker import ColorPicker
from gui.widgetlib.fontcombo import FontCombo
# EVOGTK database widgets
from gui.widgetlib.dbentry import DBEntry
from gui.widgetlib.dbspinbutton import DBSpinButton
from gui.widgetlib.dbcombobox import DBComboBox
from gui.widgetlib.dbcheckbutton import DBCheckButton
from gui.widgetlib.dbdatepicker import DBDatePicker
from gui.widgetlib.dbcalendar import DBCalendar
from gui.widgetlib.dbregexpentry import DBRegExpEntry
| 1.203125 | 1 |
Codes/Abaqus_Indentation/2D/scripts_2D/geom_specimen.py | materialsguy/Predict_Nanoindentation_Tip_Wear | 0 | 12797437 | s = mdb.models['Model-1'].ConstrainedSketch(name='__profile__', sheetSize=200.0)
g, v, d, c = s.geometry, s.vertices, s.dimensions, s.constraints
s.sketchOptions.setValues(viewStyle=AXISYM)
s.setPrimaryObject(option=STANDALONE)
s.ConstructionLine(point1=(0.0, -100.0), point2=(0.0, 100.0))
s.FixedConstraint(entity=g[2])
s.rectangle(point1=(0.0, 0.0), point2=(35*indent_width, -t))
p = mdb.models['Model-1'].Part(name='Part-1', dimensionality=AXISYMMETRIC, type=DEFORMABLE_BODY)
p = mdb.models['Model-1'].parts['Part-1']
p.BaseShell(sketch=s)
s.unsetPrimaryObject()
p = mdb.models['Model-1'].parts['Part-1']
session.viewports['Viewport: 1'].setValues(displayedObject=p)
del mdb.models['Model-1'].sketches['__profile__']
p = mdb.models['Model-1'].parts['Part-1']
f, e, d = p.faces, p.edges, p.datums
t = p.MakeSketchTransform(sketchPlane=f[0], sketchPlaneSide=SIDE1, origin=(0.0, 0.0, 0.0))
s = mdb.models['Model-1'].ConstrainedSketch(name='__profile__', sheetSize=181.1, gridSpacing=4.52, transform=t)
g, v, d1, c = s.geometry, s.vertices, s.dimensions, s.constraints
s.setPrimaryObject(option=SUPERIMPOSE)
p = mdb.models['Model-1'].parts['Part-1']
p.projectReferencesOntoSketch(sketch=s, filter=COPLANAR_EDGES)
s.rectangle(point1=(0.0, 0.0), point2=(3*indent_width, -4*indent_depth))# size of the fine area
p = mdb.models['Model-1'].parts['Part-1']
f = p.faces
pickedFaces = f.getSequenceFromMask(mask=('[#1 ]', ), )
e1, d2 = p.edges, p.datums
p.PartitionFaceBySketch(faces=pickedFaces, sketch=s)
s.unsetPrimaryObject()
del mdb.models['Model-1'].sketches['__profile__']
p = mdb.models['Model-1'].parts['Part-1']
s = p.edges
side1Edges = s.getSequenceFromMask(mask=('[#60 ]', ), )
p.Surface(side1Edges=side1Edges, name='Surf-spec')
#p = mdb.models['Model-1'].parts['Part-1']
#s = p.edges
#side1Edges = s.getSequenceFromMask(mask=('[#1 ]', ), )
#p.Surface(side1Edges=side1Edges, name='Surf-spec')
#p = mdb.models['Model-1'].parts['Part-1']
#e = p.edges
#edges = s.getSequenceFromMask(mask=('[#4 ]', ), )
#p.Set(edges=edges, name='Set-spec')
| 1.742188 | 2 |
automounter.py | bpayne-novanta/automounter | 1 | 12797438 | from contextlib import contextmanager
import requests
def _validate_response(response):
if response.status_code == 200:
return response
json = response.json()
raise Exception(json["message"])
def _release_lease(lease_id):
_validate_response(requests.post("http://localhost:3000/leases/release", json={"leaseId": lease_id})).json()
def _create_lease(media_id):
lease = _validate_response(requests.post("http://localhost:3000/leases/create", json={"mediaId": media_id})).json()
if lease["success"] is False:
raise Exception(lease["message"])
return lease
def _get_usb_drives():
drives = []
result = _validate_response(requests.get("http://localhost:3000/media")).json()
for media in result:
if media["provider"] == "udisks":
drives.append(media)
return drives
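# Usage sketch for the context manager defined below (assumes the automounter
# service is listening on localhost:3000 and at least one USB drive is known):
#
#   with lease_first_drive_path() as mount_path:
#       if mount_path is None:
#           print("no usb drive available")
#       else:
#           print("drive mounted at", mount_path)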
@contextmanager
def lease_first_drive_path():
drives = _get_usb_drives()
if len(drives) == 0:
yield
return
lease = _create_lease(drives[0]["id"])
mount_path = lease["mountPath"]
try:
yield mount_path
finally:
_release_lease(lease["leaseId"]) | 2.609375 | 3 |
src/python/CreateTraversalGraphs.py | furodet/xpegraph | 0 | 12797439 | <gh_stars>0
"""
Reads a collection of partitions, created by BuildFragments, to produce their traversal graphs.
Input to this program are:
* The base name of fragment files
* The number of fragments to process
Output is a collection of mtx files, with the same base name, suffixed by the fragment number
and a capital T.
"""
from sys import argv
from typing import List, TextIO, Set
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.utils.graph import single_source_shortest_path_length
class ProgramConfiguration:
"""User arguments
Verifies that the program is supplied enough arguments and provides one function for
each argument type.
"""
def __init__(self, args: List[str]):
if len(args) != 3:
print("Usage: %s <fragments base name> <number of fragments>" % args[0])
exit(1)
self.args = args
def input_basename(self) -> str:
return self.args[1]
def nr_fragments(self) -> int:
return int(self.args[2])
class Node:
def __init__(self, partition: int, index: int):
self.partition = partition
self.index = index
def __str__(self) -> str:
return "%d.%d" % (self.partition, self.index)
class Edge:
def __init__(self, node1: Node, node2: Node):
self.x = node1
self.y = node2
def __str__(self) -> str:
return "(%s):(%s)" % (self.x, self.y)
class CooMatrixBuilder:
def __init__(self, nodes: Set[int]):
self.nodes = nodes
def execute(self) -> coo_matrix:
i = []
j = []
v = []
nodes = sorted(self.nodes)
for each_node in nodes:
for each_other_node in nodes:
if each_other_node > each_node:
i.append(each_node - 1)
j.append(each_other_node - 1)
v.append(1)
npi = np.array(i)
npj = np.array(j)
npv = np.array(v)
nr_nodes = nodes[-1]
return coo_matrix((npv, (npi, npj)), shape=(nr_nodes, nr_nodes))
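# Illustrative sketch (values chosen for demonstration): for nodes {1, 2, 3}
# CooMatrixBuilder yields the upper triangle of a clique, i.e. entries
# (0, 1), (0, 2) and (1, 2) of a 3x3 sparse matrix.
#
#   m = CooMatrixBuilder({1, 2, 3}).execute()
#   print(m.toarray())
#   # [[0 1 1]
#   #  [0 0 1]
#   #  [0 0 0]]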
class PartitionDescriptor:
def __init__(self, partition_id: int):
self.pid = partition_id
self.borders: Set[int] = set()
self.all_nodes: Set[int] = set()
self.inner_nodes: Set[int] = set()
self.edges = []
def add_edge(self, edge: Edge):
self.edges.append(edge)
self.__add_node_from_edge_if_in(edge.x, edge.y)
self.__add_node_from_edge_if_in(edge.y, edge.x)
def __add_node_from_edge_if_in(self, x: Node, edge_peer: Node):
if x.partition == self.pid:
self.all_nodes.add(x.index)
if edge_peer.partition != self.pid:
self.borders.add(x.index)
def get_inner_nodes(self) -> Set[int]:
if len(self.inner_nodes) == 0:
result = set()
for each_node in self.all_nodes:
if each_node not in self.borders:
result.add(each_node)
return result
else:
return self.inner_nodes
def q(self) -> float:
return float(len(self.borders)) / float(len(self.all_nodes))
def summarize(self):
inners = self.get_inner_nodes()
print("Partition %d" % self.pid)
print("\tInner nodes = %d = %s" % (len(inners), inners))
print("\tBorders = %d = %s" % (len(self.borders), self.borders))
print("\tQ%% = %d" % (100.0 * self.q()))
def get_initial_graph(self) -> coo_matrix:
return CooMatrixBuilder(self.all_nodes).execute()
class FragmentProcessor:
"""
Assuming that a fragment name is <basename>_<n>.txt, creates the associated fragment information.
"""
def __init__(self, fragment_file: TextIO, partition_id: int):
self.file = fragment_file
self.pid = partition_id
def get_descriptor(self) -> PartitionDescriptor:
result = PartitionDescriptor(self.pid)
each_line = self.file.readline()
while each_line:
items = each_line.replace("(", "").replace(")", "").split(":")
if len(items) == 2:
(node1, node2) = map(lambda s: s.split("."), items)
self.__add_edge_to(result, node1, node2)
each_line = self.file.readline()
return result
@classmethod
def __add_edge_to(cls, d: PartitionDescriptor, x: (str, str), y: (str, str)):
nx = Node(int(x[0]), int(x[1]))
ny = Node(int(y[0]), int(y[1]))
d.add_edge(Edge(nx, ny))
class WeightedEdge:
def __init__(self, x: int, y: int, weight: int):
self.x = x
self.y = y
self.weight = weight
class TraversalGraphBuilder:
def __init__(self, partition: PartitionDescriptor):
self.partition = partition
self.max_node = 0
self.edges: List[WeightedEdge] = []
def create_graph(self):
initial_graph = self.partition.get_initial_graph()
for each_node in self.partition.borders:
shortest_paths = single_source_shortest_path_length(initial_graph, each_node - 1)
for each_other_node in self.partition.borders:
if each_other_node > each_node:
shortest_distance = shortest_paths[each_other_node - 1]
edge = WeightedEdge(each_node, each_other_node, shortest_distance)
self.edges.append(edge)
if each_node > self.max_node:
self.max_node = each_node
return self
class Main:
def __init__(self, configuration: ProgramConfiguration):
self.configuration = configuration
def execute(self):
for each_fragment_id in range(0, self.configuration.nr_fragments()):
with open("%s_%d.txt" % (self.configuration.input_basename(), each_fragment_id), "rt") as f:
descriptor = FragmentProcessor(f, each_fragment_id).get_descriptor()
descriptor.summarize()
tg = TraversalGraphBuilder(descriptor).create_graph()
self.__write_graph_into("%s_%dT.mtx" % (self.configuration.input_basename(), each_fragment_id),
tg.max_node, tg.edges)
@classmethod
    def __write_graph_into(cls, file_name: str, nr_nodes: int, weighted_edges: List[WeightedEdge]):
with open(file_name, "wt") as f:
f.write("%%MatrixMarket matrix coordinate pattern symmetric\n")
f.write("%d %d %d\n" % (nr_nodes, nr_nodes, len(weighted_edges)))
for each_edge in weighted_edges:
f.write("%d %d %d\n" % (each_edge.x, each_edge.y, each_edge.weight))
# ================================================================================
Main(ProgramConfiguration(argv)).execute()
| 2.9375 | 3 |
backend/handlers/graphql/utils/subscription.py | al-indigo/vmemperor | 0 | 12797440 | <filename>backend/handlers/graphql/utils/subscription.py
import asyncio
from dataclasses import dataclass
from typing import Dict, Type
import graphene
from graphene import ObjectType
from graphene.types.resolver import dict_resolver
from graphql import ResolveInfo
from rethinkdb import RethinkDB
from rethinkdb.errors import ReqlOpFailedError
from rx import Observable
from enum import Enum
import constants.re as re
from authentication import BasicAuthenticator
from connman import ReDBConnection
from handlers.graphql.types.deleted import Deleted
from handlers.graphql.utils.querybuilder.changefeedbuilder import ChangefeedBuilder
from handlers.graphql.utils.querybuilder.get_fields import get_fields
from utils.user import user_entities
from xenadapter.xenobject import XenObject
from xenadapter.aclxenobject import ACLXenObject
class Change(graphene.Enum):
Initial = 'initial'
Add = 'add'
Remove = 'remove'
Change = 'change'
def str_to_changetype(s: str) -> Change:
if s == 'initial':
return Change.Initial
elif s == 'add':
return Change.Add
elif s == 'remove':
return Change.Remove
elif s == 'change':
return Change.Change
else:
raise ValueError(f"No such ChangeType: {s}")
@dataclass
class TaskCounter:
task : asyncio.Task
count = 1
async def create_single_changefeeds(queue: asyncio.Queue, info: ResolveInfo, user_authenticator : BasicAuthenticator, xenobject_type: Type[XenObject], with_initials : bool, filter_function=None):
async with ReDBConnection().get_async_connection() as conn:
tasks: Dict[str, TaskCounter] = {}
try:
if not user_authenticator or user_authenticator.is_admin() or not issubclass(xenobject_type, ACLXenObject):
table = re.db.table(xenobject_type.db_table_name)
else:
table = re.db.table(f'{xenobject_type.db_table_name}_user').get_all(*[entity for entity in user_entities(user_authenticator)], index='userid')
changes = await table.pluck('ref').changes(include_types=True, include_initial=True).run(conn)
while True:
try:
change = await changes.next()
except ReqlOpFailedError:
return
if not change:
break
if change['type'] == 'remove':
value = change['old_val']
task_counter = tasks[value['ref']]
task_counter.count -= 1
if task_counter.count == 0:
if not task_counter.task.done():
task_counter.task.cancel()
await queue.put({
'type': 'remove',
'old_val':
{
'ref' : value['ref']
}
})
del tasks[value['ref']]
elif change['type'] == 'change':
print(f"Ref change?: {change}")
continue
else:
value = change['new_val']
if filter_function and not (await filter_function(value['ref'], conn)):
continue
builder = ChangefeedBuilder(id=value['ref'],
info=info,
queue=queue,
additional_string=None,
select_subfield=['value'], # { value : {...} <-- this is what we need in info
status=change['type'],
ignore_initials=not with_initials)
if not value['ref'] in tasks:
tasks[value['ref']] = TaskCounter(task=asyncio.create_task(builder.put_values_in_queue()))
else:
tasks[value['ref']].count += 1
except asyncio.CancelledError:
for task_counter in tasks.values():
task_counter.task.cancel()
return
except Exception as e:
import sentry_sdk
sentry_sdk.capture_exception(e)
return
def MakeSubscriptionWithChangeType(_class : type) -> type:
"""
Create a subscription type with change tracking. If an object is deleted and it's a XenObject, only its ref is returned
:param _class: GraphQL type to track changes on
:return: GraphQL Union type: _class OR Deleted
"""
class Meta:
types = (_class, Deleted, )
change_type = type(f'{_class.__name__}OrDeleted', (graphene.Union, ), {
"Meta": Meta,
})
class Meta:
default_resolver = dict_resolver
return type(f'{_class.__name__}sSubscription',
(ObjectType, ),
{
'change_type': graphene.Field(Change, required=True, description="Change type"),
'value': graphene.Field(change_type, required=True),
'Meta': Meta
})
def MakeSubscription(_class : type) -> type:
'''
Creates a subscription type for resolve_item_by_pkey
This is suitable when one wants to subscribe to changes for one particular item
:param _class:
:return:
'''
#return type(f'{_class.__name__}Subscription',
# (ObjectType, ),
# {
# _class.__name__: graphene.Field(_class)
# })
return _class
def resolve_xen_item_by_key(key_name:str = 'ref'):
"""
    Returns an asynchronous function that resolves every change to the item with the given primary key in a RethinkDB table.
    If the item is deleted or does not exist, null is returned in its place.
    :param key_name: name of the resolver argument that holds the item's primary key (default: 'ref')
    :return: function that returns an Observable. Works with asyncio
"""
def resolve_item(root, info, **args) -> Observable:
'''
Create a field with MakeSubscription(type)
:param root:
:param info:
:param args:
:return:
'''
async def iterable_to_item():
key = args.get(key_name, None)
if not key:
yield None
return
builder = ChangefeedBuilder(key, info)
async for change in builder.yield_values():
if not change:
break
if change['type'] == 'remove' or change['new_val'] is None:
yield None
continue
else:
value = change['new_val']
yield value
return Observable.from_future(iterable_to_item())
return resolve_item
def resolve_all_xen_items_changes(item_class: type, filter_function=None):
"""
    Returns an asynchronous function that resolves every change in a RethinkDB table
    :param item_class: GraphQL object type that has the same shape as the table
    :param filter_function: this function is given the ref of a potential subscription candidate (0th arg) and an asyncio connection to work with the DB (1st arg).
    It should return True or False, answering whether we should include this item in the subscription.
    resolve_vdis is a usage example.
    Bear in mind that this function is called only once when a new item is added, as well as once for each initial item.
:return:
"""
def resolve_items(root, info : ResolveInfo, with_initials : bool, **kwargs) -> Observable:
'''
Returns subscription updates with the following shape:
{
changeType: one of Initial, Add, Mod, Remove
value: of type item_class
}
Create a field with MakeSubscriptionWithChangeType(type)
:param info:
        :param with_initials: Supply subscription with initial values (default: False). Use True when the Subscription is not used as a backer for a Query
'''
async def iterable_to_items():
fields_for_return_type = get_fields(info, ['value'])
xenobject_type = fields_for_return_type['_xenobject_type_']
queue = asyncio.Queue()
authenticator = info.context.user_authenticator
creator_task = asyncio.create_task(create_single_changefeeds(queue, info, authenticator, xenobject_type, with_initials, filter_function))
try:
while True:
change = await queue.get()
if change['type'] == 'remove':
value = change['old_val']
value['__typename'] = 'Deleted'
else:
value = change['new_val']
value['__typename'] = item_class.__name__
yield dict(change_type=str_to_changetype(change['type']),
value=value)
except asyncio.CancelledError:
creator_task.cancel()
return
return Observable.from_future(iterable_to_items())
return resolve_items
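# Wiring sketch (illustrative only; `GVM` stands for any GraphQL type backed by
# a XenObject and is not defined in this module):
#
#   class Subscription(graphene.ObjectType):
#       vms = graphene.Field(MakeSubscriptionWithChangeType(GVM),
#                            with_initials=graphene.Boolean(default_value=False),
#                            resolver=resolve_all_xen_items_changes(GVM))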
def resolve_item_by_key(item_class: type, table_name : str, key_name:str = 'ref'):
"""
Returns an asynchronous function that resolves every change in RethinkDB table with item with said primary key
If item is deleted or does not exist, returns null in place of an item
:param item_class: A GraphQL object type that has the same shape as a table
:param table: a RethinkDB table to retrieve updates from
:return: function that returns Observable. Works with asyncio
"""
def resolve_item(root, info, **args) -> Observable:
'''
Create a field with MakeSubscription(type)
:param root:
:param info:
:param args:
:return:
'''
async def iterable_to_item():
async with ReDBConnection().get_async_connection() as conn:
key = args.get(key_name, None)
if not key:
yield None
return
table = re.db.table(table_name)
changes = await table.get_all(key) \
.pluck(*item_class._meta.fields)\
.changes(include_types=True, include_initial=True).run(conn)
while True:
change = await changes.next()
if not change:
break
if change['type'] == 'remove' or change['new_val'] is None:
yield None
continue
else:
value = change['new_val']
yield item_class(**value)
return Observable.from_future(iterable_to_item())
return resolve_item
def resolve_all_items_changes(item_class: type, table_name : str):
"""
Returns an asynchronous function that resolves every change in RethinkDB table
:param item_class: GraphQL object type that has same shape as a table
:param table: RethinkDB table
:return:
"""
def resolve_items(root, info, with_initials: bool) -> Observable:
'''
Returns subscription updates with the following shape:
{
changeType: one of Initial, Add, Mod, Remove
value: of type item_class
}
Create a field with MakeSubscriptionWithChangeType(type)
:param info:
:return:
'''
async def iterable_to_items():
async with ReDBConnection().get_async_connection() as conn:
table = re.db.table(table_name)
changes = await table.pluck(*item_class._meta.fields.keys()).changes(include_types=True, include_initial=with_initials).run(conn)
while True:
change = await changes.next()
if not change:
break
if change['type'] == 'remove':
value = change['old_val']
else:
value = change['new_val']
value = item_class(**value)
yield MakeSubscriptionWithChangeType(item_class)(change_type=str_to_changetype(change['type']),
value=value)
return Observable.from_future(iterable_to_items())
return resolve_items
| 1.984375 | 2 |
basic_qlestimator.py | marwenbelkaid/basic-ql-model-fitting | 0 | 12797441 | <filename>basic_qlestimator.py
from sklearn.base import BaseEstimator
import inspect
import numpy as np
import random
import math
''' Define softmax function '''
def softmax(opts, tau):
norm = 0
ret = []
for i in range(0,len(opts)):
ret.append(math.exp(opts[i]/tau))
norm += ret[i]
for i in range(0,len(opts)):
ret[i] /= norm
return ret
''' Define function for random draw from discrete proba dist '''
def draw_from_discrete_proba(dist):
d = random.uniform(0, 1)
cumul = 0
for i in range(0, len(dist)):
if d < dist[i] + cumul:
return i
cumul += dist[i]
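''' Worked example: with options [1.0, 0.0] and tau = 1.0, softmax gives
[e / (e + 1), 1 / (e + 1)] ~= [0.731, 0.269], so draw_from_discrete_proba
returns option 0 roughly 73% of the time; a larger tau flattens the
distribution towards a uniform choice. '''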
''' Define QL algorithm for this problem '''
def rl_3target_vc(alpha, tau, max_steps):
''' Define states, actions and reward probabilities '''
IN_A = GOTO_A = 0
IN_B = GOTO_B = 1
IN_C = GOTO_C = 2
states = [IN_A, IN_B, IN_C]
actions = [GOTO_A, GOTO_B, GOTO_C]
labels = ["A", 'B', "C"]
RW_A = 1.
RW_B = 0.
RW_C = 1.
rewards = [RW_A, RW_B, RW_C]
''' Define output data variables '''
out_choices = [] # append choices
str_choices = "" # useful for vc
out_vcrw = [] # append rewards given by vc
''' Intialize Q table '''
init_Qval = 1.1
Q = np.full( (len(states), len(actions)), init_Qval ) # optimistic initialization
''' Start simulation '''
s = IN_A
step = 0
while step < max_steps:
''' Act using softmax policy '''
a = s
opts = np.copy(Q[s][:])
opts[s] = -99
dist = softmax(opts[:], tau)
while a==s: # try until GOTO != s, because agent cannot choose the same target in two consecutive trials
a = draw_from_discrete_proba(dist)
''' Get reward: GOTO_X -> RW_X '''
draw = random.uniform(0, 1)
if draw < rewards[a]:
r = 1
else:
r = 0
''' Update Q table '''
delta = alpha*( r - Q[s][a] ) # gamma = 0
Q[s][a] += delta
''' Update state '''
s = a
''' Update loop variable '''
step += 1
''' Save output data '''
out_choices.append(a)
str_choices = str_choices + str(a)
if r <= 0:
out_vcrw.append(0)
else:
out_vcrw.append(1)
return [out_choices, out_vcrw]
class QLEstimator(BaseEstimator):
"""
This is my toy example of a Q-Learning estimator using softmax policy
It has a constructor and it inherits the get_params and set_params methods from BaseEstimator
    It overrides the methods fit() and predict(), and adds score_wrt_average() and score_wrt_closest()
"""
def __init__(self, alpha=0.1, tau=5.0, average_line=-1, sample_index=-1, run=1, log_results_internally=1, log_sequences_internally=1):
"""
This is the constructor
Parameters
----------
alpha, tau : float
QL parameters, resp. learning rate and softmax temperature
average_line : int
            index of the line in the dataset that corresponds to the average to be fitted;
            -1 means the average will be computed online
        sample_index : int
            index of current estimator, the default value -1 should be overridden
run : int
index of run (if many executions per paramset)
log_results_internally, log_sequences_internally : int
flags indicating whether to save log and result files within estimator
this is useful when using Grid or Random Search Optimization methods
however Bayesian Optimization has built-in log functions
"""
''' Initialize the estimator with contructor arguments '''
args, _, _, values = inspect.getargvalues(inspect.currentframe())
values.pop("self")
for arg, val in values.items():
setattr(self, arg, val)
''' Add hard-coded parameters '''
self.max_steps = 10000 # number of trials per run
self.lensession = 200 # number of trials in one session
self.nb_test_sessions = 10 # number of sessions at the end of run of which the average is considered as the final results of the run
''' Print attributes '''
#for arg, val in values.items():
#print("{} = {}".format(arg,val))
def fit(self, X, y=None):
"""
This is where the QL is run and the results are saved
"""
''' Check parameters '''
self.__check_params(X)
''' Run QL '''
out_choices, out_vcrw = rl_3target_vc(self.alpha, self.tau, self.max_steps)
''' Set attributes '''
self.rw_ = out_vcrw
self.s_ = out_choices
''' Save data '''
if self.log_sequences_internally == 1:
datafile = "log/fit_" + str(self.sample_index) + "_run_" + str(self.run)
data_to_save = np.transpose((out_choices, out_vcrw))
np.savetxt(datafile, data_to_save, fmt='%d')
def predict(self, X, y=None):
"""
This is where the output data is predicted.
        Here, it amounts to evaluating the success (reward) rate and the u-turn rate of the QL agent.
"""
try:
getattr(self, "rw_")
getattr(self, "s_")
except AttributeError:
raise RuntimeError("You must train estimator before predicting data!")
# use check_is_fitted(self, ['X_', 'y_']) instead ?
''' Get average result over nb_test_sessions last sessions '''
max_steps = self.max_steps
lensession = self.lensession
nb_test_sessions = self.nb_test_sessions
mean_rw_rate = 0
mean_ut_rate = 0
for i in range(0, nb_test_sessions):
last_step = max_steps - i*lensession
first_step = last_step - lensession
rw_in_session = self.rw_[first_step:last_step]
s_in_session = self.s_[first_step:last_step]
# rw
nb_rw = np.bincount(rw_in_session)[1]
rw_rate = nb_rw * 1. / lensession
# ut
nb_ut = 0
for k in range(2, lensession):
if s_in_session[k] == s_in_session[k-2]:
nb_ut += 1
ut_rate = nb_ut * 1. / (lensession-2)
mean_rw_rate += rw_rate
mean_ut_rate += ut_rate
mean_rw_rate /= nb_test_sessions
mean_ut_rate /= nb_test_sessions
return [mean_rw_rate, mean_ut_rate]
def score_wrt_average(self, X, y=None):
"""
        This is where the score of the estimator wrt the average is computed.
        Here, the score is 1 minus the mean absolute distance between prediction and data.
"""
score = 0
pred = self.predict(X)
if self.average_line == -1:
nb_datapoints = len(X)
data0 = 0.
data1 = 0.
for i in range(0, nb_datapoints):
data0 += X[i][0]
data1 += X[i][1]
data0 /= (nb_datapoints * 100.) # data given as percentage but score measured based on rates
data1 /= (nb_datapoints * 100.) # data given as percentage but score measured based on rates
else:
data0 = X[self.average_line][0] / 100. # data given as percentage but score measured based on rates
data1 = X[self.average_line][1] / 100. # data given as percentage but score measured based on rates
pred0 = pred[0]
pred1 = pred[1]
score = 1 - ( (abs(pred0 - data0) + abs(pred1 - data1)) / 2. )
if self.log_results_internally == 1:
logfile = "results/score_a_fit_" + str(self.sample_index) + "_run_" + str(self.run)
with open(logfile,'a') as outfp:
data_to_save = '{:4.3f} -1 {:4.3f} {:06.3f} -1 {:4.3f} {:4.3f} {:4.3f} {:4.3f}\n'.format(score, self.alpha, self.tau, data0, data1, pred0, pred1)
outfp.write(data_to_save)
return score
def score_wrt_closest(self, X, y=None):
closest_score = 0
closest_index = -1
pred = self.predict(X)
pred0 = pred[0]
pred1 = pred[1]
nb_datapoints = len(X)
for i in range(0, nb_datapoints):
if i != self.average_line :
data0 = X[i][0] / 100.
data1 = X[i][1] / 100.
score = 1 - ( (abs(pred0 - data0) + abs(pred1 - data1)) / 2. )
if score > closest_score:
closest_score = score
closest_index = i
if self.log_results_internally == 1:
logfile = "results/score_c_fit_" + str(self.sample_index) + "_run_" + str(self.run)
with open(logfile,'a') as outfp:
data_to_save = '{:4.3f} {:d} {:4.3f} {:06.3f} {:d} {:4.3f} {:4.3f}\n'.format(closest_score, closest_index, self.alpha, self.tau, closest_index, pred0, pred1)
                outfp.write(data_to_save)
        return closest_score
"""
These are private methods
"""
def __check_params(self, X):
""" This is called by fit() to check parameters """
        if (self.alpha - 1 > 0.0000001) or (self.alpha - 0 < 0.0000001):
            print("Error: Invalid value for parameter alpha given to {0}. Value must be in [0,1].".format(self))
            exit()
        if (self.tau - 0 < 0.0000001):
            print("Error: Invalid value for parameter tau given to {0}. Value must be > 0.".format(self))
            exit()
        if (self.sample_index == -1):
            print("Error: Invalid value for parameter sample_index given to {0}. Default value (-1) should be overridden with a positive value.".format(self))
            exit()
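if __name__ == '__main__':
    # Minimal usage sketch: fit one agent and score it against a single
    # hypothetical data point given as [success %, u-turn %]; the numbers are
    # illustrative only.
    X = [[66.0, 10.0]]
    estimator = QLEstimator(alpha=0.1, tau=5.0, sample_index=0,
                            log_results_internally=0,
                            log_sequences_internally=0)
    estimator.fit(X)
    print("predicted [reward rate, u-turn rate]: {0}".format(estimator.predict(X)))
    print("score wrt average: {0}".format(estimator.score_wrt_average(X)))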
| 2.859375 | 3 |
PYTHON/pythonDesafios/desafio048.py | Santos1000/Curso-Python | 0 | 12797442 | <reponame>Santos1000/Curso-Python
cont = 0
soma = 0
for c in range(1, 501, 2):
    if c % 3 == 0:
soma = soma + c
cont = cont + 1
print(f'The sum of all {cont} odd numbers is: {soma}')
| 3.625 | 4 |
TestMovimientos/unitary_derivative.py | JDanielGar/ConvolutionMovements | 0 | 12797443 | # June 5, 2018
# May 31, 2018
import numpy as np
import matplotlib.pyplot as plt
from time import sleep
import cv2
class Recurrent_Photo:
'''
Recurrent Photo only for testing
'''
def __init__(self, iterations=100, resize=(1280, 720)):
self.camera = cv2.VideoCapture(0)
self.video = np.zeros([iterations, resize[1], resize[0], 3])
for iteration in range(iterations):
self.video[iteration, :, :] = cv2.resize(
(self.camera.read()[1]/255),
                # FIXME: NOW WE WORK WITH ALL THREE CHANNELS
resize
)
cv2.imshow('Prueba', self.video[iteration, :, :])
cv2.waitKey(1)
self.camera.release()
self.resize = resize
def get_recurrence(self, alpha=(0.75555, 0.25555)):
'''
        Alpha is a pair of floats giving the amount of superposition
        that you want to have in the current image.
        Example:
            alpha = (0.5, 0.5) is a neutral change, where the last
            image has the same weight as the first image.
'''
first = np.array(self.video[0:self.video.shape[0]-1, :, :,])
second = np.array(self.video[1:self.video.shape[0], :, :])
diferences = self.get_diference(
second,
first
)
for image in range(len(diferences)):
diferences[image] = diferences[image-1]* alpha[0] + diferences[image]* alpha[1]
            # See the equation in the notebook.
return diferences
def get_diference(self, A, B):
'''
Get diference from two items
'''
return np.abs(A - B)
def resize_images(X, dimensions=(100, 75)):
    if len(X.shape) == 3:
        return cv2.resize(X, dimensions)
    # cv2.resize returns new arrays, so collect the resized frames into a
    # new stack instead of reassigning the loop variable.
    return np.array([cv2.resize(image, dimensions) for image in X])
def show_image(X):
if len(X.shape) == 3:
cv2.imshow('image', X)
else:
for image in X:
cv2.imshow('X', image)
cv2.waitKey(1)
sleep(0.05)
non_movement = Recurrent_Photo(50)
print('Prepare next movement...')
sleep(2)
movement = Recurrent_Photo(50)
non_movement_recurrence = non_movement.get_recurrence()
movement_recurrence = movement.get_recurrence()
X = resize_images(non_movement_recurrence)
Y = resize_images(movement_recurrence)
| 3.15625 | 3 |
msldap/authentication/spnego/asn1_structs.py | blshkv/msldap | 1 | 12797444 | #!/usr/bin/env python3
#
# Author:
# <NAME> (@skelsec)
#
# https://www.rfc-editor.org/rfc/rfc4178.txt
from asn1crypto.core import ObjectIdentifier, Sequence, SequenceOf, Enumerated, GeneralString, OctetString, BitString, Choice, Any, Boolean
import enum
import os
import io
TAG = 'explicit'
# class
UNIVERSAL = 0
APPLICATION = 1
CONTEXT = 2
class MechType(ObjectIdentifier):
_map = {
'1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider',
'1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos 5',
'1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5',
'1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User to User',
'1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism',
}
class MechTypes(SequenceOf):
_child_spec = MechType
class ContextFlags(BitString):
_map = {
0: 'delegFlag',
1: 'mutualFlag',
2: 'replayFlag',
3: 'sequenceFlag',
4: 'anonFlag',
5: 'confFlag',
6: 'integFlag',
}
class NegState(Enumerated):
_map = {
0: 'accept-completed',
1: 'accept-incomplete',
2: 'reject',
3: 'request-mic',
}
class NegHints(Sequence):
_fields = [
('hintName', GeneralString, {'explicit': 0, 'optional': True}),
('hintAddress', OctetString, {'explicit': 1, 'optional': True}),
]
# https://www.rfc-editor.org/rfc/rfc4178.txt 4.2.1
# EXTENDED IN: https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-spng/8e71cf53-e867-4b79-b5b5-38c92be3d472
class NegTokenInit2(Sequence):
#explicit = (APPLICATION, 0)
_fields = [
('mechTypes', MechTypes, {'tag_type': TAG, 'tag': 0}),
('reqFlags', ContextFlags, {'tag_type': TAG, 'tag': 1, 'optional': True}),
('mechToken', OctetString, {'tag_type': TAG, 'tag': 2, 'optional': True}),
('negHints', NegHints, {'tag_type': TAG, 'tag': 3, 'optional': True}),
('mechListMIC', OctetString, {'tag_type': TAG, 'tag': 4, 'optional': True}),
]
# https://www.rfc-editor.org/rfc/rfc4178.txt 4.2.2
class NegTokenResp(Sequence):
#explicit = (APPLICATION, 1)
_fields = [
('negState', NegState, {'tag_type': TAG, 'tag': 0, 'optional': True}),
('supportedMech', MechType, {'tag_type': TAG, 'tag': 1, 'optional': True}),
('responseToken', OctetString, {'tag_type': TAG, 'tag': 2, 'optional': True}),
('mechListMIC', OctetString, {'tag_type': TAG, 'tag': 3, 'optional': True}),
]
class NegotiationToken(Choice):
_alternatives = [
('negTokenInit', NegTokenInit2, {'explicit': (CONTEXT, 0) } ),
('negTokenResp', NegTokenResp, {'explicit': (CONTEXT, 1) } ),
]
class GSS_SPNEGO(Sequence):
class_ = 2
tag = 0
_fields = [
('NegotiationToken', NegotiationToken),
]
### I have 0 idea where this is standardized :(
class GSSType(ObjectIdentifier):
_map = {
#'': 'SNMPv2-SMI::enterprises.311.2.2.30',
'1.3.6.1.5.5.2': 'SPNEGO',
}
class GSSAPI(Sequence):
class_ = 1
tag = 0
_fields = [
('type', GSSType, {'optional': False}),
('value', Any, {'optional': False}),
]
_oid_pair = ('type', 'value')
_oid_specs = {
'SPNEGO': NegotiationToken,
}
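# Parsing sketch (illustrative; `blob` is assumed to hold a DER-encoded GSSAPI
# token, e.g. the SPNEGO blob from an LDAP bind or an HTTP Negotiate header):
#
#   token = GSSAPI.load(blob)
#   print(token['type'].native)   # 'SPNEGO'
#   neg = token['value']          # parsed as NegotiationToken via _oid_specs
#   print(neg.name)               # 'negTokenInit' or 'negTokenResp'
#   print(neg.chosen.native)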
# https://tools.ietf.org/html/rfc2743#page-81
# You may think this is ASN1. But in truth, it's not.
# Below is a fucking disgrace of a protocol design.
class KRB5Token:
def __init__(self, data = None, tok_id = b'\x01\x00'):
self.tok_id = tok_id
self.data = data
@staticmethod
def from_bytes(data):
return KRB5Token.from_buffer(io.BytesIO(data))
@staticmethod
def from_buffer(buff):
t = KRB5Token()
		buff.read(1)  # skip the leading 0x60 application tag byte
		x = int.from_bytes(buff.read(1), 'big', signed = False)
		if x <= 127:
			length = x
		else:
			# long form: the low bits give the number of length octets
			x &= ~0x80
			length = int.from_bytes(buff.read(x), 'big', signed = False)
		buff.read(11)  # skip the OID header and the 9-byte kerberos OID
		t.tok_id = int.from_bytes(buff.read(2), 'big', signed = False)
		t.data = buff.read(length-13)
return t
def length_encode(self, x):
if x <= 127:
return x.to_bytes(1, 'big', signed = False)
else:
lb = x.to_bytes((x.bit_length() + 7) // 8, 'big')
t = (0x80 | len(lb)).to_bytes(1, 'big', signed = False)
return t+lb
def to_bytes(self):
t = b'\x60' #
t += self.length_encode(11 + 2 + len(self.data))
t += bytes.fromhex('06092a864886f712010202') #OID length + OID for kerberos
t += self.tok_id
t += self.data
return t | 1.929688 | 2 |
ecr-build-push.py | diegoaltx/drone-ecr-build-push | 1 | 12797445 | import docker
import boto3
import os
import sys
import base64
from datetime import datetime, timezone
def get_docker_client():
return docker.from_env()
def get_ecr_clients(settings):
clients = []
for region in settings['regions']:
clients.append(boto3.client('ecr',
aws_access_key_id=settings['access_key_id'],
aws_secret_access_key=settings['secret_access_key'],
region_name=region
))
return clients
def get_sts_client(settings):
return boto3.client('sts',
aws_access_key_id=settings['access_key_id'],
aws_secret_access_key=settings['secret_access_key']
)
def exit_with_error(message, *args):
print('Something went wrong:', message.format(*args), file=sys.stderr, flush=True)
sys.exit(1)
def get_aws_account_id(sts_client):
return sts_client.get_caller_identity().get('Account')
def get_regions(env):
regions = env.get('PLUGIN_REGION')
if not regions:
return None
return regions.split(',')
def get_repo(env):
return env.get('PLUGIN_REPO', env.get('DRONE_REPO_NAME'))
def get_dockerfile(env):
return env.get('PLUGIN_DOCKERFILE', './Dockerfile')
def get_tags(env):
user_tags = env.get('PLUGIN_TAGS')
tags = [tag for tag in user_tags.split(',')]
return tags
def get_settings(env):
return {
'access_key_id': env.get('PLUGIN_ACCESS_KEY_ID'),
'secret_access_key': env.get('PLUGIN_SECRET_ACCESS_KEY'),
'regions': get_regions(env),
'repo': get_repo(env),
'dockerfile': get_dockerfile(env),
'commit': env.get('DRONE_COMMIT'),
'repo_link': env.get('DRONE_REPO_LINK'),
'tags': get_tags(env)
}
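# Settings sketch (illustrative): Drone exposes the plugin settings to this
# script as PLUGIN_* environment variables, for example:
#
#   PLUGIN_ACCESS_KEY_ID / PLUGIN_SECRET_ACCESS_KEY   AWS credentials
#   PLUGIN_REGION      comma-separated regions, e.g. "us-east-1,eu-west-1"
#   PLUGIN_REPO        ECR repository name (defaults to DRONE_REPO_NAME)
#   PLUGIN_DOCKERFILE  path to the Dockerfile (defaults to ./Dockerfile)
#   PLUGIN_TAGS        comma-separated image tags, e.g. "latest,1.2.3"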
def get_ecr_login(ecr_client, registry_id):
response = ecr_client.get_authorization_token(registryIds=[registry_id])
registry = response['authorizationData'][0]['proxyEndpoint']
token = response['authorizationData'][0]['authorizationToken']
username, password = base64.b64decode(token).decode().split(':')
return {
'username': username,
'password': password,
'registry': registry
}
def get_repos(settings, ecr_clients, aws_account_id):
repos = []
for client in ecr_clients:
response = client.describe_repositories(
registryId=aws_account_id,
repositoryNames=[settings['repo']]
)
repo = response['repositories'][0]
repos.append({
'registry_id': repo['registryId'],
'name': repo['repositoryName'],
'uri': repo['repositoryUri'],
'login': get_ecr_login(client, repo['registryId'])
})
return repos
def login_to_registries(docker_client, repos):
for repo in repos:
login = repo['login']
docker_client.login(
login['username'],
login['password'],
registry=login['registry']
)
def build_image(docker_client, settings):
build_tag = ':'.join((settings['repo'], settings['tags'][0]))
build_date = datetime.now(timezone.utc).astimezone().isoformat()
image, *_ = docker_client.images.build(
path="./",
tag=build_tag,
dockerfile=settings['dockerfile'],
rm=True,
forcerm=True,
buildargs={
'CI_BUILD_DATE': build_date,
'CI_VCS_URL': settings['repo_link'],
'CI_VCS_REF': settings['commit']
},
labels={
'org.label-schema.schema-version': '1.0',
'org.label-schema.build-date': build_date,
'org.label-schema.vcs-url': settings['repo_link'],
'org.label-schema.vcs-ref': settings['commit']
}
)
return image
def tag_image(image, settings, repos):
for tag in settings['tags']:
for repo in repos:
image.tag(
repository=repo['uri'],
tag=tag
)
def push_image(docker_client, settings, repos):
for tag in settings['tags']:
for repo in repos:
docker_client.images.push(
repository=repo['uri'],
tag=tag
)
def build_and_push_image():
settings = get_settings(os.environ)
sts_client = get_sts_client(settings)
ecr_clients = get_ecr_clients(settings)
docker_client = get_docker_client()
print('Finding AWS account id...')
aws_account_id = get_aws_account_id(sts_client)
print('AWS account id is {0}.'.format(aws_account_id))
print('Repo name is', settings['repo'], flush=True)
print('Regions:')
for region in settings['regions']:
print('- ', region)
print('Fetching repos info from ECR across regions...', flush=True)
repos = get_repos(settings, ecr_clients, aws_account_id)
print('Fetched repos info.')
print('Repos:')
for repo in repos:
print('- ', repo['uri'])
print('Logging in to registries...', flush=True)
login_to_registries(docker_client, repos)
print('Logged in. Building image...', flush=True)
try:
image = build_image(docker_client, settings)
except docker.errors.BuildError as e:
for line in e.build_log:
if 'stream' in line:
print(line['stream'].strip())
raise
print('Build finished.')
print('Tags:')
for tag in settings['tags']:
print('- ', tag)
print('Tagging image...', flush=True)
tag_image(image, settings, repos)
print('Tagged. Pushing image tags to registries...', flush=True)
push_image(docker_client, settings, repos)
print('Pushed. All done.')
if __name__ == '__main__':
build_and_push_image()
| 2.0625 | 2 |
samcli/commands/local/lib/local_context.py | Bradleywboggs/aws-sam-cli | 0 | 12797446 | from samcli.cli.context import Context
import click
class LocalContext(Context):
def __init__(self):
super().__init__()
self._aws_account_id = None
@property
def aws_account_id(self):
return self._aws_account_id
@aws_account_id.setter
def aws_account_id(self, value):
self._aws_account_id = value
self._refresh_session()
pass_context = click.make_pass_decorator(LocalContext)
| 2.203125 | 2 |
amfe/mor/hyper_red/__init__.py | sivasanarul/amfe_topopt | 0 | 12797447 | #
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Hyper reduction module
"""
from .ecsw import *
from .ecsw_assembly import *
from .poly3 import *
from .training_set_generation import *
| 0.996094 | 1 |
traffic_engineering/benchmarks/benchmark_helpers.py | stanford-futuredata/POP | 15 | 12797448 | <gh_stars>10-100
from collections import defaultdict
from glob import iglob
import argparse
import os
import sys
sys.path.append("..")
from lib.partitioning import FMPartitioning, SpectralClustering
PROBLEM_NAMES = [
"GtsCe.graphml",
"UsCarrier.graphml",
"Cogentco.graphml",
"Colt.graphml",
"TataNld.graphml",
"Deltacom.graphml",
"DialtelecomCz.graphml",
"Kdl.graphml",
]
TM_MODELS = [
"uniform",
"gravity",
"bimodal",
"poisson-high-intra",
"poisson-high-inter",
]
SCALE_FACTORS = [1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0]
PATH_FORM_HYPERPARAMS = (4, True, "inv-cap")
NCFLOW_HYPERPARAMS = {
"GtsCe.graphml": (4, True, "inv-cap", FMPartitioning, 3),
"UsCarrier.graphml": (4, True, "inv-cap", FMPartitioning, 3),
"Cogentco.graphml": (4, True, "inv-cap", FMPartitioning, 3),
"Colt.graphml": (4, True, "inv-cap", FMPartitioning, 3),
"TataNld.graphml": (4, True, "inv-cap", FMPartitioning, 3),
"Deltacom.graphml": (4, True, "inv-cap", FMPartitioning, 3),
"DialtelecomCz.graphml": (4, True, "inv-cap", FMPartitioning, 3),
"Uninett2010.graphml": (4, True, "inv-cap", FMPartitioning, 3),
"Interoute.graphml": (4, True, "inv-cap", SpectralClustering, 2),
"Ion.graphml": (4, True, "inv-cap", FMPartitioning, 3),
"Kdl.graphml": (4, True, "inv-cap", FMPartitioning, 3),
"erdos-renyi-1260231677.json": (4, True, "inv-cap", FMPartitioning, 3),
}
PROBLEM_NAMES_AND_TM_MODELS = [
(prob_name, tm_model) for prob_name in PROBLEM_NAMES for tm_model in TM_MODELS
]
PROBLEMS = []
GROUPED_BY_PROBLEMS = defaultdict(list)
HOLDOUT_PROBLEMS = []
GROUPED_BY_HOLDOUT_PROBLEMS = defaultdict(list)
for problem_name in PROBLEM_NAMES:
if problem_name.endswith(".graphml"):
topo_fname = os.path.join("..", "topologies", "topology-zoo", problem_name)
else:
topo_fname = os.path.join("..", "topologies", problem_name)
for model in TM_MODELS:
for tm_fname in iglob(
"../traffic-matrices/{}/{}*_traffic-matrix.pkl".format(model, problem_name)
):
vals = os.path.basename(tm_fname)[:-4].split("_")
_, traffic_seed, scale_factor = vals[1], int(vals[2]), float(vals[3])
GROUPED_BY_PROBLEMS[(problem_name, model, scale_factor)].append(
(topo_fname, tm_fname)
)
PROBLEMS.append((problem_name, topo_fname, tm_fname))
for tm_fname in iglob(
"../traffic-matrices/holdout/{}/{}*_traffic-matrix.pkl".format(
model, problem_name
)
):
vals = os.path.basename(tm_fname)[:-4].split("_")
_, traffic_seed, scale_factor = vals[1], int(vals[2]), float(vals[3])
GROUPED_BY_HOLDOUT_PROBLEMS[(problem_name, model, scale_factor)].append(
(topo_fname, tm_fname)
)
HOLDOUT_PROBLEMS.append((problem_name, topo_fname, tm_fname))
GROUPED_BY_PROBLEMS = dict(GROUPED_BY_PROBLEMS)
for key, vals in GROUPED_BY_PROBLEMS.items():
GROUPED_BY_PROBLEMS[key] = sorted(vals)
GROUPED_BY_HOLDOUT_PROBLEMS = dict(GROUPED_BY_HOLDOUT_PROBLEMS)
for key, vals in GROUPED_BY_HOLDOUT_PROBLEMS.items():
GROUPED_BY_HOLDOUT_PROBLEMS[key] = sorted(vals)
def get_problems(args):
problems = []
for (
(problem_name, tm_model, scale_factor),
topo_and_tm_fnames,
) in GROUPED_BY_PROBLEMS.items():
for slice in args.slices:
if (
("all" in args.topos or problem_name in args.topos)
and ("all" in args.tm_models or tm_model in args.tm_models)
and ("all" in args.scale_factors or scale_factor in args.scale_factors)
):
topo_fname, tm_fname = topo_and_tm_fnames[slice]
problems.append((problem_name, topo_fname, tm_fname))
return problems
def get_args_and_problems(output_csv_template, additional_args=[]):
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", dest="dry_run", action="store_true", default=False)
parser.add_argument("--obj", type=str, choices=["total_flow", "mcf"], required=True)
parser.add_argument(
"--tm-models", type=str, choices=TM_MODELS + ["all"], nargs="+", default="all",
)
parser.add_argument(
"--topos", type=str, choices=PROBLEM_NAMES + ["all"], nargs="+", default="all",
)
parser.add_argument(
"--scale-factors",
type=lambda x: x if x == "all" else float(x),
choices=SCALE_FACTORS + ["all"],
nargs="+",
default="all",
)
parser.add_argument(
"--slices", type=int, choices=range(5), nargs="+", required=True
)
for additional_arg in additional_args:
name_or_flags, kwargs = additional_arg[0], additional_arg[1]
parser.add_argument(name_or_flags, **kwargs)
args = parser.parse_args()
slice_str = "slice_" + "_".join(str(i) for i in args.slices)
output_csv = output_csv_template.format(args.obj, slice_str)
return args, output_csv, get_problems(args)
def print_(*args, file=None):
if file is None:
file = sys.stdout
print(*args, file=file)
file.flush()
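# Typical driver sketch (illustrative; the CSV template name is hypothetical):
#
#   if __name__ == "__main__":
#       args, output_csv, problems = get_args_and_problems("demo_{}_{}.csv")
#       for problem_name, topo_fname, tm_fname in problems:
#           print_("Running", problem_name, "on", tm_fname)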
| 1.773438 | 2 |
Products/GSContentManager/add_page.py | groupserver/Products.GSContentManager | 0 | 12797449 | # coding=utf-8
'''Implementation of the Add Page form.
'''
try:
from five.formlib.formbase import AddForm
except ImportError:
from Products.Five.formlib.formbase import AddForm # lint:ok
from zope.component import createObject
from zope.formlib import form
from Products.Five.browser.pagetemplatefile import ZopeTwoPageTemplateFile
from zope.app.form.browser import TextAreaWidget
from zope.app.apidoc.interface import getFieldsInOrder
from zope.schema import *
import interfaces
from page import GSContentPage
def wym_editor_widget(field, request):
retval = TextAreaWidget(field, request)
retval.cssClass = 'wymeditor'
return retval
class AddPageForm(AddForm):
label = u'Add Page'
pageTemplateFileName = 'browser/templates/edit_page.pt'
template = ZopeTwoPageTemplateFile(pageTemplateFileName)
def __init__(self, context, request):
self.context = context
self.request = request
self.interface = interface = getattr(interfaces, 'IGSContentPage')
AddForm.__init__(self, context, request)
self.siteInfo = createObject('groupserver.SiteInfo', context)
site_root = context.site_root()
assert hasattr(site_root, 'GlobalConfiguration')
self.form_fields = form.Fields(interface, render_context=True,
omit_readonly=True)
self.form_fields['content'].custom_widget = wym_editor_widget
self.form_fields['content'].field.default = u'<p>Enter content '\
u'here.</p>'
self.mode = 'add'
@property
def id(self):
return self.form_fields['id']
@property
def title(self):
return self.form_fields['title']
@property
def description(self):
return self.form_fields['description']
@property
def content(self):
return self.form_fields['content']
# --=mpj17=--
# The "form.action" decorator creates an action instance, with
# "handle_reset" set to the success handler,
# "handle_reset_action_failure" as the failure handler, and adds the
# action to the "actions" instance variable (creating it if
    # necessary). I did not need to explicitly state that "Add" is the
# label, but it helps with readability.
@form.action(label=u'Add', failure='handle_set_action_failure')
def handle_set(self, action, data):
return self.set_data(data)
def handle_set_action_failure(self, action, data, errors):
if len(errors) == 1:
self.status = u'<p>There is an error:</p>'
else:
self.status = u'<p>There are errors:</p>'
def set_data(self, data):
assert self.context
assert self.form_fields
alteredFields = []
for datum in getFieldsInOrder(self.interface):
if datum[0] in data:
if data[datum[0]] != getattr(self.context, datum[0]):
alteredFields.append(datum[0])
# Create the content folder and object and apply changes.
folder = GSContentPage(self.context, mode='add', id=data['id'])
if folder.status['error']:
retval = u'%s' % folder.status['msg']
changed = form.applyChanges(folder, self.form_fields, data)
# All good, so redirect to the edit page.
if changed:
url = '%s/edit_page.html' % folder.context.absolute_url(0)
self.request.response.redirect(url)
return
else:
retval = u'Problem creating page'
assert retval
assert type(retval) == unicode
self.status = retval
| 2.09375 | 2 |
Array/Partition_List.py | shua2018ti/Google | 87 | 12797450 | '''
Given a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.
You should preserve the original relative order of the nodes in each of the two partitions.
For example,
Given 1->4->3->2->5->2 and x = 3,
return 1->2->2->4->3->5.
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param {ListNode} head
# @param {integer} x
# @return {ListNode}
def partition(self, head, x):
if not head: return None
head1 = ListNode(0)
head2 = ListNode(0)
lower = head1
higher = head2
while head:
if head.val < x:
lower.next = head
head = head.next
lower = lower.next
lower.next = None
else:
higher.next = head
head = head.next
higher = higher.next
higher.next = None
lower.next = head2.next
return head1.next
# if not head: return None
# dummy = ListNode(0)
# dummy.next = head
# lower = cur = dummy
# while cur.next:
# if cur.next.val >= x:
# cur = cur.next
# else:
# tmp = cur.next
# cur.next = cur.next.next
# tmp.next = lower.next
# lower.next = tmp
# lower = lower.next
# return dummy.next
# # The above method does not work
# # Test Case: 1) [2,1], 2; [1], 2; [1,3,2], 3
| 3.96875 | 4 |