| id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses, 1 value) |
---|---|---|
1765954
|
<reponame>ErickGallani/lunchticketcontrol
""" This module is for aggregate all configs strategies """
class Config(object):
""" Default config """
# Application name
APP_NAME = 'Lunch ticket'
# Application host
HOST = '127.0.0.1'
# Application port
PORT = 5050
# Application protocol
PROTOCOL = 'https'
# Debug mode. This should be False in production mode
DEBUG = False
# Secret key used for JWT authentication
SECRET_KEY = '3beeddd1e32bdadf478cdeba2c9222c36ca0b12a4808c7a35380e5a74ca4513f'
TESTING = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///data.db'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SESSION_COOKIE_NAME = 'lunchticketapp'
PASSWORD_SCHEMES = '<PASSWORD>'
SWAGGER_DOCS_URL = '/api/docs'
API_SPEC_URL = '/api/swagger'
API_VERSION = '1.0'
@classmethod
def get_swagger_api_url(cls):
"""
Get full swagger api url.
Eg.: https://127.0.0.1:5050/api/swagger.json
"""
return "%s://%s:%s%s.json" % (cls.PROTOCOL, cls.HOST, cls.PORT, cls.API_SPEC_URL)
class ProductionConfig(Config):
""" Production configuration """
SQLALCHEMY_DATABASE_URI = 'mysql://user@localhost/foo'
class DevelopmentConfig(Config):
""" Development configuration """
DEBUG = True
class StagingConfig(Config):
""" Staging configuration """
DEBUG = True
class TestingConfig(Config):
""" Test configuration """
# in memory database
SQLALCHEMY_DATABASE_URI = 'sqlite://'
TESTING = True
APP_CONFIGS_TYPES = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'staging': StagingConfig,
'production': ProductionConfig,
}
def get_app_config(config_name):
"""
Get the application configuration by configuration name
:param config_name: Configuration name to retrieve the config class
"""
return APP_CONFIGS_TYPES[config_name]
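# Usage sketch (added for illustration, not part of the original module): picks a
# config class by name and prints a few of its values.
if __name__ == '__main__':
    config_class = get_app_config('development')
    print(config_class.APP_NAME)               # Lunch ticket
    print(config_class.DEBUG)                  # True for DevelopmentConfig
    print(config_class.get_swagger_api_url())  # https://127.0.0.1:5050/api/swagger.json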
|
StarcoderdataPython
|
3223425
|
import torch
import torch.nn.functional as F
from scipy.stats import wasserstein_distance
def MMD(samples_A, samples_B, sigma=1, biased=True):
alpha = 1 / (2 * sigma**2)
B = samples_A.size(0)
AA, BB = torch.mm(samples_A, samples_A.t()), torch.mm(samples_B, samples_B.t())
AB = torch.mm(samples_A, samples_B.t())
rA = (AA.diag().unsqueeze(0).expand_as(AA))
rB = (BB.diag().unsqueeze(0).expand_as(BB))
K = torch.exp(- alpha * (rA.t() + rA - 2*AA))
L = torch.exp(- alpha * (rB.t() + rB - 2*BB))
P = torch.exp(- alpha * (rA.t() + rB - 2*AB))
if biased:
return K.mean() + L.mean() - 2 * P.mean()
beta = (1./(B*(B-1)))
gamma = (2./(B*B))
return beta*(torch.sum(K)+torch.sum(L)) - gamma * torch.sum(P)
def KL_divergence(samples_A, samples_B):
    # F.kl_div expects log-probabilities as input and probabilities as target,
    # so normalise the raw samples first (assumption: each row is treated as a
    # distribution over its last dimension).
    return F.kl_div(F.log_softmax(samples_A, dim=-1), F.softmax(samples_B, dim=-1))
def Wasserstein_distance(sample_A, sample_B):
    # scipy's wasserstein_distance works on 1-D empirical samples, so flatten
    # multi-dimensional inputs before computing the distance.
    if isinstance(sample_A, torch.Tensor):
        sample_A = sample_A.cpu().numpy()
    if isinstance(sample_B, torch.Tensor):
        sample_B = sample_B.cpu().numpy()
    return wasserstein_distance(sample_A.ravel(), sample_B.ravel())
def FID(samples_A, samples_B):  # not implemented
pass
if __name__ == '__main__':
x = torch.rand((8, 1600)).float()
y = torch.rand((8, 1600)).float()
mmd_dis = MMD(x, y)
kl_dis = KL_divergence(x, y)
wasser_dis = Wasserstein_distance(x, y)
print(mmd_dis)
print(kl_dis)
print(wasser_dis)
|
StarcoderdataPython
|
3398855
|
"""
Copyright 2018 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import urllib
def get_query_parameter(request):
# collect the first value of each query parameter into a dict
query_parameters = {}
parsed_query = urllib.parse.parse_qs(request.uri_query)
for parameter in parsed_query:
query_parameters[parameter] = parsed_query[parameter][0]
return query_parameters
def parse_url(url):
return urllib.parse.urlparse(url)
def generate_uri(ip, paths, parameters=None):
    """
    Generates a URI based on the protocol, the ip/hostname, multiple paths and parameters
    """
    uri = 'coap://' + ip
    for path in paths:
        uri += path
    # only append the query string when parameters are given
    if parameters:
        uri += '?' + urllib.parse.urlencode(parameters)
    return uri
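# Usage sketch (added for illustration; the IP, path segments and parameter values
# below are placeholders, not values from the original project):
if __name__ == '__main__':
    uri = generate_uri('192.168.1.10', ['/sensors', '/temperature'], {'unit': 'celsius'})
    print(uri)                  # coap://192.168.1.10/sensors/temperature?unit=celsius
    print(parse_url(uri).path)  # /sensors/temperature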
|
StarcoderdataPython
|
1710988
|
<filename>src/refline/srccheck/testing/ignored_bad.py<gh_stars>0
# this is a file full of problems
# but ignored with:
# checker_ignore_this_file
import os
def doit():
foo = bar
def with_tab():
print "there's a tab"
|
StarcoderdataPython
|
3313958
|
<filename>scripts/tms_writer.py<gh_stars>10-100
# -*- coding: utf-8 -*-
import os
import sys
import getopt
from textwrap import dedent
from forge.lib.tiler import TilerManager
from forge.lib.helpers import error
def usage():
print(dedent('''\
Usage: venv/bin/python scripts/tms_writer.py
[-d database.cfg|--database=database.cfg]
[-c tms.cfg|--config=tms.cfg]
<command>
Commands:
create: create the tiles and write them to S3
metadata: create the metadata file (layer.json)
stats: provides a report containing the stats
for a given TMS config
statsnodb: provides a short report containing the stats
for a given TMS config
'''))
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'c:d:', ['config=', 'database='])
except getopt.GetoptError as err:
error(str(err), 2, usage=usage)
dbConfigFile = 'configs/terrain/database.cfg'
tmsConfigFile = 'configs/terrain/tms.cfg'
for o, a in opts:
if o in ('-d', '--database'):
dbConfigFile = a
elif o in ('-c', '--config'):
tmsConfigFile = a
if not (os.path.exists(dbConfigFile) and os.path.exists(tmsConfigFile)):
    error('config file(s) do not exist', 1, usage=usage)
if len(args) < 1:
error('you must specify a command', 3, usage=usage)
tiler = TilerManager(dbConfigFile, tmsConfigFile)
command = args[0]
if command == 'create':
tiler.create()
elif command == 'metadata':
tiler.metadata()
elif command == 'stats':
tiler.stats()
elif command == 'statsnodb':
tiler.statsNoDb()
# aws queue specific functions
elif command == 'createqueue':
tiler.createQueue()
elif command == 'createtiles':
tiler.createTiles()
elif command == 'deletequeue':
tiler.deleteQueue()
elif command == 'queuestats':
tiler.queueStats()
else:
error("unknown command '%(command)s'" % {
'command': command}, 4, usage=usage
)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1736141
|
<reponame>technige/httpstream
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012-2015, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
from .util.ordereddict import OrderedDict
from httpstream import Query
def test_can_parse_none_query():
query = Query(None)
assert str(query) == ""
assert query.string is None
assert dict(query) == {}
assert len(query) == 0
assert not query.__bool__()
assert not query.__nonzero__()
def test_can_parse_empty_query():
query = Query("")
assert str(query) == ""
assert query.string == ""
assert dict(query) == {}
assert len(query) == 0
assert not query.__bool__()
assert not query.__nonzero__()
def test_can_parse_key_only_query():
query = Query("foo")
assert str(query) == "foo"
assert query.string == "foo"
assert dict(query) == {"foo": None}
assert query.get("foo") is None
assert len(query) == 1
assert query.__bool__()
assert query.__nonzero__()
def test_can_parse_key_value_query():
query = Query("foo=bar")
assert str(query) == "foo=bar"
assert query.string == "foo=bar"
assert dict(query) == {"foo": "bar"}
assert query.get("foo") == "bar"
assert len(query) == 1
assert query.__bool__()
assert query.__nonzero__()
def test_can_parse_multi_key_value_query():
query = Query("foo=bar&spam=eggs")
assert str(query) == "foo=bar&spam=eggs"
assert query.string == "foo=bar&spam=eggs"
assert dict(query) == {"foo": "bar", "spam": "eggs"}
assert query.get("foo") == "bar"
assert query.get("spam") == "eggs"
def test_can_parse_mixed_query():
query = Query("foo&spam=eggs")
assert str(query) == "foo&spam=eggs"
assert query.string == "foo&spam=eggs"
assert dict(query) == {"foo": None, "spam": "eggs"}
assert query.get("foo") is None
assert query.get("spam") == "eggs"
def test_query_equality():
query1 = Query("foo=bar&spam=eggs")
query2 = Query("foo=bar&spam=eggs")
assert query1 == query2
def test_query_inequality():
query1 = Query("foo=bar&spam=eggs")
query2 = Query("foo=bar&spam=bacon")
assert query1 != query2
def test_query_equality_when_none():
query = Query(None)
none = None
assert query == none
def test_query_is_hashable():
query = Query("foo=bar&spam=eggs")
hashed = hash(query)
assert hashed
def test_getting_non_existent_query_parameters_causes_key_error():
query = Query("foo=bar&spam=eggs")
try:
query.get("bacon")
except KeyError:
assert True
else:
assert False
def test_getting_all_non_existent_query_parameters_causes_key_error():
query = Query("foo=bar&spam=eggs")
try:
query.get_all("bacon")
except KeyError:
assert True
else:
assert False
def test_can_get_nth_parameter():
query = Query("foo=bar&foo=baz&foo=qux&spam=eggs")
assert query.get("foo", 0) == "bar"
assert query.get("foo", 1) == "baz"
assert query.get("foo", 2) == "qux"
try:
query.get("foo", 3)
except IndexError:
assert True
else:
assert False
def test_can_get_all_parameters_with_name():
query = Query("foo=bar&foo=baz&foo=qux&spam=eggs")
values = query.get_all("foo")
assert values == ["bar", "baz", "qux"]
def test_can_get_query_item():
query = Query("one=eins&two=zwei&three=drei&four=vier&five=fünf")
bit = query[2]
assert bit == ("three", "drei")
def test_can_get_query_slice():
query = Query("one=eins&two=zwei&three=drei&four=vier&five=fünf")
bits = query[1:3]
assert bits.string == "two=zwei&three=drei"
def test_can_check_parameter_exists():
query = Query("one=eins&two=zwei&three=drei&four=vier&five=fünf")
assert ("two", "zwei") in query
assert ("nine", "neun") not in query
def test_old_slicing_method():
query = Query("one=eins&two=zwei&three=drei&four=vier&five=fünf")
bits = query.__getslice__(1, 3)
assert bits.string == "two=zwei&three=drei"
def test_passing_a_slice_through_getitem():
query = Query("one=eins&two=zwei&three=drei&four=vier&five=fünf")
bits = query.__getitem__(slice(1, 3))
assert bits.string == "two=zwei&three=drei"
|
StarcoderdataPython
|
3381532
|
import warnings
import matplotlib.pyplot as plt
import cv2
import mmcv
import random
from PIL import Image
import numpy as np
import torch
from mmcv.runner import load_checkpoint
from mmcv.parallel import collate, scatter
from openselfsup.models import build_model
from openselfsup.utils import build_from_cfg
from openselfsup.datasets.registry import PIPELINES
from torchvision.transforms import Compose
def init_model(config, checkpoint=None, device='cuda:0'):
"""Initialize a model from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
'but got {}'.format(type(config)))
config.model.pretrained = None
model = build_model(config.model)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage(object):
def __call__(self, results):
if isinstance(results['img'], str):
results['filename'] = results['img']
else:
results['filename'] = None
img = Image.open(results['img'])
img = img.convert('RGB')
return img
def inference_model(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
If imgs is a str, a generator will be returned, otherwise return the
detection results directly.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = cfg.data.test.pipeline
test_pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
test_pipeline = Compose(test_pipeline)
# prepare data
img = Image.open(img)
img = img.convert('RGB')
data = test_pipeline(img)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(data, mode='test')
return result
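# Usage sketch (added for illustration): the config path, checkpoint path and
# image path below are placeholders and must point at real files.
if __name__ == '__main__':
    model = init_model('configs/selfsup/example_config.py',
                       checkpoint='work_dirs/example/latest.pth',
                       device='cuda:0')
    result = inference_model(model, 'demo/example.jpg')
    print(type(result))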
|
StarcoderdataPython
|
3228627
|
<reponame>SPIN-UMass/SWEET<filename>mailmynet/Maildir/proxy_postfix/Twisted-11.0.0/build/lib.linux-x86_64-2.6/twisted/internet/reactor.py
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The reactor is the Twisted event loop within Twisted, the loop which drives
applications using Twisted. The reactor provides APIs for networking,
threading, dispatching events, and more.
The default reactor is based on C{select(2)} and will be installed if this
module is imported without another reactor being explicitly installed.
Regardless of which reactor is installed, importing this module is the correct
way to get a reference to it.
New application code should prefer to pass and accept the reactor as a
parameter where it is needed, rather than relying on being able to import this
module to get a reference. This simplifies unit testing and may make it easier
to one day support multiple reactors (as a performance enhancement), though
this is not currently possible.
@see: L{IReactorCore<twisted.internet.interfaces.IReactorCore>}
@see: L{IReactorTime<twisted.internet.interfaces.IReactorTime>}
@see: L{IReactorProcess<twisted.internet.interfaces.IReactorProcess>}
@see: L{IReactorTCP<twisted.internet.interfaces.IReactorTCP>}
@see: L{IReactorSSL<twisted.internet.interfaces.IReactorSSL>}
@see: L{IReactorUDP<twisted.internet.interfaces.IReactorUDP>}
@see: L{IReactorMulticast<twisted.internet.interfaces.IReactorMulticast>}
@see: L{IReactorUNIX<twisted.internet.interfaces.IReactorUNIX>}
@see: L{IReactorUNIXDatagram<twisted.internet.interfaces.IReactorUNIXDatagram>}
@see: L{IReactorFDSet<twisted.internet.interfaces.IReactorFDSet>}
@see: L{IReactorThreads<twisted.internet.interfaces.IReactorThreads>}
@see: L{IReactorArbitrary<twisted.internet.interfaces.IReactorArbitrary>}
@see: L{IReactorPluggableResolver<twisted.internet.interfaces.IReactorPluggableResolver>}
"""
import sys
del sys.modules['twisted.internet.reactor']
from twisted.internet import selectreactor
selectreactor.install()
|
StarcoderdataPython
|
3365054
|
from custom_components.sbahn_munich.const import DEFAULT_LIMIT
from datetime import date, datetime
import json
from custom_components.sbahn_munich.sensor import SBahnStation
from custom_components.sbahn_munich.api import Timetable, Station
def test_device_state_attributes():
station = Station("testStation", [], "1235")
entity = SBahnStation(station, None, None, None, DEFAULT_LIMIT)
line_dict = {
"name": "S3",
"color": "#333333",
"text_color": "#444444",
}
timetable = []
timetable.append(
Timetable(
line_dict,
ris_estimated_time=datetime.now().timestamp() * 1000,
ris_aimed_time=datetime.now().timestamp() * 1000,
train_type=2,
to="target1",
updated_at=datetime.now().timestamp() * 1000,
)
)
timetable.append(
Timetable(
line_dict,
ris_estimated_time=datetime.now().timestamp() * 1000,
ris_aimed_time=datetime.now().timestamp() * 1000,
train_type=2,
to="target2",
updated_at=datetime.now().timestamp() * 1000,
),
)
entity._timetable = timetable
attributes = entity.device_state_attributes
json.dumps(attributes)
assert attributes is not None
|
StarcoderdataPython
|
3259825
|
# import pyedflib
import numpy as np
from scipy import signal as sg
import argparse
import sys
import json
# import matplotlib.pyplot as plt
from pprint import pprint
import pandas as pd
class Notch():
Q = 0
f0 = 0
def __init__(self,f0=60,Q=50):
self.f0=f0
self.Q=Q
def argparse(self):
parser = argparse.ArgumentParser()
parser.add_argument('-i','--archivo',help='Name of the .edf file to use',type = str)
parser.add_argument('-fo','--fo',help='Frequency to filter out. Default fo = 60',type = float)
parser.add_argument('-Q','--Q',help='Quality factor of the filter. Default Q = 50',type = int)
parser.add_argument('-e','--edf',help='Name and path of the output .edf file',type = str)
parsedargs = parser.parse_args()
arc = parsedargs.archivo
output = parsedargs.edf
if (parsedargs.fo != None):
if (parsedargs.fo> 0):
self.f0 = parsedargs.fo
if (parsedargs.Q != None):
if (parsedargs.Q>0):
self.Q = parsedargs.Q
return arc,output
# def read_edf(self,nameEdf):
# '''
# Description: Reads the .edf file
# Inputs:  - nameEdf: name of the .edf file
# Outputs: - in_signal: Channels x Time matrix
# - fs: Sampling frequency
# - headers: Labels of the .edf file
# '''
# edf = pyedflib.EdfReader(nameEdf)
# headers = edf.getSignalHeaders()
# nch = edf.signals_in_file
# nsig = edf.getNSamples()[0]
# fs = edf.getSampleFrequency(0)
# in_signal = np.zeros((nch,nsig))
# for x in range(nch):
# in_signal[x,:] = edf.readSignal(x)
# edf._close()
# del edf
# return in_signal,fs,headers
def filt(self,in_signal,fs):
'''
Description: Filters the EEG data
Inputs:  - in_signal: Channels x Time matrix
- fs: Sampling frequency
Outputs: - out_signal: Filtered EEG (Channels x Time matrix)
'''
w0 = self.f0/(fs/2)
num,den = sg.iirnotch(w0,self.Q)
out_signal = np.zeros((len(in_signal),len(in_signal[0])))
for i in range(0,len(in_signal)):
out_signal[i]=sg.filtfilt(num,den,in_signal[i])
return out_signal,num,den
# def write_edf(self,in_signal,headers,nameEdf):
# '''
# Description: Writes the data of the new EEG
# Inputs:  - headers: labels of the .edf
# - in_signal: Channels x Time matrix
# - nameEdf: Name under which the new .edf file is saved
# '''
# edf = pyedflib.EdfWriter(nameEdf,len(in_signal),file_type=pyedflib.FILETYPE_EDFPLUS)
# edf_info = []
# edf_signal = []
# for i in range (len(in_signal)):
# channel_info={'label':headers[i]['label'],'dimension':headers[i]['dimension'],'sample_rate':headers[i]['sample_rate'],'physical_max':headers[i]['physical_max'] , 'physical_min': headers[i]['physical_min'], 'digital_max': headers[i]['digital_max'], 'digital_min': headers[i]['digital_min'], 'transducer':headers[i]['transducer'] , 'prefilter':headers[i]['prefilter']+',notch '+str(self.f0)+'Hz'}
# edf_info.append(channel_info)
# edf_signal.append(in_signal[i])
# edf.setSignalHeaders(edf_info)
# edf.writeSamples(edf_signal)
# edf.close()
# del edf
#Read data from stdin
def read_in():
lines = sys.stdin.readlines()
# Since our input has only one line, parse the JSON data from it
return json.loads(lines[0])
if __name__ == '__main__':
notch1 = Notch()
# argparse input mode
# print ("start of notch")
# arc,output = notch1.argparse()
# signal , fs ,headers= notch1.read_edf(arc)
# filtered_signal,num,den = notch1.filt(signal[:,232250:234750],fs)
# print("size of output",filtered_signal.shape)
# print(vals)
# print("size of input",in_signal.shape)
# fig,subplt=plt.subplots(3,1,figsize=(8,5))
# subplt[0].plot(t,inp[9][ni:nf])
# subplt[0].title.set_text('Señal original')
# subplt[0].grid()
#notch1.write_edf(filtered_signal,headers,output)
# python-shell input mode
inSignals=read_in()
nch=len(inSignals)
nSamples = len(inSignals[0]['data'])
fs=inSignals[0]['samplefrequency']
# print(nch,nSamples)
in_signal = np.zeros((nch,nSamples))
# print(len(inSignals))
# print(len(inSignals[0]['data']))
currentCh=0
for item in inSignals:
for subitem in item['data']:
subitem.pop('time', None)
df = pd.DataFrame(item['data'])
in_signal[currentCh,:]=np.array(df.values).transpose()
# print (in_signal[currentCh,:],currentCh)
currentCh = currentCh +1
# python-shell execute mode
filtered_signal,num,den = notch1.filt(in_signal,fs)
# python-shell output mode
response={}
response['channels']=[]
currentCh=0
for channel in inSignals:
channelObj={}
channelObj['id']=channel['id']
channelObj['label']=channel['label']
channelObj['samples']=channel['samples']
channelObj['physicalMaximum']=channel['physicalMaximum']
channelObj['physicalMinimum']=channel['physicalMinimum']
channelObj['digitalMaximum']=channel['digitalMaximum']
channelObj['digitalMinimum']=channel['digitalMinimum']
channelObj['samplefrequency']= channel['samplefrequency']
channelObj['data']=[]
currentD=0
for subitem in channel['data']:
d={}
# d['value']=float(subitem['value'])
d['value']=float(filtered_signal[currentCh,currentD])
channelObj['data'].append(d)
currentD=currentD+1
response['channels'].append(channelObj)
currentCh=currentCh+1
# print(channelObj['id'])
print (json.dumps(response))
|
StarcoderdataPython
|
1608814
|
<reponame>geancarlo/fastapi-autowire
from .domain import Bar, Foo
from .services import FooPrinter
def controller1(foo: Foo):
return foo.value1, foo.value2
def controller2(foo_printer: FooPrinter):
return foo_printer.print()
def controller3(bar: Bar):
return bar.__dict__
|
StarcoderdataPython
|
21792
|
import codecs
import os
# Function to save a string into a file
def save_string_in_file(string_text, file_name):
with codecs.open(file_name, "w", "utf-8") as f:
f.write(string_text)
f.close()
# Function to read all files in a dir with a specific extension
def read_files_in_dir_ext(dir_route, extension):
files = os.listdir(dir_route)
files_ext = [file for file in files if file.endswith(extension)]
return files_ext
# Function to read a file into a string
def read_file_in_string(file_name):
file_in_string = ""
with codecs.open(file_name, "r", "utf-8") as f:
file_in_string = f.read()
f.close()
return file_in_string
# Function to create a directory
def create_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
return
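# Usage sketch (added for illustration; 'output' and the file name are placeholders):
if __name__ == '__main__':
    create_directory('output')
    save_string_in_file('hello world', 'output/example.txt')
    print(read_file_in_string('output/example.txt'))  # hello world
    print(read_files_in_dir_ext('output', '.txt'))    # ['example.txt']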
|
StarcoderdataPython
|
4812915
|
import pytest
# set choices for --test-type
test_choices = ['all', 'default', 'matlab']
def pytest_addoption(parser):
"""
Command line input function which requires 'all', 'default' or 'matlab'
as an input
"""
parser.addoption(
"--test-type", action="store", default="all", type=str,
choices=test_choices
)
def pytest_configure(config):
"""
Sets variable `pytest.test_type` to be used within testing
"""
pytest.test_type = config.getoption('--test-type')
def run_for_test_types(TEST_TYPE, *test_types):
"""
Sets decorator that specifies the test types for which a
class/function should be skipped
:param TEST_TYPE: the test type selected by the user
:type TEST_TYPE: str
:param test_types: the test types for which the class/function
should be run
:type test_types: str
:return: decorator that specifies if class/function should be
skipped based on chosen TEST_TYPE
:rtype: callable
"""
to_skip = [type for type in test_choices
if type not in test_types]
return pytest.mark.skipif(TEST_TYPE in to_skip,
reason="Tests can't be run with selected test type")
|
StarcoderdataPython
|
51377
|
<gh_stars>1-10
import sys,os
import py2 as csv
import converter,iohelper
from itertools import groupby
def do_split(csvfilepath,colidx):
result = {}
header=[]
iohelper.delete_file_folder('splitted/')
iohelper.dir_create('splitted/')
with open(csvfilepath, 'rb') as csvfile:
csvreader = csv.reader(csvfile, encoding='utf-8')
header=csvreader.next()
for row in csvreader:
if row[colidx] in result:
result[row[colidx]].append(row)
if(len(result[row[colidx]])>100000):
print 'start---%s' % row[colidx]
with open("splitted/%s.csv" % row[colidx], "ab") as output:
wr = csv.writer(output, encoding='utf-8')
if os.path.getsize("splitted/%s.csv" % row[colidx])==0:
wr.writerow(header)
converter.convert_to_utf8("splitted/%s.csv" % row[colidx])
for line in result[row[colidx]]:
wr.writerow(line)
print 'end---%s' %row[colidx]
result[row[colidx]]=[]
else:
result[row[colidx]] = [row]
for attr, value in result.iteritems():
if attr!='-' and attr!='':
print 'start---%s' %attr
with open("splitted/%s.csv" %attr, "ab") as output:
wr = csv.writer(output, quoting=csv.QUOTE_ALL)
if os.path.getsize("splitted/%s.csv" % attr)==0:
wr.writerow(header)
converter.convert_to_utf8("splitted/%s.csv" % attr)
for line in value:
wr.writerow(line)
print 'end---%s' %attr
value=[]
|
StarcoderdataPython
|
138281
|
import sys, re, os
from pathlib import Path
_, output_dir, *output_base_names = sys.argv
chrom_regex = re.compile(r'(chr[a-zA-Z0-9]+)')
chromosomes = [chrom_regex.search(x).group(1) for x in output_base_names]
output_dir = Path(output_dir)
if not output_dir.exists():
os.makedirs(str(output_dir))
output_files = dict()
for chrom, output_base_name in zip(chromosomes, output_base_names):
output_path = output_dir / output_base_name
output_files[chrom] = open(str(output_path), 'w')
for line in sys.stdin:
chrom = line.split()[0]
if chrom not in output_files:
print(line, end='', file=sys.stderr)
else:
output_files[chrom].write(line)
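# Usage sketch (added for illustration; script and file names are hypothetical):
# the script reads whitespace-separated lines whose first column is a chromosome
# name from stdin and writes each line to the output file matching its chromosome:
#   cat calls.bed | python split_by_chromosome.py out_dir calls_chr1.bed calls_chr2.bed
# Lines whose chromosome has no matching output file are echoed to stderr.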
|
StarcoderdataPython
|
1727888
|
<gh_stars>1-10
S = input()
print("No" if "L" in S[::2] or "R" in S[1::2] else "Yes")
|
StarcoderdataPython
|
19297
|
import unittest
import time
import copy
from unittest.mock import patch, MagicMock, call
from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties
from kafka import KafkaProducer
class TestPostalService(unittest.TestCase):
def setUp(self):
self.mock_delivery_service = MagicMock()
def test_init_without_delivery_service_throws_error(self):
with self.assertRaises(ValueError) as context:
PostalService()
self.assertEqual(str(context.exception), 'delivery_service argument not provided')
def test_post_sends_envelope_to_delivery_service(self):
postal_service = PostalService(delivery_service=self.mock_delivery_service)
test_envelope = Envelope('test', Message('test message'))
postal_service.post(test_envelope)
self.mock_delivery_service.deliver.assert_called_once_with(test_envelope)
def test_post_throws_error_when_envelope_is_none(self):
postal_service = PostalService(delivery_service=self.mock_delivery_service)
with self.assertRaises(ValueError) as context:
postal_service.post(None)
self.assertEqual(str(context.exception), 'An envelope must be passed to post a message')
class TestKafkaDeliveryService(unittest.TestCase):
def setUp(self):
self.messaging_properties = MessagingProperties()
self.messaging_properties.connection_address='test:9092'
self.messaging_properties.config={'api_version_auto_timeout_ms': 5000}
def test_init_without_messaging_config_throws_error(self):
with self.assertRaises(ValueError) as context:
KafkaDeliveryService()
self.assertEqual(str(context.exception), 'messaging_properties argument not provided')
def test_init_without_bootstrap_servers_throws_error(self):
messaging_properties = MessagingProperties()
messaging_properties.connection_address=None
with self.assertRaises(ValueError) as context:
KafkaDeliveryService(messaging_properties=messaging_properties)
self.assertEqual(str(context.exception), 'connection_address not set on messaging_properties')
@patch('ignition.service.messaging.KafkaProducer')
def test_deliver(self, mock_kafka_producer_init):
# need to set this explicitly because we've patched KafkaProducer
mock_kafka_producer_init.DEFAULT_CONFIG = KafkaProducer.DEFAULT_CONFIG
delivery_service = KafkaDeliveryService(messaging_properties=self.messaging_properties)
test_envelope = Envelope('test_topic', Message('test message'))
delivery_service.deliver(test_envelope)
mock_kafka_producer_init.assert_called_once_with(bootstrap_servers='test:9092', api_version_auto_timeout_ms=5000, client_id='ignition')
self.assertEqual(delivery_service.producer, mock_kafka_producer_init.return_value)
mock_kafka_producer = mock_kafka_producer_init.return_value
mock_kafka_producer.send.assert_called_once_with('test_topic', b'test message')
@patch('ignition.service.messaging.KafkaProducer')
def test_deliver_throws_error_when_envelope_is_none(self, mock_kafka_producer_init):
delivery_service = KafkaDeliveryService(messaging_properties=self.messaging_properties)
with self.assertRaises(ValueError) as context:
delivery_service.deliver(None)
self.assertEqual(str(context.exception), 'An envelope must be passed to deliver a message')
class TestKafkaInboxService(unittest.TestCase):
def setUp(self):
self.messaging_properties = MessagingProperties()
self.messaging_properties.connection_address='test:9092'
self.messaging_properties.config={'api_version_auto_timeout_ms':5000}
def test_init_without_messaging_config_throws_error(self):
with self.assertRaises(ValueError) as context:
KafkaInboxService()
self.assertEqual(str(context.exception), 'messaging_properties argument not provided')
def test_init_without_bootstrap_servers_throws_error(self):
messaging_properties = MessagingProperties()
messaging_properties.connection_address=None
with self.assertRaises(ValueError) as context:
KafkaInboxService(messaging_properties=messaging_properties)
self.assertEqual(str(context.exception), 'connection_address not set on messaging_properties')
@patch('ignition.service.messaging.KafkaInboxThread')
def test_watch_inbox_starts_thread(self, mock_kafka_inbox_thread_init):
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
mock_kafka_inbox_thread_init.assert_called_once_with('test:9092', 'test_group', 'test_topic', mock_read_inbox_func, inbox_service._KafkaInboxService__thread_exit_func, self.messaging_properties.config)
mock_kafka_inbox_thread_init.return_value.start.assert_called_once()
@patch('ignition.service.messaging.KafkaConsumer')
def test_watch_inbox_thread_inits_consumer(self, mock_kafka_consumer_init):
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
mock_kafka_consumer_init.assert_called_once_with('test_topic', bootstrap_servers='test:9092', group_id='test_group', enable_auto_commit=False)
@patch('ignition.service.messaging.KafkaConsumer')
def test_watch_inbox_thread_inits_consumer(self, mock_kafka_consumer_init):
mock_kafka_consumer = mock_kafka_consumer_init.return_value
mock_record_1 = MagicMock()
mock_record_2 = MagicMock()
infinite_iter_stop = False
infinite_iter_has_stopped = False
ready_for_second_message = False
second_message_sent = False
def build_iter():
def iter():
yield mock_record_1
while not infinite_iter_stop:
if ready_for_second_message:
yield mock_record_2
break
while not infinite_iter_stop:
time.sleep(0.001)
infinite_iter_has_stopped = True
return iter
mock_kafka_consumer.__iter__.side_effect = build_iter()
inbox_service = KafkaInboxService(messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
time.sleep(0.01)
try:
self.assertEqual(len(inbox_service.active_threads), 1)
expected_config = copy.copy(self.messaging_properties.config)
expected_config = {
'bootstrap_servers': 'test:9092',
'group_id': 'test_group',
'enable_auto_commit': False,
'client_id': 'ignition'
}
mock_kafka_consumer_init.assert_called_once_with('test_topic', **expected_config)
mock_kafka_consumer.__iter__.assert_called_once()
mock_record_1.value.decode.assert_called_once_with('utf-8')
mock_record_2.value.decode.assert_not_called()
mock_read_inbox_func.assert_called_once_with(mock_record_1.value.decode.return_value)
mock_kafka_consumer.commit.assert_called_once()
ready_for_second_message = True
time.sleep(1)
mock_record_2.value.decode.assert_called_once_with('utf-8')
mock_read_inbox_func.assert_called_with(mock_record_2.value.decode.return_value)
mock_kafka_consumer.commit.assert_has_calls([call(), call()])
finally:
infinite_iter_stop = True
time.sleep(1)
mock_kafka_consumer.close.assert_called_once()
self.assertEqual(len(inbox_service.active_threads), 0)
@patch('ignition.service.messaging._thread')
@patch('ignition.service.messaging.KafkaConsumer')
def test_watch_inbox_thread_calls_exit_func_on_error(self, mock_kafka_consumer_init, mock_thread):
mock_kafka_consumer = mock_kafka_consumer_init.return_value
mock_record_1 = MagicMock()
infinite_iter_stop = False
ready_for_message = True
def build_iter():
def iter():
while not infinite_iter_stop:
if ready_for_message:
yield mock_record_1
break
return iter
mock_kafka_consumer.__iter__.side_effect = build_iter()
inbox_service = KafkaInboxService(test_mode=True, messaging_properties=self.messaging_properties)
mock_read_inbox_func = MagicMock()
mock_read_inbox_func.side_effect = ValueError('Test error')
self.assertFalse(inbox_service.exited)
inbox_service.watch_inbox('test_group', 'test_topic', mock_read_inbox_func)
ready_for_message = True
time.sleep(0.03)
## Indicates the exit func on inbox_service was called when in "test_mode"
self.assertTrue(inbox_service.exited)
mock_kafka_consumer.commit.assert_not_called()
|
StarcoderdataPython
|
3374965
|
from django.test import TestCase
from mirrors.tests import create_mirror_url
class MirrorUrlTest(TestCase):
def setUp(self):
self.mirror_url = create_mirror_url()
def testAddressFamilies(self):
self.assertIsNotNone(self.mirror_url.address_families())
def testHostname(self):
self.assertEqual(self.mirror_url.hostname, 'archlinux.org')
def testGetAbsoluteUrl(self):
absolute_url = self.mirror_url.get_absolute_url()
expected = '/mirrors/%s/%d/' % (self.mirror_url.mirror.name, self.mirror_url.pk)
self.assertEqual(absolute_url, expected)
def test_mirror_overview(self):
response = self.client.get('/mirrors/')
self.assertEqual(response.status_code, 200)
self.assertIn(self.mirror_url.mirror.name, response.content.decode())
def testClean(self):
# TODO: add test for self.mirror_url.clean()
pass
def tearDown(self):
self.mirror_url.delete()
|
StarcoderdataPython
|
3383179
|
from .event import Event # noqa
from .producer import Producer # noqa
from .consumer import Consumer # noqa
from .decorators import event_subscriber, dispatch_event # noqa
from .dummy import * # noqa
|
StarcoderdataPython
|
86727
|
from game_stats import GameStats
from word_board import WordBoard
from view_cli import ViewCLI
# from view_html import ViewHTML
FILENAME = ''
VIEW = ''
WB = ''
GS = ''
def setup(fn='wgg/static/wordlists/popular9.txt'):
global FILENAME, VIEW, WB, GS
FILENAME = fn
VIEW = ViewCLI()
WB = WordBoard(FILENAME)
GS = GameStats()
def main_loop():
will_exit = False
while (not will_exit):
VIEW.v_board(WB.board)
has_answer = False
while not has_answer:
u_in = VIEW.v_user_in()
if ((u_in == '--ans') or (u_in == '--help')):
VIEW.v_result(WB.ans_word)
GS.process_failure()
has_answer = True
break
elif (WB.check_answer(u_in)):
VIEW.v_print("Correct")
GS.process_correct()
has_answer = True
break
elif (u_in == '--exit'):
VIEW.v_stats(GS.get_stats())
will_exit = True
break
else:
VIEW.v_print("Not correct, try again")
GS.process_incorrect()
VIEW.v_board(WB.board)
WB.generate_random_word()
if __name__ == '__main__':
setup('../static/wordlists/popular9.txt')
main_loop()
|
StarcoderdataPython
|
1669850
|
<reponame>wborbajr/eXchangeAPI-PY<filename>exchangeapi/routers/routers.py
from fastapi import APIRouter
from exchangeapi.routers.v1.items import router as items_v1_router
from exchangeapi.routers.v2.items import router as items_v2_router
router = APIRouter()
router.include_router(items_v1_router)
router.include_router(items_v2_router)
|
StarcoderdataPython
|
1609126
|
from loguru import logger
from fcutils.maths.signals import get_onset_offset
from data.dbase.io import load_bin
def get_triggers(session: dict, sampling_rate: int = 30000) -> dict:
"""
Get the time at which frame triggers occur in bonsai
"""
name = session["name"]
logger.debug(f'Getting bonsai trigger times for session "{name}"')
# load bin data
analog = load_bin(
session["ai_file_path"], nsigs=session["n_analog_channels"]
)
# check that the number of frames is correct
frames_onsets, frames_offsets = get_onset_offset(analog[:, 0], 2.5)
# check everything correct
if len(frames_onsets) != len(frames_offsets):
raise ValueError(
"Mismatch between number of frames onsets and offsets"
)
if len(frames_onsets) != session["n_frames"]:
raise ValueError(
"Mismatch between frame onsets and expected number of frames"
)
# align time stamps to bonsai cut sample
frames_onsets -= session["bonsai_cut_start"]
frames_offsets -= session["bonsai_cut_start"]
# get duration
n_samples = frames_offsets[-1] - frames_onsets[0]
duration_ms = n_samples / sampling_rate * 1000
# return results
return dict(
trigger_times=frames_onsets, n_samples=n_samples, n_ms=duration_ms
)
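# Usage sketch (added for illustration; the paths and numbers are placeholders and
# a real binary recording is required). The session dict only needs the keys this
# function actually reads:
#
#   session = dict(name='example', ai_file_path='data/example.bin',
#                  n_analog_channels=4, n_frames=1000, bonsai_cut_start=0)
#   triggers = get_triggers(session, sampling_rate=30000)
#   print(triggers['trigger_times'], triggers['n_samples'], triggers['n_ms'])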
|
StarcoderdataPython
|
117965
|
tweet_at = '@Attribution'
tweet_url = 'https://example.com/additional-url'
tweet_hashtag = '#MyHashtag'
tweet_data = [
{ 'image': 'https://example.com/image.jpg',
'id': 'image_id',
'title': 'Example title',
'desc': 'Example description with lots of text that probably goes well over the 280 character limit. The python script programmatically determines the length of this string based on the other inputs and cuts it off as necessary.'
}
]
|
StarcoderdataPython
|
1624084
|
<reponame>mrroach/CentralServer
from csrv.model.cards import card_info
from csrv.model.cards import ice
from csrv.model.actions import subroutines
from csrv.model.actions.subroutines import trash_a_program
class Card01064(ice.Ice):
NAME = u'Card01064'
SET = card_info.CORE
NUMBER = 64
SIDE = card_info.CORP
FACTION = card_info.ROBOCORP
INFLUENCE = 1
UNIQUE = False
KEYWORDS = set([
card_info.DESTROYER,
card_info.SENTRY,
])
COST = 4
IMAGE_SRC = '01064.png'
STRENGTH = 0
def __init__(self, game, player):
ice.Ice.__init__(self, game, player)
self.subroutines = [
trash_a_program.TrashAProgram(self.game, self.player),
subroutines.EndTheRun(self.game, self.player),
]
def build_actions(self):
ice.Ice.build_actions(self)
|
StarcoderdataPython
|
1600258
|
from sadie.airr import AirrTable
import pandas as pd
# read an AIRR table from a csv
airr_table_1 = AirrTable(pd.read_csv("PG9 AIRR.csv"))
# read from a json file
airr_table_2 = AirrTable(pd.read_json("PG9 AIRR.json", orient="records"))
# read from an excel file
airr_table_3 = AirrTable(pd.read_excel("PG9 AIRR.xlsx"))
# read from a parquet file that can also be read by spark
airr_table_4 = AirrTable(pd.read_parquet("PG9 AIRR.parquet"))
# read from a feather file that has rapid IO
airr_table_5 = AirrTable(pd.read_feather("PG9 AIRR.feather"))
# ensure they are all equal to each other
print(airr_table_1 == airr_table_2)
# == airr_table_3 == airr_table_4 == airr_table_5)
|
StarcoderdataPython
|
151327
|
<filename>gui/api_plugins/aff4_test.py<gh_stars>1-10
#!/usr/bin/env python
"""This modules contains tests for AFF4 API renderers."""
from grr.gui.api_plugins import aff4 as aff4_plugin
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
from grr.lib import utils
class ApiAff4RendererTest(test_lib.GRRBaseTest):
"""Test for ApiAff4Renderer."""
def setUp(self):
super(ApiAff4RendererTest, self).setUp()
self.renderer = aff4_plugin.ApiAff4Renderer()
def testRendersAff4ObjectWithGivenPath(self):
with test_lib.FakeTime(42):
with aff4.FACTORY.Create("aff4:/tmp/foo/bar", "AFF4Volume",
token=self.token) as _:
pass
result = self.renderer.Render(utils.DataObject(aff4_path="tmp/foo/bar",
token=self.token))
self.assertEqual(result["urn"], "aff4:/tmp/foo/bar")
self.assertEqual(result["aff4_class"], "AFF4Volume")
self.assertEqual(result["age_policy"], "NEWEST_TIME")
self.assertEqual(result["attributes"]["metadata:last"], 42000000)
class ApiAff4IndexRendererTest(test_lib.GRRBaseTest):
"""Test for ApiAff4IndexRendererTest."""
def setUp(self):
super(ApiAff4IndexRendererTest, self).setUp()
self.renderer = aff4_plugin.ApiAff4IndexRenderer()
def testReturnsChildrenListWithTimestamps(self):
with test_lib.FakeTime(42):
with aff4.FACTORY.Create("aff4:/tmp/foo/bar1", "AFF4Volume",
token=self.token) as _:
pass
with test_lib.FakeTime(43):
with aff4.FACTORY.Create("aff4:/tmp/foo/bar2", "AFF4Volume",
token=self.token) as _:
pass
result = self.renderer.Render(utils.DataObject(aff4_path="tmp/foo",
token=self.token))
result = sorted(result, key=lambda x: x[0])
self.assertEqual(result,
[["aff4:/tmp/foo/bar1", 42000000],
["aff4:/tmp/foo/bar2", 43000000]])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
StarcoderdataPython
|
4810912
|
<reponame>tfrdidi/mod_security_add_ids_to_rules<gh_stars>0
import sys
import os.path
# Mod_security decided to make IDs on rules mandatory with a certain update.
# This script is meant to solve the problem by assigning all rules of a file
# an ID starting with a certain value that is specified by the user.
#
# Call this script with "python addModSecurityIDs.py <pathToRuleFile> <firstIDToAssign>".
uninitialized = "uninitialized"
if len(sys.argv) < 3:
print "Call this script with 'python addModSecurityIDs.py <pathToRuleFile> <firstIDToAssign>'."
exit()
ruleFileName = sys.argv[1]
if not os.path.isfile(ruleFileName):
print ruleFileName + " is not a file."
exit()
startId = int(sys.argv[2])
print "try to update rule file " + ruleFileName
ruleFile = open(ruleFileName, 'r')
content = ruleFile.readlines()
ruleFile.close()
result = open("result.conf", "w")
previousLine = uninitialized
ruleId = startId
for line in content:
if not previousLine == uninitialized:
if "SecRule" in line and "chain" not in previousLine \
and "id:" not in line:
tmp = line.split()
print previousLine
print line
newLine = line[:len(line)-2] + ",id:" + str(ruleId) + line[len(line)-2:]
print newLine
result.write(newLine)
ruleId+=1
else:
result.write(line)
else:
result.write(line)
previousLine = line
|
StarcoderdataPython
|
159336
|
<gh_stars>10-100
from pygofile import Gofile
gofile = Gofile(token='')
|
StarcoderdataPython
|
3305024
|
#!/usr/bin/env python3
from math import *
import random
name = "TerrainB"
block_size = 0.4
block_size_z1 = 0.14
block_size_z2 = 0.247
block_color_r = 0.56
block_color_g = 0.56
block_color_b = 0.56
num_blocks_x = 12
num_blocks_y = 12
iniPos_x = 0.2
iniPos_y = 0.2
block_size2 = block_size/2.0
header = '''\
format: ChoreonoidBody
formatVersion: 1.0
angleUnit: degree
name: {name}
BLOCK: &BLOCK
type: Shape
appearance:
material:
diffuseColor: [ {r:.4}, {g:.4}, {b:.4} ]
geometry:
type: IndexedFaceSet
coordinate: [ -{xy:.5}, -{xy:.5}, 0.0,
{xy:.5}, -{xy:.5}, 0.0,
{xy:.5}, {xy:.5}, 0.0,
-{xy:.5}, {xy:.5}, 0.0,
-{xy:.5}, -{xy:.5}, {z1:.5},
{xy:.5}, -{xy:.5}, {z2:.5},
{xy:.5}, {xy:.5}, {z2:.5},
-{xy:.5}, {xy:.5}, {z1:.5} ]
coordIndex: [ 3, 2, 1, 0, -1,
4, 5, 6, 7, -1,
4, 0, 1, 5, -1,
5, 1, 2, 6, -1,
6, 2, 3, 7, -1,
7, 3, 0, 4, -1 ]
BASE_BLOCK: &BASE_BLOCK
type: Shape
appearance:
material:
diffuseColor: [ {r:.4}, {g:.4}, {b:.4} ]
geometry: {{ type: Box, size: [ {size:.5}, {size:.5}, {z1:.5} ] }}
'''
print(header.format(
name = name,
r = block_color_r,
g = block_color_g,
b = block_color_b,
xy = block_size2,
z1 = block_size_z1,
z2 = block_size_z2,
size = block_size
))
line_header_description = '''\
LINE{no}: &LINE{no}
'''
line_description_block = '''\
-
<<: *BLOCK
rotation: [ 0, 0, 1, {rot} ]
translation: [ {x:.7}, 0, {z:.7} ]
'''
line_description_base = '''\
-
<<: *BASE_BLOCK
translation: [ {x:.7}, 0, {z:.7} ]
'''
for i in range(num_blocks_y):
print(line_header_description.format(
no = i))
for j in range(num_blocks_x):
if i==0 or i==num_blocks_y-1 or j==0 or j==num_blocks_x-1:
z_ = 0 #random.randint(0,1)
else:
z_ = random.randint(0,2)
print(line_description_block.format(
rot = 90 * random.randint(0,3),
x = block_size * j,
z = z_ * block_size_z1
))
if z_==2:
print(line_description_base.format(
x = block_size * j,
z = block_size_z1 * 1.5
))
link_header_description = '''\
links:
-
name: 0
jointType: fixed
material: Ground
convexRecompostiion: true
elements:
-
type: Transform
translation: [ {ini_x:.5}, {ini_y:.5}, 0 ]
elements: *LINE0
'''
link_description = '''\
-
name: {index}
parent: 0
jointType: fixed
material: Ground
convexRecompostiion: true
translation: [ {ini_x:.5}, {y:.5}, 0 ]
elements: *LINE{index}
'''
print(link_header_description.format(
ini_x = iniPos_x,
ini_y = iniPos_y
))
for i in range(1, num_blocks_y):
print(link_description.format(
index = i,
ini_x = iniPos_x,
y = block_size * i + iniPos_y,
))
|
StarcoderdataPython
|
3325545
|
<filename>python/summarise_testing.py
#!/usr/bin/env python3
import pandas as pd
import cfg
def targets_info(fp, target):
data = pd.read_csv(fp)
print('targets information from: {}'.format(fp))
print(target)
print('val count')
print(data[target].value_counts(ascending=False))
z_count = 0
nz_count = 0
idx = 0
for cnt in data[target].value_counts(ascending=False):
if idx == 0:
z_count = cnt
else:
nz_count = nz_count + cnt
idx = idx + 1
print('{}={}%'.format(target, (nz_count / (nz_count + z_count))*100))
print()
def test_targets_info():
obs_fp = cfg.ensure_fp(cfg.observations_root, cfg.observations)
targets_info(obs_fp, 'wall_collisions')
test_obs_fp = cfg.observations_root + 'minimise_wall_collisions.csv'
targets_info(test_obs_fp, 'wall_collisions')
if __name__ == "__main__":
test_targets_info()
|
StarcoderdataPython
|
3398335
|
from typing import List
from pydantic import Field
from .schema import Schema, chainable
from .verdict import Verdict
class Assertion(Schema):
__root__: List[Verdict] = Field(min_items=1, max_items=256, default=[])
@property
def artifacts(self):
return self.__root__
@chainable
def add_artifact(self, verdict: Verdict):
self.__root__.append(verdict)
@chainable
def add_artifacts(self, verdicts: List[Verdict]):
self.__root__.extend(verdicts)
def __iter__(self):
return iter(self.__root__)
def __getitem__(self, item):
return self.__root__[item]
|
StarcoderdataPython
|
49173
|
<reponame>frost917/customer-manager
import json
from datetime import datetime
# Dict in List
def authFailedJson():
payload = dict()
convDict = dict()
convList = list()
convDict['error'] = "AuthFailed"
convDict['msg'] = "Authentication Failed!"
convList.append(convDict)
payload['failed'] = convList
return str(json.dumps(payload))
def dataMissingJson():
payload = dict()
convDict = dict()
convList = list()
convDict['error'] = "MissingData"
convDict['msg'] = "some data is missing!"
convList.append(convDict)
payload['failed'] = convList
return str(json.dumps(payload))
def customerNotFound(customerID):
convDict = dict()
convDict['error'] = 'CustomerNotFound'
convDict['msg'] = 'customer is not found!'
convDict['customerID'] = customerID
convList = list()
convList.append(convDict)
payload = dict()
payload['failed'] = convList
return str(json.dumps(payload))
def jobNotFound(jobID):
convDict = dict()
convDict['error'] = 'jobNotFound'
convDict['msg'] = 'job is not found!'
convDict['jobID'] = jobID
convList = list()
convList.append(convDict)
payload = dict()
payload['failed'] = convList
return str(json.dumps(payload))
def reserveNotFound(reserveID):
convDict = dict()
convDict['error'] = 'reserveNotFound'
convDict['msg'] = 'reserve is not found!'
convDict['reserveID'] = reserveID
convList = list()
convList.append(convDict)
payload = dict()
payload['failed'] = convList
return str(json.dumps(payload))
def dataNotJSON():
payload = dict()
convDict = dict()
convList = list()
convDict['error'] = "DataMustJSON"
convDict['msg'] = "data must be JSON object!"
convList.append(convDict)
payload['failed'] = convList
return str(json.dumps(payload))
def tokenInvalid():
payload = dict()
convDict = dict()
convList = list()
convDict['error'] = "TokenInvalid"
convDict['msg'] = "token is invalid!"
convList.append(convDict)
payload['failed'] = convList
return json.dumps(payload)
def databaseIsGone():
payload = dict()
convDict = dict()
convList = list()
convDict['error'] = "DatabaseIsGone"
convDict['msg'] = "database is dead!"
convDict['queryDate'] = datetime.now().strftime('%Y-%m-%d')
convList.append(convDict)
payload['failed'] = convList
return json.dumps(payload)
def redisIsGone():
payload = dict()
convDict = dict()
convList = list()
convDict['error'] = "redisIsGone"
convDict['msg'] = "redis is dead!"
convDict['queryDate'] = datetime.now().strftime('%Y-%m-%d')
convList.append(convDict)
payload['failed'] = convList
return json.dumps(payload)
def queryingResult(data: dict):
payload = dict()
|
StarcoderdataPython
|
1639911
|
<reponame>gsw945/web-static-server
# -*- coding: utf-8 -*-
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
from server import app, PORT
if __name__ == '__main__':
container = tornado.wsgi.WSGIContainer(app)
http_server = tornado.httpserver.HTTPServer(container)
print('listen at port [{0}]'.format(PORT))
http_server.listen(PORT, address='0.0.0.0')
tornado.ioloop.IOLoop.current().start()
|
StarcoderdataPython
|
3329118
|
# (C) 2015 - 2019 by <NAME>
# License: MIT
import os
import sys
from collections import defaultdict
from time import time, sleep
from datastalker import pythonwifi
import logging
log = logging.getLogger('root.hopper')
class Hopper(object):
"""
Handle all logic regarding channel hopping.
"""
def __init__(self, base_interface, related_interface,
stats, hop_tries=10):
self.base_interface = base_interface
self.related_interface = related_interface
self.stats = stats
self.wifi = None
self.reset_interface()
self.tries = hop_tries
def __del__(self):
del self.wifi
def reset_interface(self):
"Reset interface"
if self.wifi is not None:
del self.wifi
if self.related_interface:
log.info("Putting related interface (%s) down" % self.related_interface)
os.system('ifconfig %s down' % self.related_interface)
self.wifi = pythonwifi.Wireless(self.base_interface)
def configure(self, channels=None, max_karma=None):
self.freqs = {
1: '2.412GHz',
2: '2.417GHz',
3: '2.422GHz',
4: '2.427GHz',
5: '2.432GHz',
6: '2.437GHz',
7: '2.442GHz',
8: '2.447GHz',
9: '2.452GHz',
10: '2.457GHz',
11: '2.462GHz',
12: '2.467GHz',
13: '2.472GHz',
36: '5.180GHz',
40: '5.200GHz',
44: '5.220GHz',
48: '5.240GHz',
#(14, '2.484 Ghz'), # 14
}
# 5Mhz gap, 22MHz wide band.
# Hopping: 1,6,11; (+2) 3,8,13; (+1) 2,7,12; (+3); 4,10,[14],5,9
channels_default = [
1,6,11, 3,8,13, 2,7,12, 4,10,5,9
]
if channels is None:
self.channels = channels_default
else:
self.channels = channels
log.info("Hopping on the following channels: " +
", ".join(str(ch) for ch in self.channels))
if not self.channels:
print("ERROR: No channels selected for hopping")
return False
self.hop_total = 0
self.swipes_total = 0
self.channel_idx = 0
self.channel_number = -1 # Not yet known in fact
self.channel_cnt = len(self.channels)
self.channel_karma = 0
self.max_karma = max_karma
self.channel_inc = 0
self.took = 0
self.channel_swipe_start = time()
return True
def increase_karma(self):
"Current channel is nice - stay here longer"
if self.max_karma is None:
return
if self.channel_inc > self.max_karma:
self.stats.incr('hopper/karmic/saturated')
return
self.stats.incr('hopper/karmic/inc')
self.channel_karma += 1
self.channel_inc += 1
def hop(self):
"Unconditional channel hop"
self.channel_karma = 0
self.channel_inc = 0
start = time()
self.stats.incr('hopper/hops')
# Increment channel
self.channel_idx = (self.channel_idx + 1) % self.channel_cnt
self.channel_number = self.channels[self.channel_idx]
freq = self.freqs[self.channel_number]
# Update swipe statistics
if self.channel_idx == 0:
took = time() - self.channel_swipe_start
self.swipes_total += 1
self.channel_swipe_start = time()
self.stats.incr('hopper/swipe/total')
self.stats.incr('hopper/swipe/total_time', took)
# Tries must fit within watchdog limit.
last_exc = None
for i in range(0, self.tries):
try:
self.wifi.setFrequency(freq)
self.hop_total += 1
return True
except IOError as e:
s = 'Try {0}/{1}: Channel hopping failed (f={2} ch={3})'
log.info(s.format(i+1, self.tries,
freq, self.channel_number))
self.reset_interface()
self.stats.incr('hopper/fail/soft')
sleep(0.8)
last_exc = e
self.stats.incr('hopper/fail/hard')
log.info('Failure after %d failed hopping tries', self.tries)
if self.related_interface is None:
log.info('Try setting related interface or putting interface UP')
raise last_exc
return False
def karmic_hop(self):
"Hop to the next channel, take karma into account"
if self.channel_karma:
self.channel_karma -= 1
s = 'Staying a bit longer on {2}; karma={0} karma_inc={1}'
print(s.format(self.channel_karma,
self.channel_inc,
self.channel_number))
self.stats.incr('hopper/karmic/stay')
return True
self.stats.incr('hopper/karmic/hop')
return self.hop()
|
StarcoderdataPython
|
3295
|
<gh_stars>1-10
def pick_food(name):
if name == "chima":
return "chicken"
else:
return "dry food"
|
StarcoderdataPython
|
3358752
|
"""
Set of specific utilities for combining PySeg with Subtomogram Averaging tools (PyTom)
# Author: <NAME> (Max Planck Institute for Biochemistry)
# Date: 1.06.16
"""
__author__ = '<NAME>'
from .plist import TomoPeaks, SetTomoPeaks, Score, SingleTiltWedge, Particle, ParticleList, PK_COORDS
from .star import Star
from .fxml import XMLFilaments
|
StarcoderdataPython
|
144858
|
<filename>mitmproxy/proxy2/layers/modes.py
from abc import ABCMeta
from mitmproxy import platform
from mitmproxy.net import server_spec
from mitmproxy.proxy2 import commands, events, layer
from mitmproxy.proxy2.layers import tls
from mitmproxy.proxy2.utils import expect
class HttpProxy(layer.Layer):
@expect(events.Start)
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
child_layer = layer.NextLayer(self.context)
self._handle_event = child_layer.handle_event
yield from child_layer.handle_event(event)
class DestinationKnown(layer.Layer, metaclass=ABCMeta):
child_layer: layer.Layer
def finish_start(self):
if self.context.options.connection_strategy == "eager":
err = yield commands.OpenConnection(self.context.server)
if err:
yield commands.CloseConnection(self.context.client)
self._handle_event = self.done
return
self._handle_event = self.child_layer.handle_event
yield from self.child_layer.handle_event(events.Start())
@expect(events.DataReceived, events.ConnectionClosed)
def done(self, _) -> layer.CommandGenerator[None]:
yield from ()
class ReverseProxy(DestinationKnown):
@expect(events.Start)
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
spec = server_spec.parse_with_mode(self.context.options.mode)[1]
self.context.server.address = spec.address
if spec.scheme not in ("http", "tcp"):
if not self.context.options.keep_host_header:
self.context.server.sni = spec.address[0].encode()
self.child_layer = tls.ServerTLSLayer(self.context)
else:
self.child_layer = layer.NextLayer(self.context)
yield from self.finish_start()
class TransparentProxy(DestinationKnown):
@expect(events.Start)
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
assert platform.original_addr is not None
socket = yield commands.GetSocket(self.context.client)
try:
self.context.server.address = platform.original_addr(socket)
except Exception as e:
yield commands.Log(f"Transparent mode failure: {e!r}")
self.child_layer = layer.NextLayer(self.context)
yield from self.finish_start()
|
StarcoderdataPython
|
41481
|
"""
Add `Django Filebrowser`_ to your project so you can use a centralized interface to manage the uploaded files to be used with other components (`cms`_, `zinnia`_, etc.).
The version used is a special version called *no grappelli* that can be used outside of the *django-grappelli* environment.
Filebrowser manages files with a nice interface to centralize them and also manages image resizing versions (original, small, medium, etc.); you can edit these versions or add new ones in the settings.
.. note::
    Don't try to use other resizing apps like sorl-thumbnails or easy-thumbnails; they will not work with Image fields managed by Filebrowser.
"""
|
StarcoderdataPython
|
1780914
|
import os, sys, math
import torch
import matplotlib.pyplot as plt
def convert_grid2prob(grid, threshold=0.1, temperature=1):
threshold = torch.max(grid) - threshold*(torch.max(grid)-torch.min(grid))
grid[grid>threshold] = torch.tensor(float('inf'))
prob = torch.exp(-temperature*grid) / torch.sum(torch.exp(-temperature*grid))
return prob
def convert_coords2px(coords, x_range, y_range, x_max_px, y_max_px, y_flip=False):
if not isinstance(x_range, (tuple, list)):
x_range = (0, x_range)
if not isinstance(y_range, (tuple, list)):
y_range = (0, y_range)
x_ratio_coords2idx = x_max_px / (x_range[1]-x_range[0])
y_ratio_coords2idx = y_max_px / (y_range[1]-y_range[0])
px_idx_x = coords[:,0]*x_ratio_coords2idx
if y_flip:
px_idx_y = y_max_px-coords[:,1]*y_ratio_coords2idx
else:
px_idx_y = coords[:,1]*y_ratio_coords2idx
px_idx_x[px_idx_x>=x_max_px] = x_max_px-1
px_idx_y[px_idx_y>=y_max_px] = y_max_px-1
px_idx_x[px_idx_x<0] = 0
px_idx_y[px_idx_y<0] = 0
px_idx = torch.stack((px_idx_x, px_idx_y), dim=1)
return px_idx.int()
def convert_px2cell(pxs, x_grid, y_grid, device='cuda'): # pixel to grid cell index
cell_idx = torch.zeros_like(pxs)
for i in range(pxs.shape[0]):
cell_idx[i,0] = torch.where( pxs[i,0]>=torch.tensor(x_grid).to(device) )[0][-1]
cell_idx[i,1] = torch.where( pxs[i,1]>=torch.tensor(y_grid).to(device) )[0][-1]
return cell_idx.int()
def get_weight(grid, index, sigma=1, rho=0):
# grid is HxW
# index is a pair of numbers
# return weight in [0,1]
grid = grid.cpu()
index = index.cpu()
if sigma <= 0: # one-hot
weight = torch.zeros_like(grid)
weight[index[1],index[0]] = 1
return weight
if not isinstance(sigma, (tuple, list)):
sigma = (sigma, sigma)
sigma_x, sigma_y = sigma[0], sigma[1]
x = torch.arange(0, grid.shape[0])
y = torch.arange(0, grid.shape[1])
x, y = torch.meshgrid(x, y)
    in_exp = -1/(2*(1-rho**2)) * ((x-index[1])**2/(sigma_x**2)
                                  + (y-index[0])**2/(sigma_y**2)
                                  - 2*rho*(x-index[1])/(sigma_x)*(y-index[0])/(sigma_y))  # cross term uses the same (row, col) pairing as the squared terms
z = 1/(2*math.pi*sigma_x*sigma_y*math.sqrt(1-rho**2)) * torch.exp(in_exp)
weight = z/z.max()
weight[weight<0.1] = 0
return weight
def loss_nll(data, label, device='cuda'):
# data is the energy grid, label should be the index (i,j) meaning which grid to choose
# data - BxCxHxW
# label - BxC
weight = torch.tensor([]).to(device) # in batch
for i in range(data.shape[0]):
w = get_weight(data[i,0,:,:], label[i,:])
weight = torch.cat((weight, w.unsqueeze(0).to(device))) # Gaussian fashion [CxHxW]
numerator_in_log = torch.logsumexp(-data+torch.log(weight.unsqueeze(1)), dim=(2,3))
denominator_in_log = torch.logsumexp(-data, dim=(2,3))
l2 = torch.sum(torch.pow(data,2),dim=(2,3)) / (data.shape[2]*data.shape[3])
nll = - numerator_in_log + denominator_in_log + 0.00*l2
return torch.mean(nll)
def loss_mse(data, labels): # for batch
# data, labels - BxMxC
squared_diff = torch.square(data-labels)
squared_sum = torch.sum(squared_diff, dim=2) # BxM
loss = squared_sum/data.shape[0] # BxM
return loss
def loss_msle(data, labels): # for batch
# data, labels - BxMxC
squared_diff = torch.square(torch.log(data)-torch.log(labels))
squared_sum = torch.sum(squared_diff, dim=2) # BxM
loss = squared_sum/data.shape[0] # BxM
return loss
def loss_mae(data, labels): # for batch
# data, labels - BxMxC
abs_diff = torch.abs(data-labels)
abs_sum = torch.sum(abs_diff, dim=2) # BxM
loss = abs_sum/data.shape[0] # BxM
return loss
if __name__ == '__main__':
import numpy as np
from pathlib import Path
from torchvision import transforms
sys.path.append(str(Path(__file__).resolve().parents[1]))
from data_handle.data_handler import ToTensor, Rescale
from data_handle.data_handler import ImageStackDataset, DataHandler
project_dir = Path(__file__).resolve().parents[2]
data_dir = os.path.join(project_dir, 'Data/MAD_1n1e')
csv_path = os.path.join(project_dir, 'Data/MAD_1n1e/all_data.csv')
composed = transforms.Compose([Rescale((200,200), tolabel=False), ToTensor()])
dataset = ImageStackDataset(csv_path=csv_path, root_dir=data_dir, channel_per_image=1, transform=composed, T_channel=False)
myDH = DataHandler(dataset, batch_size=2, shuffle=False, validation_prop=0.2, validation_cache=5)
img = torch.cat((dataset[0]['image'].unsqueeze(0), dataset[1]['image'].unsqueeze(0)), dim=0) # BxCxHxW
label = torch.cat((dataset[0]['label'].unsqueeze(0), dataset[1]['label'].unsqueeze(0)), dim=0)
print(img.shape)
print(label)
x_grid = np.arange(0, 201, 8)
y_grid = np.arange(0, 201, 8)
px_idx = convert_coords2px(label, 10, 10, img.shape[3], img.shape[2])
print('Pixel index:', px_idx)
cell_idx = convert_px2cell(px_idx, x_grid, y_grid, device='cpu') # (xmin ymin xmax ymax)
print('Cell index:', cell_idx)
### Random grid
grid = torch.ones((2,1,25,25)) # BxCxHxW
grid[0,0,17,12] = 0
loss = loss_nll(data=grid, label=cell_idx, device='cpu')
print('Loss:', loss)
### Visualization
fig, axes = plt.subplots(2,2)
(ax1,ax3,ax2,ax4) = (axes[0,0],axes[0,1],axes[1,0],axes[1,1])
ax1.imshow(img[0,0,:,:], cmap='gray')
ax1.plot(px_idx[0,0], px_idx[0,1], 'rx')
ax1.set_xticks(x_grid)
ax1.set_yticks(y_grid)
ax1.grid(linestyle=':')
ax2.imshow(grid[0,0,:,:], cmap='gray')
ax2.plot(cell_idx[0,0], cell_idx[0,1], 'rx')
ax3.imshow(img[1,0,:,:], cmap='gray')
ax3.plot(px_idx[1,0], px_idx[1,1], 'rx')
ax3.set_xticks(x_grid)
ax3.set_yticks(y_grid)
ax3.grid(linestyle=':')
ax4.imshow(grid[1,0,:,:], cmap='gray')
ax4.plot(cell_idx[1,0], cell_idx[1,1], 'rx')
plt.show()
|
StarcoderdataPython
|
148027
|
from __future__ import print_function, absolute_import
import sys
import os
sys.path.append(os.path.dirname(__file__))
__name__='conda_pack'
from .core import CondaEnv, File, CondaPackException, pack
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from .cli import main
main()
|
StarcoderdataPython
|
4806325
|
<filename>src/io_scene_bl4/pbdf.py<gh_stars>0
"""
Provides methods to work with the PBDF (Pod Binary Data File) encryption.
"""
from .binary import *
def retrieve_key(file, file_size):
"""Retrieves the XOR encryption key from the given file.
Args:
file: The encrypted input file.
file_size (int): The size of the input file in bytes.
Returns:
int: The XOR encryption key.
"""
return read_int32(file) ^ file_size
def retrieve_block_size(file, file_size, key):
"""Retrieves the size of blocks the given file is using.
Args:
file: The encrypted input file.
file_size (int): The size of the input file in bytes.
key (int): The XOR encryption key. Use retrieve_key() if unknown.
Returns:
int: The block size in bytes at which end a checksum is placed.
"""
file_pos = file.tell()
checksum = 0
while file_pos < file_size - 4:
dword = read_uint32(file)
file_pos = file.tell()
if dword == checksum & 0xFFFFFFFF and file_size % file_pos == 0:
return file_pos
checksum = (checksum + (dword ^ key))
raise AssertionError("Could not determine PBDF block size.")
def decrypt(in_file, out_file, key, block_size):
"""Decrypts the data in the input file and writes it to the output file.
Args:
in_file: The encrypted input file.
out_file: The output file receiving the decrypted content.
key (int): The XOR encryption key. Use retrieve_key() if unknown.
block_size (int): The block size in bytes at which end a checksum is placed.
Use retrieve_block_size() if unknown.
"""
block = memoryview(bytearray(block_size)).cast("I") # Requires little endian
block_index = 0
block_data_dword_count = block_size // 4 - 1
while True:
# Process a block.
if in_file.readinto(block) != block_size:
break
checksum = 0
if block_index == 0 or key not in [0x00005CA8, 0x0000D13F]:
# First block and most keys always use the default XOR encryption.
for i in range(block_data_dword_count):
block[i] ^= key
checksum += block[i]
else:
# Starting with the second block, specific keys use a special encryption.
last_value = 0
for i in range(block_data_dword_count):
key_value = 0
command = last_value >> 16 & 3
if command == 0:
key_value = last_value - 0x50A4A89D
elif command == 1:
key_value = <KEY> - last_value
elif command == 2:
key_value = (last_value + 0x07091971) << 1
elif command == 3:
key_value = (0x11E67319 - last_value) << 1
last_value = block[i]
command = last_value & 3
if command == 0:
block[i] = (~block[i] ^ key_value) & 0xFFFFFFFF
elif command == 1:
block[i] = (~block[i] ^ ~key_value) & 0xFFFFFFFF
elif command == 2:
block[i] = (block[i] ^ ~key_value) & 0xFFFFFFFF
elif command == 3:
block[i] = (block[i] ^ key_value ^ 0xFFFF) & 0xFFFFFFFF
checksum += block[i]
# Validate the checksum and write the decrypted block.
if checksum & 0xFFFFFFFF != block[-1]:
raise AssertionError("Invalid PBDF block checksum.")
out_file.write(block[:-1])
block_index += 1
def encrypt(in_file, out_file, key, block_size):
"""Encrypts the data in the input file and writes it to the output file.
Args:
in_file: The decrypted input file.
out_file: The output file receiving the encrypted content.
key (int): The XOR encryption key.
block_size (int): The block size in bytes at which end a checksum is placed.
"""
block_data_size = block_size - 4
block = memoryview(bytearray(block_data_size)).cast("I") # Requires little endian
block_index = 0
block_data_dword_count = block_data_size // 4
while True:
# Process a block.
if in_file.readinto(block) != block_data_size:
break
checksum = 0
if block_index == 0 or key not in [0x00005CA8, <KEY>]:
# First block and most keys always use the default XOR encryption.
for i in range(block_data_dword_count):
checksum += block[i]
block[i] ^= key
else:
# Starting with the second block, specific keys use a special encryption.
last_value = 0
for i in range(block_data_dword_count):
checksum += block[i]
key_value = 0
command = last_value >> 16 & 3
if command == 0:
key_value = last_value - 0x50A4A89D
elif command == 1:
key_value = 0x3AF70BC4 - last_value
elif command == 2:
key_value = (last_value + 0x07091971) << 1
elif command == 3:
key_value = (0x11E67319 - last_value) << 1
command = last_value & 3
if command == 0:
block[i] = (~block[i] ^ key_value) & 0xFFFFFFFF
elif command == 1:
block[i] = (~block[i] ^ ~key_value) & 0xFFFFFFFF
elif command == 2:
block[i] = (block[i] ^ ~key_value) & 0xFFFFFFFF
elif command == 3:
block[i] = (block[i] ^ key_value ^ 0xFFFF) & 0xFFFFFFFF
last_value = block[i]
# Add the checksum and write the encrypted block.
out_file.write(block)
write_int32(out_file, checksum & 0xFFFFFFFF)
block_index += 1
def read_header_offsets(file, block_size):
"""Reads the PBDF header (not checking the included file size) and returns the list of offsets adjusted to match
decrypted data positions.
Args:
file: The decrypted input file.
block_size (int): The block size in bytes at which end a checksum is placed.
Returns:
The list of offsets, adjusted to point to decrypted file positions.
"""
_ = read_int32(file)
num_offsets = read_int32(file)
offsets = list(read_int32s(file, num_offsets))
for i in range(num_offsets):
offsets[i] -= offsets[i] // block_size * 4
return offsets
def write_header_offsets(file, block_size, offsets, data_size):
"""Writes the PBDF file header with adjusted offsets to point to encrypted data positions.
Args:
file: The file to write the header to.
block_size (int): The block size in bytes at which end a checksum is placed.
offsets: The list of offsets which will be adjusted to point to encrypted data positions.
data_size: The size of the file data (excluding the header) in bytes.
"""
header_size = (2 + len(offsets)) * 4 # file_size + num_offsets + offsets
file_size = header_size + data_size
num_blocks = data_size // block_size + 1 # Adjust to next complete block
file_size += num_blocks * 4 # Adjust for checksums at the end of each block.
    write_int32(file, file_size)
    write_int32(file, len(offsets))  # the reader expects num_offsets right after file_size
    block_data_size = block_size - 4
    for i in range(len(offsets)):
        offset = offsets[i]
        offset += header_size
        offset += offset // block_data_size * 4
        write_int32(file, offset)
def read_string(file):
return bytes(
(c ^ ~i) & 0xFF for i, c in enumerate(file.read(read_byte(file)))
).decode("cp437") # May be cp1252, but some fan made tracks have borked strings requiring a safe CP.
|
StarcoderdataPython
|
3339278
|
from django.shortcuts import render
def showLogin(request, **kargs):
return render(request, 'Scouting2016/login.html', context=kargs)
|
StarcoderdataPython
|
1657820
|
#
# The fifteenth simple parser, used for benchmarking parsing speed.
#
# Author: <NAME>
# License: MIT License
#
from bs4 import BeautifulSoup
from .parsers_base import get_htmls, get_html
URL = 'https://www.google.com/search?newwindow=1&hl=ru&sxsrf=ACYBGNQVOJ5xq1L-uxtvBuQXhZ7X-nxa0g%3A1581684802586&ei=Qp' \
'hGXtuzI8avrgTLrKnIAg&q=%D1%81%D0%BA%D0%BE%D0%BB%D1%8C%D0%BA%D0%BE+%D0%B2%D0%B5%D1%88%D0%B0%D1%82%D1%8C+%D0%B2+' \
'%D0%B3%D1%80%D0%B0%D0%BC%D0%BC%D0%B0%D1%85&oq=%D1%81%D0%BA%D0%BE%D0%BB%D1%8C%D0%BA%D0%BE+%D0%B2%D0%B5%D1%88%D0' \
'%B0&gs_l=psy-ab.3.0.0j0i10l9.80411.82970..84514...0.0..0.128.1529.0j13......0....1..gws-wiz.......35i39j0i131j' \
'0i67j0i20i263.MTKe0aQ0aRk'
def parse_fifteenth():
return get_pages_data(get_htmls(get_urls(URL)))
def get_urls(url):
res = []
for i in range(10):
res.append(url)
return res
def get_pages_data(htmls):
"""
    Takes a list of HTML documents.
    Returns a list of strings with the data parsed from each page.
    :param htmls: list of HTML documents
    :type htmls: list of str
    :return: list of str with the data parsed from the pages
    """
    data = []  # container for the values to return
    for html in htmls:  # go through each HTML document in the list and extract its data
        page_data = get_page_data(html)  # get the list of data found in this HTML document
        for pd in page_data:  # walk over every element of that list
            data.append(pd)  # append the item to the list of data to return
    return data  # list with all the data parsed from the given list of HTML documents
def get_page_data(html):
"""
    Takes an HTML document, parses it, and extracts the search result titles
    :param html: HTML document
:type html: str
:return: list of str
"""
res = []
    soup = BeautifulSoup(html, 'lxml')  # build the soup object
    raw_titles = soup.find_all('div', 'BNeawe vvjwJb AP7Wnd')  # find the search result titles
for raw in raw_titles:
res.append(raw.text)
return res
if __name__ == '__main__':
# print(get_html(URL))
    print(parse_fifteenth())
|
StarcoderdataPython
|
1686141
|
<filename>laaso/_subscription_ids.py
#
# laaso/_subscription_ids.py
#
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#
'''
Implement an object that can pull subscription_ids from the config
as logical names and return them as str-as-uuid.
This is done in a separate file to keep dependencies linear and simple.
'''
from ._scfg import scfg
from ._subscriptions import subscription_mapper
class SubscriptionIdCacher():
'''
Exports read-only attributes that map strings
in the scfg to effective subscription IDs.
Effective IDs are UUIDs-as-strings if the input
is a UUID-as-string, or if the input is a string
that is a known alias for a subscription.
Otherwise, the effective ID is the input ID.
This exists to do just-in-time evaluation to handle
codepaths that alter the defaults, such as
subscription_setup.py or unit tests.
'''
def __getattr__(self, name):
return self.translate(getattr(scfg, name))
@staticmethod
def translate(name):
'''
Translate the given subscription name.
This is a best-effort conversion to UUID-as-str.
Otherwise, just returns name.
'''
return subscription_mapper.effective(name)
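# Illustrative usage sketch (not part of the original module); the attribute
# name 'default_subscription' below is an assumption about what scfg exposes.
#
#     ids = SubscriptionIdCacher()
#     ids.translate('my-subscription-alias')   # -> UUID-as-str when the alias is known
#     ids.default_subscription                 # resolves scfg.default_subscription the same way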
|
StarcoderdataPython
|
3370569
|
import logging
import uuid
from dataclasses import dataclass
from datetime import datetime
from sqlalchemy import Column, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
@dataclass
class JobPosting(Base):
""" Data model of Job Postings """
id: str
external_id: str
url: str
origin_url: str
source: str
title: str
company_id: str
company_name: str
location_string: str
posted_datetime: datetime
job_description: str
created_datetime: datetime
updated_datetime: datetime
__tablename__ = "job_posting"
id = Column(String(256), primary_key=True)
external_id = Column(String(256))
url = Column(String(256))
origin_url = Column(String(2048))
source = Column(String(256))
title = Column(String(256))
company_id = Column(String(50))
company_name = Column(String(256))
location_string = Column(String(256))
posted_datetime = Column(DateTime)
job_description = Column(String(10000))
created_datetime = Column(DateTime)
updated_datetime = Column(DateTime)
def __init__(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, str):
try:
self.__setattr__(k, datetime.strptime(v, "%Y-%m-%dT%H:%M:%S%z"))
except ValueError: # This exception is expected if the string is not a ISO8601 datetime
self.__setattr__(k, v)
else:
self.__setattr__(k, v)
@classmethod
def get(cls, session, job_posting_id):
return session.query(cls).filter(cls.id == job_posting_id).one_or_none()
@classmethod
def get_by_external_id(cls, session, source, external_id):
return session.query(cls).filter(cls.source == source, cls.external_id == external_id).one_or_none()
@classmethod
def get_by_url(cls, session, url):
return session.query(cls).filter(cls.url == url).one_or_none()
@classmethod
def get_by_origin_url(cls, session, origin_url):
return session.query(cls).filter(cls.origin_url == origin_url).one_or_none()
@classmethod
def create(cls, session, **kwargs):
if 'id' not in kwargs:
kwargs['id'] = str(uuid.uuid4())
job_posting = cls(**kwargs)
job_posting.created_datetime = datetime.now()
session.add(job_posting)
return job_posting
@classmethod
def update(cls, session, job_posting_id, **kwargs):
logging.debug(f'update is called with {job_posting_id}, {kwargs}')
job_posting = session.query(cls).filter(cls.id == job_posting_id).one()
if not job_posting:
raise ValueError(u'job posting with id %s does not exist', job_posting_id)
for k, v in kwargs.items():
if type(v) == str:
setattr(job_posting, k, cls._cleansing_string(v))
else:
setattr(job_posting, k, v)
job_posting.updated_datetime = datetime.now()
session.add(job_posting)
return job_posting
@classmethod
def delete(cls, session, job_posting_id):
job_posting = session.query(cls).filter(cls.id == job_posting_id).one()
if not job_posting:
raise ValueError(u'job posting with id %s does not exist', job_posting_id)
session.delete(job_posting)
return job_posting
@classmethod
def _cleansing_string(cls, content: str) -> str:
return content.replace(u'\ufeff', '')
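# Minimal usage sketch (not part of the original module). The in-memory SQLite
# URL and the field values below are illustrative assumptions only.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine("sqlite:///:memory:")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    posting = JobPosting.create(session, source="example", external_id="42",
                                title="Backend Engineer", url="https://example.com/jobs/42")
    session.commit()
    print(JobPosting.get(session, posting.id).title)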
|
StarcoderdataPython
|
107138
|
"""This module provides classes for performing syntactic aggregation.
For example, 'Roman is programming.' and 'Roman is singing' can be
put together to create 'Roman is programming and singing.'
"""
import logging
from copy import deepcopy
from nlglib.features import NUMBER, category
from nlglib.macroplanning import Document, Paragraph
from nlglib.microplanning import *
class ElementError(Exception):
pass
class AggregationError(Exception):
pass
class SentenceAggregator:
"""Sentence aggregator looks for similarly looking syntactic structures
and aggregates them together to decrease repetition.
"""
def __init__(self, logger=None):
"""Create a new SentenceAggregator. """
self.logger = logger or logging.getLogger(__name__)
def __call__(self, msg, **kwargs):
return self.aggregate(msg, **kwargs)
def aggregate(self, msg, **kwargs):
"""Perform aggregation on the message depending on its category.
If the object has attribute 'aggregate', it will be called with args (self, **kwargs).
Otherwise, get the object's category (`msg.category`) or type name
and try to look up the attribute with the same name in `self` (dynamic dispatch).
List, set and tuple are aggregated by `element_list()`. Lastly,
if no method matches, return `msg`.
"""
cat = msg.category if hasattr(msg, 'category') else type(msg).__name__
self.logger.debug('Aggregating {0}: {1}'.format(cat, repr(msg)))
if msg is None:
return None
if hasattr(msg, 'aggregate'):
return msg.aggregate(self, **kwargs)
# support dynamic dispatching
attribute = cat.lower()
if hasattr(self, attribute):
fn = getattr(self, attribute)
return fn(msg, **kwargs)
elif isinstance(msg, (list, set, tuple)):
return self.element_list(msg, **kwargs)
else:
return msg
def document(self, doc, **kwargs):
""" Perform aggregation on a document - possibly before lexicalisation. """
self.logger.debug('Aggregating document.')
if doc is None: return None
title = self.aggregate(doc.title, **kwargs)
sections = [self.aggregate(x, **kwargs) for x in doc.sections if x is not None]
return Document(title, *sections)
def paragraph(self, para, **kwargs):
""" Perform syntactic aggregation on the constituents. """
self.logger.debug('Aggregating paragraph.')
if para is None: return None
messages = [self.aggregate(x, **kwargs) for x in para.messages if x is not None]
return Paragraph(*messages)
def element_list(self, element, marker='and', **kwargs):
self.logger.debug('Aggregating a list')
elements = []
if len(element) > 1:
elements = self.synt_aggregation([self.aggregate(x, **kwargs) for x in element],
marker=marker)
elif len(element) == 1:
elements.append(self.aggregate(element[0]))
return elements
def clause(self, clause, **kwargs):
"""Check if clause contains a coordinated element and if so, aggregate. """
self.logger.debug('Aggregating a clause:\n' + repr(clause))
subj = self.aggregate(clause.subject, **kwargs)
obj = self.aggregate(clause.complements, **kwargs)
vp = self.aggregate(clause.predicate, **kwargs)
vp.features.update(clause.predicate.features)
c = deepcopy(clause)
c.subject = subj
c.predicate = vp
c.complements = obj
self.logger.debug('...result:\n' + repr(c))
return c
def coordination(self, cc, **kwargs):
self.logger.debug('Aggregating coordination.')
coords = self.element_list(cc.coords, marker=cc.conj, **kwargs)
if len(coords) == 1:
result = coords[0]
result.features.update(cc.features)
return result
else:
return Coordination(*coords, conj=cc.conj, features=cc.features)
def add_elements(self, lhs, rhs, conj='and', **kwargs):
if lhs.category == rhs.category == category.NOUN_PHRASE:
return self.aggregate_noun_phrase(lhs, rhs, **kwargs)
e1 = deepcopy(lhs)
e2 = deepcopy(rhs)
if e1.category == category.COORDINATION:
cc = e1
if e2 not in cc.coords:
cc.coords.append(e2)
elif isinstance(e2, Coordination):
cc = e2
if e1 not in cc.coords:
cc.coords.append(e1)
else:
cc = Coordination(e1, e2, conj=conj)
cc[NUMBER] = NUMBER.plural
return cc
def aggregate_noun_phrase(self, lhs, rhs, **kwargs):
"""Aggregate two noun phrases"""
del kwargs # unused for now
if lhs.head == rhs.head:
rv = deepcopy(lhs)
rv.premodifiers.extend(rhs.premodifiers)
return rv
elif lhs.premodifiers == rhs.premodifiers:
rv = deepcopy(lhs)
rv.head = CC(deepcopy(lhs.head), deepcopy(rhs.head), features={'NUMBER': 'plural'})
return rv
else:
return CC(deepcopy(lhs), deepcopy(rhs), features={'NUMBER': 'plural'})
def try_to_aggregate(self, sent1, sent2, marker='and', **kwargs):
""" Attempt to combine two elements into one by replacing the differing
elements by a conjunction.
"""
del kwargs # unused for now
if sent1 is None or sent2 is None:
return None
replacement = Var("REPLACEMENT", "REPLACEMENT")
for e1 in sentence_iterator(sent1):
s1 = deepcopy(sent1)
s1.replace(e1, replacement) # replace one element
for e2 in sentence_iterator(sent2):
s2 = deepcopy(sent2)
s2.replace(e2, replacement) # replace one element
if s1 == s2:
self.logger.debug('Aggregating:\n\t%s\n\t%s' % (str(s1), str(s2)))
cc = self.add_elements(e1, e2, conj=marker)
s1.replace(replacement, cc)
self.logger.debug('Result of aggregation:\n%s' % repr(s1))
return s1
return None
def synt_aggregation(self, elements, max=3, marker='and', **kwargs):
""" Take a list of elements and combine elements that are synt. similar.
elements - a list of elements to combine
max - a maximum number of elements to aggregate
The algorithm relies on shared structure of the elements. If, for
example, two elements share the subject, combine the VPs into
a conjunction. Do not combine more than 'max' elements into each other.
"""
if elements is None: return
if len(elements) < 2: return elements
self.logger.debug('performing synt. aggr on:\n' + repr(elements))
aggregated = []
# first try partial syntax aggregation (e.g., clause + adj, etc)
# assume format [clause, mod, mod, clause, clause, mod, clause, mod, mod...]
new_elements = []
i = 0
while i < len(elements):
e = elements[i]
if is_clause_type(e) and i + 1 < len(elements):
try:
next = elements[i + 1]
while not is_clause_type(next):
e = e + next
i += 1
if i + 1 >= len(elements):
break
next = elements[i + 1]
except AggregationError:
pass
new_elements.append(e)
i += 1
if len(new_elements) < 2:
return new_elements
i = 0
while i < len(new_elements):
msg, increment = self._do_aggregate(new_elements, i, max, marker, **kwargs)
if isinstance(msg, Clause):
if ('PROPER', 'true') in msg.subject.features:
msg.predicate['NUMBER'] = 'SINGULAR'
aggregated.append(msg)
i += increment
return aggregated
def _do_aggregate(self, elements, i, max, marker='and', **kwargs):
del kwargs # unused for now
lhs = elements[i]
j = i + 1
increment = 1
while j < len(elements) and self._can_aggregate(lhs, max):
self.logger.debug('LHS = %s' % lhs)
rhs = elements[j]
if self._can_aggregate(rhs, max):
tmp = self.try_to_aggregate(lhs, rhs, marker)
if tmp is not None:
lhs = tmp
increment += 1
j += 1
else:
break
# cannot aggregate. can we skip it?
elif self._can_skip(elements, j):
j += 1
increment += 1
else:
break
return lhs, increment
def _can_aggregate(self, message, max):
""" Return true if this message can be aggregated.
        max - maximum number of coordinates in a coordinated clause
If the message does not have a coordinated clause or if the number
of coordinates is less then 'max', return True.
"""
if message is None: return False
for part in sentence_iterator(message):
if not isinstance(part, Coordination):
continue
else:
return len(part.coords) < max
# simple sentence - no coordinated clause
return True
def _can_skip(self, elements, j):
""" Return true if this element can be skipped. """
return elements[j] is None
class DifficultyEstimator:
"""Most basic difficulty estimator that returns 0 for any structure,
resulting in always aggregating syntax trees if possible.
In general, aggregating elements will increase the reading/comprehension
difficulty of the resulting element. A subclass of the DifficultyEstimator
could assess the difficulty of an element by, for example, counting
non-stop words and the length of the sentence.
"""
threshold = 1
def estimate(self, element, context):
del element, context # unused for now
return 0
def can_aggregate(self, first, second, context):
"""We can aggregate elements if their combined estimate
is less than or equal to the threshold.
"""
return self.estimate(first, context) + self.estimate(second, context) <= self.threshold
class AmbiguityEstimator:
"""Most basic ambiguity estimator that returns 0 for any structure,
resulting in always aggregating syntax trees if possible.
In general, aggregating elements can introduce ambiguities
or even mislead readers. For example, consider:
John bought a house. Peter bought a house.
==> John and Peter bought a house.
We should try to keep ambiguities low by, for example,
adding some more information as in:
==> John and Peter each bought a house.
"""
threshold = 1
def estimate(self, element, context):
del element, context # unused for now
return 0
def can_aggregate(self, first, second, context):
"""We can aggregate elements if their combined estimate
is less than or equal to the threshold.
"""
return self.estimate(first, context) + self.estimate(second, context) <= self.threshold
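# Illustrative subclass sketch (not part of the original module): the docstrings
# above suggest estimating difficulty from sentence length. str(element) is used
# here as a crude proxy for the realized text, which is an assumption about the
# element API, and the threshold of 25 words is likewise an arbitrary choice.
class WordCountDifficultyEstimator(DifficultyEstimator):
    threshold = 25  # assumed maximum combined word count of two aggregated elements

    def estimate(self, element, context):
        del context  # unused, as in the base class
        return len(str(element).split())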
|
StarcoderdataPython
|
3317352
|
import unittest
from textblob import TextBlob
def translate(text, from_l, to_l):
en_blob = TextBlob(text)
return en_blob.translate(from_lang=from_l, to=to_l)
translate(text='muy bien', from_l='es', to_l='en')
print(translate('Hello', 'en', 'es'))
class TestMethods(unittest.TestCase):
def test_get_lemmas(self):
result = TextBlob("Hola")
self.assertEqual(translate('Hello', 'en', 'es'), result)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1771879
|
from .bot import Bot
from .user import User
from vkbottle.framework.framework.branch import (
Branch,
ExitBranch,
AbstractBranch,
ClsBranch,
CoroutineBranch,
)
from vkbottle.framework.framework.handler import Handler, Middleware
from .framework import rule, swear
|
StarcoderdataPython
|
1669874
|
<reponame>vishalbelsare/diffusion-maps<filename>diffusion_maps/__init__.py
"""Diffusion maps module.
"""
from .diffusion_maps import *
from .geometric_harmonics import *
from .plot import *
from .version import *
|
StarcoderdataPython
|
3396450
|
<reponame>siddharth-143/Python
"""
Cartooning an Image
"""
# importing libraries
import cv2
import numpy as np
# reading image
img = cv2.imread("../images/1.jpeg")
# Edges
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
# Cartoonization
color = cv2.bilateralFilter(img, 9, 250, 255)
cartoon = cv2.bitwise_and(color, color, mask=edges)
cv2.imshow("Image", img)
cv2.imshow("edges", edges)
cv2.imshow("Cartoon", cartoon)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
StarcoderdataPython
|
1644607
|
# coding: utf-8
from pyccel.stdlib.parallel.mpi import mpi_init
from pyccel.stdlib.parallel.mpi import mpi_finalize
from pyccel.stdlib.parallel.mpi import mpi_comm_size
from pyccel.stdlib.parallel.mpi import mpi_comm_rank
from pyccel.stdlib.parallel.mpi import mpi_comm_world
# we need to declare these variables somehow,
# since we are calling mpi subroutines
ierr = -1
size = -1
rank = -1
mpi_init(ierr)
comm = mpi_comm_world
mpi_comm_size(comm, size, ierr)
mpi_comm_rank(comm, rank, ierr)
print('I process ', rank, ', among ', size, ' processes')
mpi_finalize(ierr)
|
StarcoderdataPython
|
3222191
|
<reponame>JensGeorg/CopraRNA
#!/usr/bin/env python
import sys
IntaRNA_result = sys.argv[1]
enrich_count = int(sys.argv[2])
#print IntaRNA_result
with open(IntaRNA_result) as file:
IntaRNA_lines = file.readlines()
#print IntaRNA_lines[1]
backgroundList = []
# go through IntaRNA output line by line and extract Entrez Gene Ids
for i in range(1,len(IntaRNA_lines)): # range omits the right boundary
# split
curr_line = IntaRNA_lines[i]
split = curr_line.split(";")
entrezID = split[37] ## edit 2.0.5.1 // changed to 37 because of new IntaRNA output
backgroundList.append(entrezID)
backgroundList = list(map(str,backgroundList))
print ("background:" + str(len(backgroundList)))
inputList = backgroundList[0:enrich_count]
print ("input:" + str(len(inputList)))
inputIds = ",".join(inputList)
#print inputIds
backgroundIds = ",".join(backgroundList)
#print backgroundIds
#sys.exit()
sys.path.append('../')
import logging
import traceback as tb
import suds.metrics as metrics
from suds import *
from suds.client import Client
from datetime import datetime
errors = 0
#setup_logging()
#logging.getLogger('suds.client').setLevel(logging.DEBUG)
url = 'https://david-d.ncifcrf.gov/webservice/services/DAVIDWebService?wsdl'
print ('url=%s' % url)
# create a service client using the wsdl.
client = Client(url)
ws = 'https://david-d.ncifcrf.gov/webservice/services/DAVIDWebService.DAVIDWebServiceHttpSoap11Endpoint/'
client.wsdl.services[0].setlocation(ws)
exit
#
# print the service (introspection)
#
print (client)
#authenticate user email
print (client.service.authenticate('<EMAIL>'))
# add enrich_count (amount) predicted
idType = 'ENTREZ_GENE_ID'
listName = 'make_up'
listType = 0
print (client.service.addList(inputIds, idType, listName, listType))
print (client.service.getDefaultCategoryNames())
thd = 1
ct = 1
print (client.service.getChartReport(thd,ct))
|
StarcoderdataPython
|
1721116
|
import os
import sys
import getopt
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image, ImageOps
def load_data(path):
X = np.array([]).reshape((0, 28, 28))
y = np.array([])
for root, dirs, files in os.walk(path):
for file in files:
if ".png" in file:
img = Image.open(os.path.join(root, file))
img = img.resize((28, 28))
img = img.convert("L")
img = ImageOps.equalize(img)
img = np.array(img)
img = img.reshape(1, *img.shape)
X = np.vstack((X, img))
elif ".csv" in file:
data = pd.read_csv(os.path.join(root, file), names=["labels"])
y = data["labels"].to_numpy()
return X, y
def preprocess(X, y):
X = X.reshape(X.shape[0], 28, 28, 1)
X = X / 255
y = tf.keras.utils.to_categorical(y, 10)
return X, y
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["name=", "dataset="])
except getopt.error as err:
sys.exit(2)
name = "model.h5"
root = ""
for opt, val in opts:
if opt == "--name": name = val
elif opt == "--dataset": root = val
X_test, y_test = load_data(root) if root else tf.keras.datasets.mnist.load_data()[1]
X_test, y_test = preprocess(X_test, y_test)
if X_test.shape[0] != y_test.shape[0]:
print("The number of images is not equal to the number of labels")
sys.exit(2)
if X_test.shape[1:-1] != (28, 28):
print("The dimensions of the images are not 28x28")
sys.exit(2)
model = tf.keras.models.load_model(name)
loss, acc = model.evaluate(X_test, y_test, verbose=0)
print(f"Model {name} stats:")
print(f'loss: {loss}')
print(f'acc: {acc}')
|
StarcoderdataPython
|
4822560
|
<reponame>nataliemcmullen/WikiMiney
urls = [
"pagecounts-20121101-000000.gz",
"pagecounts-20121101-010000.gz",
"pagecounts-20121101-020000.gz",
"pagecounts-20121101-030000.gz",
"pagecounts-20121101-040000.gz",
"pagecounts-20121101-050000.gz",
"pagecounts-20121101-060000.gz",
"pagecounts-20121101-070000.gz",
"pagecounts-20121101-080001.gz",
"pagecounts-20121101-090000.gz",
"pagecounts-20121101-100000.gz",
"pagecounts-20121101-110000.gz",
"pagecounts-20121101-120000.gz",
"pagecounts-20121101-130000.gz",
"pagecounts-20121101-140000.gz",
"pagecounts-20121101-150000.gz",
"pagecounts-20121101-160000.gz",
"pagecounts-20121101-170000.gz",
"pagecounts-20121101-180000.gz",
"pagecounts-20121101-190000.gz",
"pagecounts-20121101-200000.gz",
"pagecounts-20121101-210000.gz",
"pagecounts-20121101-220001.gz",
"pagecounts-20121101-230000.gz",
"pagecounts-20121102-000000.gz",
"pagecounts-20121102-010000.gz",
"pagecounts-20121102-020000.gz",
"pagecounts-20121102-030000.gz",
"pagecounts-20121102-040000.gz",
"pagecounts-20121102-050000.gz",
"pagecounts-20121102-060000.gz",
"pagecounts-20121102-070000.gz",
"pagecounts-20121102-080000.gz",
"pagecounts-20121102-090000.gz",
"pagecounts-20121102-100000.gz",
"pagecounts-20121102-110000.gz",
"pagecounts-20121102-120001.gz",
"pagecounts-20121102-130000.gz",
"pagecounts-20121102-140000.gz",
"pagecounts-20121102-150000.gz",
"pagecounts-20121102-160000.gz",
"pagecounts-20121102-170000.gz",
"pagecounts-20121102-180000.gz",
"pagecounts-20121102-190000.gz",
"pagecounts-20121102-200000.gz",
"pagecounts-20121102-210000.gz",
"pagecounts-20121102-220000.gz",
"pagecounts-20121102-230000.gz",
"pagecounts-20121103-000000.gz",
"pagecounts-20121103-010001.gz",
"pagecounts-20121103-020000.gz",
"pagecounts-20121103-030000.gz",
"pagecounts-20121103-040000.gz",
"pagecounts-20121103-050000.gz",
"pagecounts-20121103-060000.gz",
"pagecounts-20121103-070000.gz",
"pagecounts-20121103-080000.gz",
"pagecounts-20121103-090000.gz",
"pagecounts-20121103-100000.gz",
"pagecounts-20121103-110000.gz",
"pagecounts-20121103-120000.gz",
"pagecounts-20121103-130000.gz",
"pagecounts-20121103-140001.gz",
"pagecounts-20121103-150000.gz",
"pagecounts-20121103-160000.gz",
"pagecounts-20121103-170000.gz",
"pagecounts-20121103-180000.gz",
"pagecounts-20121103-190000.gz",
"pagecounts-20121103-200000.gz",
"pagecounts-20121103-210000.gz",
"pagecounts-20121103-220000.gz",
"pagecounts-20121103-230000.gz",
"pagecounts-20121104-000000.gz",
"pagecounts-20121104-010000.gz",
"pagecounts-20121104-020000.gz",
"pagecounts-20121104-030001.gz",
"pagecounts-20121104-040000.gz",
"pagecounts-20121104-050000.gz",
"pagecounts-20121104-060000.gz",
"pagecounts-20121104-070000.gz",
"pagecounts-20121104-080000.gz",
"pagecounts-20121104-090000.gz",
"pagecounts-20121104-100000.gz",
"pagecounts-20121104-110000.gz",
"pagecounts-20121104-120000.gz",
"pagecounts-20121104-130000.gz",
"pagecounts-20121104-140000.gz",
"pagecounts-20121104-150000.gz",
"pagecounts-20121104-160001.gz",
"pagecounts-20121104-170000.gz",
"pagecounts-20121104-180000.gz",
"pagecounts-20121104-190000.gz",
"pagecounts-20121104-200000.gz",
"pagecounts-20121104-210000.gz",
"pagecounts-20121104-220000.gz",
"pagecounts-20121104-230000.gz",
"pagecounts-20121105-000000.gz",
"pagecounts-20121105-010000.gz",
"pagecounts-20121105-020000.gz",
"pagecounts-20121105-030000.gz",
"pagecounts-20121105-040000.gz",
"pagecounts-20121105-050000.gz",
"pagecounts-20121105-060001.gz",
"pagecounts-20121105-070000.gz",
"pagecounts-20121105-080000.gz",
"pagecounts-20121105-090000.gz",
"pagecounts-20121105-100000.gz",
"pagecounts-20121105-110000.gz",
"pagecounts-20121105-120000.gz",
"pagecounts-20121105-130000.gz",
"pagecounts-20121105-140000.gz",
"pagecounts-20121105-150000.gz",
"pagecounts-20121105-160000.gz",
"pagecounts-20121105-170000.gz",
"pagecounts-20121105-180000.gz",
"pagecounts-20121105-190001.gz",
"pagecounts-20121105-200000.gz",
"pagecounts-20121105-210000.gz",
"pagecounts-20121105-220000.gz",
"pagecounts-20121105-230000.gz",
"pagecounts-20121106-000000.gz",
"pagecounts-20121106-010000.gz",
"pagecounts-20121106-020000.gz",
"pagecounts-20121106-030000.gz",
"pagecounts-20121106-040000.gz",
"pagecounts-20121106-050000.gz",
"pagecounts-20121106-060000.gz",
"pagecounts-20121106-070001.gz",
"pagecounts-20121106-080000.gz",
"pagecounts-20121106-090000.gz",
"pagecounts-20121106-100000.gz",
"pagecounts-20121106-110000.gz",
"pagecounts-20121106-120000.gz",
"pagecounts-20121106-130000.gz",
"pagecounts-20121106-140000.gz",
"pagecounts-20121106-150000.gz",
"pagecounts-20121106-160000.gz",
"pagecounts-20121106-170000.gz",
"pagecounts-20121106-180000.gz",
"pagecounts-20121106-190001.gz",
"pagecounts-20121106-200000.gz",
"pagecounts-20121106-210000.gz",
"pagecounts-20121106-220000.gz",
"pagecounts-20121106-230000.gz",
"pagecounts-20121107-000000.gz",
"pagecounts-20121107-010000.gz",
"pagecounts-20121107-020000.gz",
"pagecounts-20121107-030000.gz",
"pagecounts-20121107-040000.gz",
"pagecounts-20121107-050000.gz",
"pagecounts-20121107-060000.gz",
"pagecounts-20121107-070000.gz",
"pagecounts-20121107-080001.gz",
"pagecounts-20121107-090000.gz",
"pagecounts-20121107-100000.gz",
"pagecounts-20121107-110000.gz",
"pagecounts-20121107-120000.gz",
"pagecounts-20121107-130000.gz",
"pagecounts-20121107-140000.gz",
"pagecounts-20121107-150000.gz",
"pagecounts-20121107-160000.gz",
"pagecounts-20121107-170000.gz",
"pagecounts-20121107-180000.gz",
"pagecounts-20121107-190000.gz",
"pagecounts-20121107-200000.gz",
"pagecounts-20121107-210001.gz",
"pagecounts-20121107-220000.gz",
"pagecounts-20121107-230000.gz",
"pagecounts-20121108-000000.gz",
"pagecounts-20121108-010000.gz",
"pagecounts-20121108-020000.gz",
"pagecounts-20121108-030000.gz",
"pagecounts-20121108-040000.gz",
"pagecounts-20121108-050000.gz",
"pagecounts-20121108-060000.gz",
"pagecounts-20121108-070000.gz",
"pagecounts-20121108-080000.gz",
"pagecounts-20121108-090000.gz",
"pagecounts-20121108-100001.gz",
"pagecounts-20121108-110000.gz",
"pagecounts-20121108-120000.gz",
"pagecounts-20121108-130000.gz",
"pagecounts-20121108-140000.gz",
"pagecounts-20121108-150000.gz",
"pagecounts-20121108-160000.gz",
"pagecounts-20121108-170000.gz",
"pagecounts-20121108-180000.gz",
"pagecounts-20121108-190000.gz",
"pagecounts-20121108-200000.gz",
"pagecounts-20121108-210000.gz",
"pagecounts-20121108-220000.gz",
"pagecounts-20121108-230001.gz",
"pagecounts-20121109-000000.gz",
"pagecounts-20121109-010000.gz",
"pagecounts-20121109-020000.gz",
"pagecounts-20121109-030000.gz",
"pagecounts-20121109-040000.gz",
"pagecounts-20121109-050000.gz",
"pagecounts-20121109-060000.gz",
"pagecounts-20121109-070000.gz",
"pagecounts-20121109-080000.gz",
"pagecounts-20121109-090000.gz",
"pagecounts-20121109-100000.gz",
"pagecounts-20121109-110000.gz",
"pagecounts-20121109-120001.gz",
"pagecounts-20121109-130000.gz",
"pagecounts-20121109-140000.gz",
"pagecounts-20121109-150000.gz",
"pagecounts-20121109-160000.gz",
"pagecounts-20121109-170000.gz",
"pagecounts-20121109-180000.gz",
"pagecounts-20121109-190000.gz",
"pagecounts-20121109-200000.gz",
"pagecounts-20121109-210000.gz",
"pagecounts-20121109-220000.gz",
"pagecounts-20121109-230000.gz",
"pagecounts-20121110-000000.gz",
"pagecounts-20121110-010001.gz",
"pagecounts-20121110-020000.gz",
"pagecounts-20121110-030000.gz",
"pagecounts-20121110-040000.gz",
"pagecounts-20121110-050000.gz",
"pagecounts-20121110-060000.gz",
"pagecounts-20121110-070000.gz",
"pagecounts-20121110-080000.gz",
"pagecounts-20121110-090000.gz",
"pagecounts-20121110-100000.gz",
"pagecounts-20121110-110000.gz",
"pagecounts-20121110-120000.gz",
"pagecounts-20121110-130000.gz",
"pagecounts-20121110-140000.gz",
"pagecounts-20121110-150001.gz",
"pagecounts-20121110-160000.gz",
"pagecounts-20121110-170000.gz",
"pagecounts-20121110-180000.gz",
"pagecounts-20121110-190000.gz",
"pagecounts-20121110-200000.gz",
"pagecounts-20121110-210000.gz",
"pagecounts-20121110-220000.gz",
"pagecounts-20121110-230000.gz",
"pagecounts-20121111-000000.gz",
"pagecounts-20121111-010000.gz",
"pagecounts-20121111-020000.gz",
"pagecounts-20121111-030000.gz",
"pagecounts-20121111-040001.gz",
"pagecounts-20121111-050000.gz",
"pagecounts-20121111-060000.gz",
"pagecounts-20121111-070000.gz",
"pagecounts-20121111-080000.gz",
"pagecounts-20121111-090000.gz",
"pagecounts-20121111-100000.gz",
"pagecounts-20121111-110000.gz",
"pagecounts-20121111-120000.gz",
"pagecounts-20121111-130000.gz",
"pagecounts-20121111-140000.gz",
"pagecounts-20121111-150000.gz",
"pagecounts-20121111-160000.gz",
"pagecounts-20121111-170001.gz",
"pagecounts-20121111-180000.gz",
"pagecounts-20121111-190000.gz",
"pagecounts-20121111-200000.gz",
"pagecounts-20121111-210000.gz",
"pagecounts-20121111-220000.gz",
"pagecounts-20121111-230000.gz",
"pagecounts-20121112-000000.gz",
"pagecounts-20121112-010000.gz",
"pagecounts-20121112-020000.gz",
"pagecounts-20121112-030000.gz",
"pagecounts-20121112-040000.gz",
"pagecounts-20121112-050001.gz",
"pagecounts-20121112-060000.gz",
"pagecounts-20121112-070000.gz",
"pagecounts-20121112-080000.gz",
"pagecounts-20121112-090000.gz",
"pagecounts-20121112-100000.gz",
"pagecounts-20121112-110000.gz",
"pagecounts-20121112-120000.gz",
"pagecounts-20121112-130000.gz",
"pagecounts-20121112-140000.gz",
"pagecounts-20121112-150000.gz",
"pagecounts-20121112-160000.gz",
"pagecounts-20121112-170000.gz",
"pagecounts-20121112-180001.gz",
"pagecounts-20121112-190000.gz",
"pagecounts-20121112-200000.gz",
"pagecounts-20121112-210000.gz",
"pagecounts-20121112-220000.gz",
"pagecounts-20121112-230000.gz",
"pagecounts-20121113-000000.gz",
"pagecounts-20121113-010000.gz",
"pagecounts-20121113-020000.gz",
"pagecounts-20121113-030000.gz",
"pagecounts-20121113-040000.gz",
"pagecounts-20121113-050000.gz",
"pagecounts-20121113-060001.gz",
"pagecounts-20121113-070000.gz",
"pagecounts-20121113-080000.gz",
"pagecounts-20121113-090000.gz",
"pagecounts-20121113-100000.gz",
"pagecounts-20121113-110000.gz",
"pagecounts-20121113-120000.gz",
"pagecounts-20121113-130000.gz",
"pagecounts-20121113-140000.gz",
"pagecounts-20121113-150000.gz",
"pagecounts-20121113-160000.gz",
"pagecounts-20121113-170000.gz",
"pagecounts-20121113-180000.gz",
"pagecounts-20121113-190001.gz",
"pagecounts-20121113-200000.gz",
"pagecounts-20121113-210000.gz",
"pagecounts-20121113-220000.gz",
"pagecounts-20121113-230000.gz",
"pagecounts-20121114-000000.gz",
"pagecounts-20121114-010000.gz",
"pagecounts-20121114-020000.gz",
"pagecounts-20121114-030000.gz",
"pagecounts-20121114-040000.gz",
"pagecounts-20121114-050000.gz",
"pagecounts-20121114-060000.gz",
"pagecounts-20121114-070000.gz",
"pagecounts-20121114-080001.gz",
"pagecounts-20121114-090000.gz",
"pagecounts-20121114-100000.gz",
"pagecounts-20121114-110000.gz",
"pagecounts-20121114-120000.gz",
"pagecounts-20121114-130000.gz",
"pagecounts-20121114-140000.gz",
"pagecounts-20121114-150000.gz",
"pagecounts-20121114-160000.gz",
"pagecounts-20121114-170000.gz",
"pagecounts-20121114-180000.gz",
"pagecounts-20121114-190000.gz",
"pagecounts-20121114-200001.gz",
"pagecounts-20121114-210000.gz",
"pagecounts-20121114-220000.gz",
"pagecounts-20121114-230000.gz",
"pagecounts-20121115-000000.gz",
"pagecounts-20121115-010000.gz",
"pagecounts-20121115-020000.gz",
"pagecounts-20121115-030000.gz",
"pagecounts-20121115-040000.gz",
"pagecounts-20121115-050000.gz",
"pagecounts-20121115-060000.gz",
"pagecounts-20121115-070000.gz",
"pagecounts-20121115-080000.gz",
"pagecounts-20121115-090000.gz",
"pagecounts-20121115-100001.gz",
"pagecounts-20121115-110000.gz",
"pagecounts-20121115-120000.gz",
"pagecounts-20121115-130000.gz",
"pagecounts-20121115-140000.gz",
"pagecounts-20121115-150000.gz",
"pagecounts-20121115-160000.gz",
"pagecounts-20121115-170000.gz",
"pagecounts-20121115-180000.gz",
"pagecounts-20121115-190000.gz",
"pagecounts-20121115-200000.gz",
"pagecounts-20121115-210001.gz",
"pagecounts-20121115-220000.gz",
"pagecounts-20121115-230000.gz",
"pagecounts-20121116-000000.gz",
"pagecounts-20121116-010000.gz",
"pagecounts-20121116-020000.gz",
"pagecounts-20121116-030000.gz",
"pagecounts-20121116-040000.gz",
"pagecounts-20121116-050000.gz",
"pagecounts-20121116-060000.gz",
"pagecounts-20121116-070000.gz",
"pagecounts-20121116-080000.gz",
"pagecounts-20121116-090000.gz",
"pagecounts-20121116-100001.gz",
"pagecounts-20121116-110000.gz",
"pagecounts-20121116-120000.gz",
"pagecounts-20121116-130000.gz",
"pagecounts-20121116-140000.gz",
"pagecounts-20121116-150000.gz",
"pagecounts-20121116-160000.gz",
"pagecounts-20121116-170000.gz",
"pagecounts-20121116-180000.gz",
"pagecounts-20121116-190000.gz",
"pagecounts-20121116-200000.gz",
"pagecounts-20121116-210000.gz",
"pagecounts-20121116-220000.gz",
"pagecounts-20121116-230001.gz",
"pagecounts-20121117-000000.gz",
"pagecounts-20121117-010000.gz",
"pagecounts-20121117-020000.gz",
"pagecounts-20121117-030000.gz",
"pagecounts-20121117-040000.gz",
"pagecounts-20121117-050000.gz",
"pagecounts-20121117-060000.gz",
"pagecounts-20121117-070000.gz",
"pagecounts-20121117-080000.gz",
"pagecounts-20121117-090000.gz",
"pagecounts-20121117-100000.gz",
"pagecounts-20121117-110000.gz",
"pagecounts-20121117-120000.gz",
"pagecounts-20121117-130001.gz",
"pagecounts-20121117-140000.gz",
"pagecounts-20121117-150000.gz",
"pagecounts-20121117-160000.gz",
"pagecounts-20121117-170000.gz",
"pagecounts-20121117-180000.gz",
"pagecounts-20121117-190000.gz",
"pagecounts-20121117-200000.gz",
"pagecounts-20121117-210000.gz",
"pagecounts-20121117-220000.gz",
"pagecounts-20121117-230000.gz",
"pagecounts-20121118-000000.gz",
"pagecounts-20121118-010000.gz",
"pagecounts-20121118-020000.gz",
"pagecounts-20121118-030001.gz",
"pagecounts-20121118-040000.gz",
"pagecounts-20121118-050000.gz",
"pagecounts-20121118-060000.gz",
"pagecounts-20121118-070000.gz",
"pagecounts-20121118-080000.gz",
"pagecounts-20121118-090000.gz",
"pagecounts-20121118-100000.gz",
"pagecounts-20121118-110000.gz",
"pagecounts-20121118-120000.gz",
"pagecounts-20121118-130000.gz",
"pagecounts-20121118-140000.gz",
"pagecounts-20121118-150001.gz",
"pagecounts-20121118-160000.gz",
"pagecounts-20121118-170000.gz",
"pagecounts-20121118-180000.gz",
"pagecounts-20121118-190000.gz",
"pagecounts-20121118-200000.gz",
"pagecounts-20121118-210000.gz",
"pagecounts-20121118-220000.gz",
"pagecounts-20121118-230000.gz",
"pagecounts-20121119-000000.gz",
"pagecounts-20121119-010000.gz",
"pagecounts-20121119-020001.gz",
"pagecounts-20121119-030000.gz",
"pagecounts-20121119-040000.gz",
"pagecounts-20121119-050000.gz",
"pagecounts-20121119-060000.gz",
"pagecounts-20121119-070000.gz",
"pagecounts-20121119-080000.gz",
"pagecounts-20121119-090000.gz",
"pagecounts-20121119-100000.gz",
"pagecounts-20121119-110000.gz",
"pagecounts-20121119-120000.gz",
"pagecounts-20121119-130000.gz",
"pagecounts-20121119-140000.gz",
"pagecounts-20121119-150001.gz",
"pagecounts-20121119-160000.gz",
"pagecounts-20121119-170000.gz",
"pagecounts-20121119-180000.gz",
"pagecounts-20121119-190000.gz",
"pagecounts-20121119-200000.gz",
"pagecounts-20121119-210000.gz",
"pagecounts-20121119-220000.gz",
"pagecounts-20121119-230000.gz",
"pagecounts-20121120-000000.gz",
"pagecounts-20121120-010000.gz",
"pagecounts-20121120-020000.gz",
"pagecounts-20121120-030001.gz",
"pagecounts-20121120-040000.gz",
"pagecounts-20121120-050000.gz",
"pagecounts-20121120-060000.gz",
"pagecounts-20121120-070000.gz",
"pagecounts-20121120-080000.gz",
"pagecounts-20121120-090000.gz",
"pagecounts-20121120-100000.gz",
"pagecounts-20121120-110000.gz",
"pagecounts-20121120-120000.gz",
"pagecounts-20121120-130000.gz",
"pagecounts-20121120-140000.gz",
"pagecounts-20121120-150000.gz",
"pagecounts-20121120-160001.gz",
"pagecounts-20121120-170000.gz",
"pagecounts-20121120-180000.gz",
"pagecounts-20121120-190000.gz",
"pagecounts-20121120-200000.gz",
"pagecounts-20121120-210000.gz",
"pagecounts-20121120-220000.gz",
"pagecounts-20121120-230000.gz",
"pagecounts-20121121-000000.gz",
"pagecounts-20121121-010000.gz",
"pagecounts-20121121-020000.gz",
"pagecounts-20121121-030000.gz",
"pagecounts-20121121-040000.gz",
"pagecounts-20121121-050001.gz",
"pagecounts-20121121-060000.gz",
"pagecounts-20121121-070000.gz",
"pagecounts-20121121-080000.gz",
"pagecounts-20121121-090000.gz",
"pagecounts-20121121-100000.gz",
"pagecounts-20121121-110000.gz",
"pagecounts-20121121-120000.gz",
"pagecounts-20121121-130000.gz",
"pagecounts-20121121-140000.gz",
"pagecounts-20121121-150000.gz",
"pagecounts-20121121-160000.gz",
"pagecounts-20121121-170001.gz",
"pagecounts-20121121-180000.gz",
"pagecounts-20121121-190000.gz",
"pagecounts-20121121-200000.gz",
"pagecounts-20121121-210000.gz",
"pagecounts-20121121-220000.gz",
"pagecounts-20121121-230000.gz",
"pagecounts-20121122-000000.gz",
"pagecounts-20121122-010000.gz",
"pagecounts-20121122-020000.gz",
"pagecounts-20121122-030000.gz",
"pagecounts-20121122-040000.gz",
"pagecounts-20121122-050000.gz",
"pagecounts-20121122-060001.gz",
"pagecounts-20121122-070000.gz",
"pagecounts-20121122-080000.gz",
"pagecounts-20121122-090000.gz",
"pagecounts-20121122-100000.gz",
"pagecounts-20121122-110000.gz",
"pagecounts-20121122-120000.gz",
"pagecounts-20121122-130000.gz",
"pagecounts-20121122-140000.gz",
"pagecounts-20121122-150000.gz",
"pagecounts-20121122-160000.gz",
"pagecounts-20121122-170000.gz",
"pagecounts-20121122-180001.gz",
"pagecounts-20121122-190000.gz",
"pagecounts-20121122-200000.gz",
"pagecounts-20121122-210000.gz",
"pagecounts-20121122-220000.gz",
"pagecounts-20121122-230000.gz",
"pagecounts-20121123-000000.gz",
"pagecounts-20121123-010000.gz",
"pagecounts-20121123-020000.gz",
"pagecounts-20121123-030000.gz",
"pagecounts-20121123-040000.gz",
"pagecounts-20121123-050000.gz",
"pagecounts-20121123-060000.gz",
"pagecounts-20121123-070001.gz",
"pagecounts-20121123-080000.gz",
"pagecounts-20121123-090000.gz",
"pagecounts-20121123-100000.gz",
"pagecounts-20121123-110000.gz",
"pagecounts-20121123-120000.gz",
"pagecounts-20121123-130000.gz",
"pagecounts-20121123-140000.gz",
"pagecounts-20121123-150000.gz",
"pagecounts-20121123-160000.gz",
"pagecounts-20121123-170000.gz",
"pagecounts-20121123-180000.gz",
"pagecounts-20121123-190000.gz",
"pagecounts-20121123-200001.gz",
"pagecounts-20121123-210000.gz",
"pagecounts-20121123-220000.gz",
"pagecounts-20121123-230000.gz",
"pagecounts-20121124-000000.gz",
"pagecounts-20121124-010000.gz",
"pagecounts-20121124-020000.gz",
"pagecounts-20121124-030000.gz",
"pagecounts-20121124-040000.gz",
"pagecounts-20121124-050000.gz",
"pagecounts-20121124-060000.gz",
"pagecounts-20121124-070000.gz",
"pagecounts-20121124-080000.gz",
"pagecounts-20121124-090001.gz",
"pagecounts-20121124-100000.gz",
"pagecounts-20121124-110000.gz",
"pagecounts-20121124-120000.gz",
"pagecounts-20121124-130000.gz",
"pagecounts-20121124-140000.gz",
"pagecounts-20121124-150000.gz",
"pagecounts-20121124-160000.gz",
"pagecounts-20121124-170000.gz",
"pagecounts-20121124-180000.gz",
"pagecounts-20121124-190000.gz",
"pagecounts-20121124-200000.gz",
"pagecounts-20121124-210001.gz",
"pagecounts-20121124-220000.gz",
"pagecounts-20121124-230000.gz",
"pagecounts-20121125-000000.gz",
"pagecounts-20121125-010000.gz",
"pagecounts-20121125-020000.gz",
"pagecounts-20121125-030000.gz",
"pagecounts-20121125-040000.gz",
"pagecounts-20121125-050000.gz",
"pagecounts-20121125-060000.gz",
"pagecounts-20121125-070000.gz",
"pagecounts-20121125-080000.gz",
"pagecounts-20121125-090000.gz",
"pagecounts-20121125-100001.gz",
"pagecounts-20121125-110000.gz",
"pagecounts-20121125-120000.gz",
"pagecounts-20121125-130000.gz",
"pagecounts-20121125-140000.gz",
"pagecounts-20121125-150000.gz",
"pagecounts-20121125-160000.gz",
"pagecounts-20121125-170000.gz",
"pagecounts-20121125-180000.gz",
"pagecounts-20121125-190000.gz",
"pagecounts-20121125-200000.gz",
"pagecounts-20121125-210000.gz",
"pagecounts-20121125-220001.gz",
"pagecounts-20121125-230000.gz",
"pagecounts-20121126-000000.gz",
"pagecounts-20121126-010000.gz",
"pagecounts-20121126-020000.gz",
"pagecounts-20121126-030000.gz",
"pagecounts-20121126-040000.gz",
"pagecounts-20121126-050000.gz",
"pagecounts-20121126-060000.gz",
"pagecounts-20121126-070000.gz",
"pagecounts-20121126-080000.gz",
"pagecounts-20121126-090000.gz",
"pagecounts-20121126-100000.gz",
"pagecounts-20121126-110001.gz",
"pagecounts-20121126-120000.gz",
"pagecounts-20121126-130000.gz",
"pagecounts-20121126-140000.gz",
"pagecounts-20121126-150000.gz",
"pagecounts-20121126-160000.gz",
"pagecounts-20121126-170000.gz",
"pagecounts-20121126-180000.gz",
"pagecounts-20121126-190000.gz",
"pagecounts-20121126-200000.gz",
"pagecounts-20121126-210000.gz",
"pagecounts-20121126-220001.gz",
"pagecounts-20121126-230000.gz",
"pagecounts-20121127-000000.gz",
"pagecounts-20121127-010000.gz",
"pagecounts-20121127-020000.gz",
"pagecounts-20121127-030000.gz",
"pagecounts-20121127-040000.gz",
"pagecounts-20121127-050000.gz",
"pagecounts-20121127-060000.gz",
"pagecounts-20121127-070000.gz",
"pagecounts-20121127-080000.gz",
"pagecounts-20121127-090000.gz",
"pagecounts-20121127-100000.gz",
"pagecounts-20121127-110001.gz",
"pagecounts-20121127-120000.gz",
"pagecounts-20121127-130000.gz",
"pagecounts-20121127-140000.gz",
"pagecounts-20121127-150000.gz",
"pagecounts-20121127-160000.gz",
"pagecounts-20121127-170000.gz",
"pagecounts-20121127-180000.gz",
"pagecounts-20121127-190000.gz",
"pagecounts-20121127-200000.gz",
"pagecounts-20121127-210000.gz",
"pagecounts-20121127-220000.gz",
"pagecounts-20121127-230000.gz",
"pagecounts-20121128-000001.gz",
"pagecounts-20121128-010000.gz",
"pagecounts-20121128-020000.gz",
"pagecounts-20121128-030000.gz",
"pagecounts-20121128-040000.gz",
"pagecounts-20121128-050000.gz",
"pagecounts-20121128-060000.gz",
"pagecounts-20121128-070000.gz",
"pagecounts-20121128-080000.gz",
"pagecounts-20121128-090000.gz",
"pagecounts-20121128-100000.gz",
"pagecounts-20121128-110000.gz",
"pagecounts-20121128-120001.gz",
"pagecounts-20121128-130000.gz",
"pagecounts-20121128-140000.gz",
"pagecounts-20121128-150000.gz",
"pagecounts-20121128-160000.gz",
"pagecounts-20121128-170000.gz",
"pagecounts-20121128-180000.gz",
"pagecounts-20121128-190000.gz",
"pagecounts-20121128-200000.gz",
"pagecounts-20121128-210000.gz",
"pagecounts-20121128-220000.gz",
"pagecounts-20121128-230000.gz",
"pagecounts-20121129-000001.gz",
"pagecounts-20121129-010000.gz",
"pagecounts-20121129-020000.gz",
"pagecounts-20121129-030000.gz",
"pagecounts-20121129-040000.gz",
"pagecounts-20121129-050000.gz",
"pagecounts-20121129-060000.gz",
"pagecounts-20121129-070000.gz",
"pagecounts-20121129-080000.gz",
"pagecounts-20121129-090000.gz",
"pagecounts-20121129-100000.gz",
"pagecounts-20121129-110000.gz",
"pagecounts-20121129-120000.gz",
"pagecounts-20121129-130001.gz",
"pagecounts-20121129-140000.gz",
"pagecounts-20121129-150000.gz",
"pagecounts-20121129-160000.gz",
"pagecounts-20121129-170000.gz",
"pagecounts-20121129-180000.gz",
"pagecounts-20121129-190000.gz",
"pagecounts-20121129-200000.gz",
"pagecounts-20121129-210000.gz",
"pagecounts-20121129-220000.gz",
"pagecounts-20121129-230000.gz",
"pagecounts-20121130-000000.gz",
"pagecounts-20121130-010001.gz",
"pagecounts-20121130-020000.gz",
"pagecounts-20121130-030000.gz",
"pagecounts-20121130-040000.gz",
"pagecounts-20121130-050000.gz",
"pagecounts-20121130-060000.gz",
"pagecounts-20121130-070000.gz",
"pagecounts-20121130-080000.gz",
"pagecounts-20121130-090000.gz",
"pagecounts-20121130-100000.gz",
"pagecounts-20121130-110000.gz",
"pagecounts-20121130-120000.gz",
"pagecounts-20121130-130000.gz",
"pagecounts-20121130-140001.gz",
"pagecounts-20121130-150000.gz",
"pagecounts-20121130-160000.gz",
"pagecounts-20121130-170000.gz",
"pagecounts-20121130-180000.gz",
"pagecounts-20121130-190000.gz",
"pagecounts-20121130-200000.gz",
"pagecounts-20121130-210000.gz",
"pagecounts-20121130-220000.gz",
"pagecounts-20121130-230000.gz",
]
import os
base = "http://dumps.wikimedia.org/other/pagecounts-raw/"
tail = "2012/2012-11/"
i = 0
for url in urls:
i = i + 1
one = "en-" + url[:-3]
two = url[:-3]
three = url
if not (os.path.isfile(one) or os.path.isfile(two) or os.path.isfile(three)):
#os.system("curl --silent -O %s >> /dev/null" % (base + tail + url))
os.system("curl -O %s" % (base + tail + url))
print "%d completeted of %d total. %d remaining" % (i, len(urls), len(urls) - i)
|
StarcoderdataPython
|
1760805
|
<reponame>jonathanslenders/edgedb
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from edgedb.lang.edgeql import ast as qlast
from . import delta as sd
from . import derivable
from . import inheriting
from . import name as sn
from . import named
from . import objects as so
from . import referencing
class Action(inheriting.InheritingObject):
_type = 'action'
class ActionSet(so.ObjectSet, type=Action):
pass
class Event(inheriting.InheritingObject):
_type = 'event'
class Policy(derivable.DerivableObject):
_type = 'policy'
# Policy subject, i.e object in the schema to which
# this policy is applied
subject = so.Field(named.NamedObject, compcoef=0.714)
# Event
event = so.Field(Event, compcoef=0.429)
# Actions in response to an event
actions = so.Field(ActionSet, ActionSet, coerce=True, compcoef=0.86)
def init_derived(self, schema, source, *, replace_original=None, **kwargs):
policy = super().init_derived(schema, source, **kwargs)
policy.subject = source
return policy
class InternalPolicySubject(referencing.ReferencingObject):
policy = referencing.RefDict(ref_cls=Policy, compcoef=0.857)
def add_policy(self, policy, replace=False):
self.add_classref('policy', policy, replace=replace)
def del_policy(self, policy_name, schema):
self.del_classref('policy', policy_name, schema)
class PolicySubject:
def get_policy(self, schema, policy_cls, policy_key):
return schema._policy_schema.get(policy_cls, policy_key)
def materialize_policies(self, schema):
self._merge_policies(schema, self.bases)
def _merge_policies(self, schema, bases, force_first=False):
seen = set()
for base in bases:
for event, policies in schema._policy_schema.iter(base):
self_policies = schema._policy_schema.get(self, event)
if (self_policies is None or
(force_first and (self, event) not in seen)):
schema._policy_schema.add(policies[-1], self)
seen.add((self, event))
class PolicySchema:
def __init__(self):
self._index = {}
def add(self, policy, subject=None):
if subject is None:
subject = policy.subject
event = policy.event
try:
subject_policies = self._index[subject]
except KeyError:
subject_policies = self._index[subject] = {}
try:
policy_stack = subject_policies[event]
except KeyError:
policy_stack = subject_policies[event] = []
policy_stack.append(policy)
def delete(self, policy):
subject_policies = self._index[policy.subject]
policy_stack = subject_policies[policy.event]
policy_stack.remove(policy)
def get_all(self, subject, event):
try:
subject_policies = self._index[subject]
except KeyError:
return None
else:
return subject_policies.get(event)
def get(self, subject, event):
stack = self.get_all(subject, event)
if stack:
return stack[-1]
def iter(self, subject):
try:
subject_policies = self._index[subject]
except KeyError:
return ()
else:
return subject_policies.items()
class ActionCommandContext(sd.ObjectCommandContext):
pass
class ActionCommand(named.NamedObjectCommand, schema_metaclass=Action,
context_class=ActionCommandContext):
pass
class EventCommandContext(sd.ObjectCommandContext):
pass
class EventCommand(named.NamedObjectCommand, schema_metaclass=Event,
context_class=EventCommandContext):
pass
class PolicyCommandContext(sd.ObjectCommandContext):
pass
class InternalPolicySubjectCommandContext:
# policy mixin
pass
class CreateAction(named.CreateNamedObject, ActionCommand):
astnode = qlast.CreateAction
class RenameAction(named.RenameNamedObject, ActionCommand):
pass
class AlterAction(named.AlterNamedObject, ActionCommand):
astnode = qlast.AlterAction
class DeleteAction(named.DeleteNamedObject, ActionCommand):
astnode = qlast.DropAction
class CreateEvent(inheriting.CreateInheritingObject, EventCommand):
astnode = qlast.CreateEvent
class RenameEvent(named.RenameNamedObject, EventCommand):
pass
class RebaseEvent(inheriting.RebaseNamedObject, EventCommand):
pass
class AlterEvent(inheriting.AlterInheritingObject, EventCommand):
astnode = qlast.AlterEvent
class DeleteEvent(inheriting.DeleteInheritingObject, EventCommand):
astnode = qlast.DropEvent
class PolicyCommand(
referencing.ReferencedObjectCommand,
schema_metaclass=Policy,
context_class=PolicyCommandContext,
referrer_context_class=InternalPolicySubjectCommandContext):
@classmethod
def _classname_from_ast(cls, astnode, context, schema):
parent_ctx = context.get(sd.CommandContextToken)
subject_name = parent_ctx.op.classname
event_name = sn.Name(module=astnode.event.module,
name=astnode.event.name)
pnn = Policy.get_specialized_name(
event_name, subject_name
)
pn = sn.Name(name=pnn, module=subject_name.module)
return pn
def _apply_fields_ast(self, context, node):
super()._apply_fields_ast(context, node)
if node.event is None:
event_name = Policy.get_shortname(self.classname)
node.event = qlast.ObjectRef(
name=event_name.name,
module=event_name.module
)
def _apply_field_ast(self, context, node, op):
if op.property == 'name':
pass
elif op.property == 'event':
node.event = qlast.ObjectRef(
name=op.new_value.classname.name,
module=op.new_value.classname.module
)
elif op.property == 'actions':
node.actions = [qlast.ObjectRef(
name=a.classname.name,
module=a.classname.module
) for a in op.new_value]
else:
pass
class CreatePolicy(PolicyCommand, named.CreateNamedObject):
astnode = qlast.CreateLocalPolicy
@classmethod
def _cmd_tree_from_ast(cls, astnode, context, schema):
cmd = super()._cmd_tree_from_ast(astnode, context, schema)
parent_ctx = context.get(sd.CommandContextToken)
subject_name = parent_ctx.op.classname
cmd.update((
sd.AlterObjectProperty(
property='subject',
new_value=so.ObjectRef(classname=subject_name)
),
sd.AlterObjectProperty(
property='event',
new_value=so.ObjectRef(
classname=sn.Name(
module=astnode.event.module,
name=astnode.event.name
)
)
),
sd.AlterObjectProperty(
property='actions',
new_value=so.ObjectList(
so.ObjectRef(
classname=sn.Name(
module=action.module,
name=action.name
)
)
for action in astnode.actions
)
)
))
return cmd
class RenamePolicy(PolicyCommand, named.RenameNamedObject):
pass
class AlterPolicy(PolicyCommand, named.AlterNamedObject):
astnode = qlast.AlterLocalPolicy
@classmethod
def _cmd_tree_from_ast(cls, astnode, context, schema):
cmd = super()._cmd_tree_from_ast(astnode, context, schema)
cmd.update((
sd.AlterObjectProperty(
property='actions',
new_value=so.ObjectList(
so.ObjectRef(
classname=sn.Name(
module=action.module,
name=action.name
)
)
for action in astnode.actions
)
),
))
return cmd
class DeletePolicy(PolicyCommand, named.DeleteNamedObject):
pass
|
StarcoderdataPython
|
131282
|
<reponame>cfbarbero/find-cwlogs-subscriptions
import json
import argparse
parser = argparse.ArgumentParser(description='Parse the subscriptions')
parser.add_argument('filename', type=str, help='path to a JSON file listing log groups and their subscription filters')
args = parser.parse_args()
with open(args.filename, 'r') as f:
data = json.load(f)
subscriptions={}
for item in data:
for subscription in item['subscription_filters']:
if subscription not in subscriptions:
subscriptions[subscription]=[]
subscriptions[subscription].append(item['log_group'])
print(json.dumps(subscriptions, indent=2))
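# For illustration (the names below are made up): given an input file containing
#   [{"log_group": "/aws/lambda/foo", "subscription_filters": ["ship-to-es"]},
#    {"log_group": "/aws/lambda/bar", "subscription_filters": ["ship-to-es"]}]
# the script prints the inverted mapping
#   {"ship-to-es": ["/aws/lambda/foo", "/aws/lambda/bar"]}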
|
StarcoderdataPython
|
4824776
|
<reponame>Plavit/Get-Real
import pytest
import dash_html_components as html
import pandas as pd
from generators import generate_table, generate_europe_map, generate_world_map
from app import DATA_UN, DATA_EU
df = pd.read_csv('data/{}'.format(DATA_UN))
dfeu = pd.read_csv('data/{}'.format(DATA_EU))
filtered_df = pd.DataFrame(df[df.Year == 2018], columns=['Czech name', 'eGov index']).reset_index()
# checks if the generate table function raises an exception when passed something other than a dataframe
@pytest.mark.parametrize(
"dataframe,max_rows",
[
(filtered_df, 10),
("not a dataframe", "hoho")
])
def test_func_table(dataframe, max_rows):
if isinstance(dataframe, pd.DataFrame):
try:
generate_table(dataframe, 10)
assert True
except Exception:
assert False
else:
try:
generate_table(dataframe, 10)
assert False
except Exception:
assert True
# checks if the generate worldmap function raises an exception when passed something other than a valid year
@pytest.mark.parametrize(
"dataframe,year",
[
(df, 2018),
("not a dataframe", "hoho")
])
def test_func_worldmap(dataframe, year):
if isinstance(year, int):
try:
generate_world_map(dataframe, year)
assert True
except Exception:
assert False
else:
try:
generate_world_map(dataframe, year)
assert False
except Exception:
assert True
# checks if the generate europemap function raises an exception when passed something other than a dataframe
@pytest.mark.parametrize(
"dataframe,year",
[
(dfeu, 2018),
("not a dataframe", "hoho")
])
def test_func_euromap(dataframe, year):
if isinstance(dataframe, pd.DataFrame):
try:
generate_europe_map(dataframe, year)
assert True
except Exception:
assert False
else:
try:
generate_europe_map(dataframe, year)
assert False
except Exception:
assert True
|
StarcoderdataPython
|
3286494
|
from abc import ABCMeta, abstractmethod
class Plugin(object):
__metaclass__ = ABCMeta
def __init__(self, options):
super(Plugin, self).__init__()
self.options = options
@abstractmethod
def run(self):
pass
|
StarcoderdataPython
|
157925
|
<reponame>Clonexy700/edu54book
from tkinter import *
root = Tk()
c = Canvas(root, width=500, height=500, bg='white')
c.pack()
c.create_oval(225, 235, 275, 285, width=2)
c.create_oval(200, 210, 300, 310, width=2)
c.create_oval(225, 80, 275, 210, width=2)
c.create_oval(225, 310, 275, 450, width=2)
c.create_oval(60, 240, 200, 285, width=2)
c.create_oval(300, 240, 440, 285, width=2)
root.mainloop()
|
StarcoderdataPython
|
164798
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from neutron.common import exceptions
def convert_filters(**kwargs):
result = copy.deepcopy(kwargs)
if 'tenant_id' in result:
if 'project_id' in result:
raise exceptions.TenantIdProjectIdFilterConflict()
result['project_id'] = result.pop('tenant_id')
return result
|
StarcoderdataPython
|
3361953
|
<reponame>GerbenRienk/casi<filename>src/extract/list_one_study.py
'''
The purpose of this module is to try to send a request to the castor-api
to obtain a list of studies.
Created on 6-11-2020
@author: <NAME>
Copyright 2020 TrialDataSolutions
'''
from utils.dictfile import DictFile
from utils.castor_api import CastorApi
def list_study():
# manually supply the study-id
study_id = '2A72D9CC-06B5-0078-B089-A5456C7A7024'
# read configuration file for client id and client secret and other parameters
config=DictFile('casi.config').read()
# make an instance of the api
api = CastorApi(config)
# try to get an access-token
api.sessions.get_access_token(verbose=False)
#request the studies
my_study = api.study.list(study_id, verbose=True)
print(my_study)
# display name if we have that
if 'name' in my_study:
print(my_study['name'])
else:
print('could not find a name for %s' % study_id)
if __name__ == '__main__':
list_study()
|
StarcoderdataPython
|
1723712
|
<gh_stars>10-100
import os
import pandas as pd
import numpy as np
from tqdm import tqdm
import math
from time import time
np.set_printoptions(suppress=True)
def remove_forts():
    """This part removes the unnecessary files at the end of the simulation. Our purpose is to combine this part with the start of the machine learning part in the future."""
    leftover_files = [
        'ffieldss', 'control', 'exe', 'geo', 'iopt', 'summary.txt',
        'run.log', 'fort.13', 'fort.20', 'fort.35', 'fort.71', 'fort.9',
        'fort.91', 'fort.98', 'output.pdb', 'moldyn.vel', 'output.MOP',
        'molfra.out', 'trainset.in', 'fort.25', 'fort.3', 'fort.4',
        'fort.45', 'fort.62', 'fort.7', 'fort.72', 'fort.73', 'fort.79',
        'fort.83', 'fort.90', 'fort.21',
    ]
    # Delete each leftover output file if it is present in the working directory.
    for leftover in leftover_files:
        if os.path.exists(leftover):
            os.remove(leftover)
|
StarcoderdataPython
|
147775
|
<reponame>pletzer/nemoflux<filename>nemoflux/timeobj.py<gh_stars>0
import xarray
from datetime import datetime
class TimeObj(object):
def __init__(self, nc):
self.timeVarName = ''
self.timeVar = []
for vName, var in nc.items():
if getattr(var, 'standard_name', '') == 'time' or \
getattr(var, 'long_name', '') == 'Time axis':
                self.timeVarName = vName
self.timeVar = var
def getValues(self):
return self.timeVar[:]
def getSize(self):
return len(self.timeVar)
def getTimeAsDate(self, timeIndex):
year = self.timeVar.values[timeIndex].year
month = self.timeVar.values[timeIndex].month
day = self.timeVar.values[timeIndex].day
return datetime(year=year, month=month, day=day).date()
def getTimeAsString(self, timeIndex):
timeVal = self.timeVar.values[timeIndex]
year, month, day = timeVal.year, timeVal.month, timeVal.day
return f'{year}-{month}-{day}'
###############################################################################
def test():
nc = xarray.open_dataset('../data/nz/U.nc')
to = TimeObj(nc)
print(to.timeVarName)
print(to.timeVar)
print(f'number of steps: {to.getSize()}')
for timeIndex in range(to.getSize()):
print(f'index: {timeIndex} date: {to.getTimeAsString(timeIndex)}')
if __name__ == '__main__':
test()
|
StarcoderdataPython
|
42464
|
# -*- coding: utf-8 -*-
import wx
from grail import Grail
from pages import LoginPage, MainFrame
from protocol import GrailProtocol
class MainFrameLogic(MainFrame):
def __init__(self, login, password):
super().__init__(None)
# self.m_notebook1.SetBackgroundColour(wx.NullColour)
self.grail = Grail()
self.grail.open(login, password)
self._change_counter = 0
self._push_counter = 0
self.grail_text_ctrl.SetLabelText(self.grail.get())
self.commit_btn.Disable()
self.push_btn.Disable()
self.data_list.InsertColumn(0, 'ID')
self.data_list.InsertColumn(1, 'Hash', width=300)
self.push_row(self.grail.get_header_hash())
for chain in self.grail.get_hash_list():
self.push_row(chain)
self.grail_text_ctrl.SetValue(self.grail.get())
self._selected = 0
try:
self.diff_text.SetLabel(self.grail.get_diff(self._selected))
except ValueError:
pass
def grail_update(self, e):
diff = e.GetString()
self._change_counter = 0
match = len(self.grail.get()) >= len(diff)
max_diff = self.grail.get() if match else diff
watch_diff = diff if match else self.grail.get()
for i, litter in enumerate(max_diff):
try:
if watch_diff[i] != litter:
self._change_counter += 1
except IndexError:
self._change_counter += 1
if self._change_counter > 0:
self.commit_btn.Enable()
else:
self.commit_btn.Disable()
self.commit_btn.SetLabelText(f"Commit ({self._change_counter})")
def selected(self, event):
self._selected = int(self.data_list.GetItemText(event.Index))
self.grail_text_ctrl.SetValue(self.grail.get(self._selected + 1))
self.diff_text.SetLabel(self.grail.get_diff(self._selected))
def commit(self, event):
self.grail.update(self.grail_text_ctrl.GetValue())
        chain = self.grail.get_last_hash()
        id = self.push_row(chain)
self.status_bar.SetStatusText(f'Запись #{id} добавлена, ожидается подтверждение клиентом.')
self.commit_btn.Disable()
self.commit_btn.SetLabelText("Commit (0)")
self._push_counter += 1
self.push_btn.Enable()
self.push_btn.SetLabelText(f"Push ({self._push_counter})")
def push(self, event):
self.grail.save()
self._push_counter = 0
self.push_btn.Disable()
self.push_btn.SetLabelText("Push (0)")
def push_row(self, item):
index = self.data_list.GetItemCount()
index = self.data_list.InsertItem(self.data_list.GetItemCount(), str(index))
self.data_list.SetItem(index, 1, item.hex())
return index
class LoginPageLogic(LoginPage):
M_LABEL_NEW = u"Регистрация в сети"
M_LABEL_LOGIN = u"Авторизация"
M_LABEL_UNLOCK = u"Для расшифровки введите пароль:"
M_BTN_REG = u"Регистрация"
M_BTN_UNLOCK = u"Расшифровать"
M_BTN_REG_PROC = u"Запрос принят..."
M_ERROR_FIELD = u"Не все поля заполнены"
M_ERROR_REG = u"Вы не зарегистрированы"
M_ERROR_PWD = u"<PASSWORD>"
def __init__(self):
super().__init__(None)
self.page = None
self.page_login()
self.__protocol = GrailProtocol()
def on_reg(self, event):
if self.page == 1:
# PAGE CREATE
self.page_login()
self.reg_btn.SetLabelText(self.M_BTN_REG)
pass
elif self.page == 2:
# PAGE LOGIN
self.page_create()
self.reg_btn.SetLabelText(self.M_LABEL_LOGIN)
def on_unlock(self, event):
self.status_bar.SetStatusText(wx.EmptyString)
if self.page == 1:
# PAGE CREATE
password = self.password_field.GetValue()
login = self.login_field.GetValue()
if len(password) == 0 or len(login) == 0:
self.status_bar.SetStatusText(self.M_ERROR_FIELD)
else:
form = MainFrameLogic(login, password)
form.Show()
self.Close()
elif self.page == 2:
# PAGE LOGIN
login = self.login_field.GetValue()
            password = self.password_field.GetValue()
if len(password) == 0 and len(login) == 0:
self.status_bar.SetStatusText(self.M_ERROR_FIELD)
else:
if not self.__protocol.check(login, password):
self.status_bar.SetStatusText(self.M_ERROR_PWD)
else:
form = MainFrameLogic(login, password)
form.Show()
self.Close()
def passwd_no_eq(self):
pass
# self.m_staticText3.SetLabel(u"Неверный пароль")
def page_create(self):
self.page = 1
self.title_text.SetLabelText(self.M_LABEL_NEW)
self.lock_btn.SetLabelText(self.M_BTN_REG)
self.login_field.Enable()
self.password_field.Enable()
self.Layout()
def page_login(self):
self.page = 2
self.title_text.SetLabelText(self.M_LABEL_LOGIN)
self.lock_btn.SetLabelText(self.M_BTN_UNLOCK)
self.login_field.Enable()
self.password_field.Enable()
self.Layout()
if __name__ == "__main__":
app = wx.App()
wnd = LoginPageLogic()
wnd.Show()
app.MainLoop()
|
StarcoderdataPython
|
18170
|
from epidemioptim.environments.cost_functions.costs.death_toll_cost import DeathToll
from epidemioptim.environments.cost_functions.costs.gdp_recess_cost import GdpRecess
|
StarcoderdataPython
|
1626681
|
<gh_stars>0
from rubygems_utils import RubyGemsTestUtils
class RubyGemsTestrubygems_faraday_net_http_persistent(RubyGemsTestUtils):
def test_gem_list_rubygems_faraday_net_http_persistent(self):
self.gem_is_installed("faraday-net_http_persistent")
|
StarcoderdataPython
|
1793789
|
<filename>model/QAsparql/lcquad_dataset.py
import json
import requests
from parser.lc_quad import LC_Qaud
def prepare_dataset(ds):
ds.load()
ds.parse()
return ds
def ask_query(uri):
if uri == "<https://www.w3.org/1999/02/22-rdf-syntax-ns#type>":
return 200, json.loads("{\"boolean\": \"True\"}")
uri = uri.replace("https://", "http://")
return query(u'ASK WHERE {{ {} ?u ?x }}'.format(uri))
def query(q):
q = q.replace("https://", "http://")
payload = (
('query', q),
('format', 'application/json'))
r = requests.get('http://dbpedia.org/sparql', params=payload)
return r.status_code, r.json()
def has_answer(t):
if "results" in t and len(t["results"]["bindings"]) > 0:
return True
if "boolean" in t:
return True
return False
if __name__ == "__main__":
with open('data/LC-QUAD/train-data.json', 'r', encoding='utf-8') as f:
train = json.load(f)
with open('data/LC-QUAD/test-data.json', 'r', encoding='utf-8') as f:
test = json.load(f)
data = train + test
print('data len: ', len(data))
with open("data/LC-QUAD/data.json", "w") as write_file:
json.dump(data, write_file)
ds = LC_Qaud(path="./data/LC-QUAD/data.json")
tmp = []
for qapair in prepare_dataset(ds).qapairs:
raw_row = dict()
raw_row["id"] = qapair.id.__str__()
raw_row["question"] = qapair.question.text
raw_row["sparql_query"] = qapair.sparql.query
try:
r = query(qapair.sparql.query)
raw_row["answers"] = r[1]
except Exception as e:
raw_row["answers"] = []
tmp.append(raw_row)
with open('data/LC-QUAD/linked_answer.json', 'w') as jsonFile:
json.dump(tmp, jsonFile)
print('data len: ', len(tmp))
|
StarcoderdataPython
|
158130
|
import json
import os
from dotenv import load_dotenv
import pytest
from ssaw import Client
@pytest.fixture(scope="session", autouse=True)
def load_env_vars(request):
curr_path = os.path.dirname(os.path.realpath(__file__))
env_path = os.path.join(curr_path, "tests/env_vars.sh")
load_dotenv(dotenv_path=env_path)
env_path = os.path.join(curr_path, "tests/env_vars_override.sh")
if os.path.isfile(env_path):
load_dotenv(dotenv_path=env_path, override=True)
@pytest.fixture(scope="session")
def session():
return Client(
os.environ.get("base_url"),
os.environ.get("SOLUTIONS_API_USER", ""),
os.environ.get("SOLUTIONS_API_PASSWORD", ""))
@pytest.fixture(scope="session")
def admin_session():
return Client(
os.environ.get("base_url"),
os.environ.get("admin_username", ""),
os.environ.get("admin_password", ""))
@pytest.fixture(scope="session")
def params():
return json.load(open("tests/params.json", mode="r"))
|
StarcoderdataPython
|
1655496
|
<gh_stars>1-10
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WriteResponseApiModel(Model):
"""Result of attribute write.
:param results: All results of attribute writes
:type results:
list[~azure-iiot-opc-twin.models.AttributeWriteResponseApiModel]
"""
_attribute_map = {
'results': {'key': 'results', 'type': '[AttributeWriteResponseApiModel]'},
}
def __init__(self, results=None):
super(WriteResponseApiModel, self).__init__()
self.results = results
|
StarcoderdataPython
|
3298121
|
from PyQt5.QtCore import Qt
from PyQt5.QtSql import QSqlTableModel
class libraryModel:
def __init__(self):
self.model = self.createTable()
@staticmethod
def createTable():
tableModel = QSqlTableModel()
tableModel.setTable("library")
tableModel.setEditStrategy(QSqlTableModel.OnFieldChange)
tableModel.select()
headers = ("id","title","author","status")
for c,h in enumerate(headers):
tableModel.setHeaderData(c,Qt.Horizontal,h)
return tableModel
def addBook(self,data):
row = self.model.rowCount()
self.model.insertRows(row,1)
for c,f in enumerate(data):
self.model.setData(self.model.index(row,c+1),f)
self.model.submitAll()
self.model.select()
def deleteBook(self,row):
self.model.removeRow(row)
self.model.submitAll()
self.model.select()
def issueBook(self,row):
pass
def returnBook(self,row):
pass
|
StarcoderdataPython
|
3377879
|
import socket
import os
import asyncio
from asyncio import run_coroutine_threadsafe
from rocketmq.client import PushConsumer, ConsumeStatus
from typing import Callable, Awaitable, Any, List, Dict
"""
在消息队列中,GroupId目的维持在并发条件下消费位点(offset)的一致性。
管理每个消费队列的不同消费组的消费进度是一个非常复杂的事情。消息会被多个消费者消费,
不同的是每个消费者只负责消费其中部分消费队列,添加或删除消费者,都会使负载发生变动,
容易造成消费进度冲突,因此需要集中管理。
因此,RocketMQ为了简化问题,尤其是集群消费模式时,采用远程模式管理offset,并且限制主题和标签组合。
这即RocketMQ所谓的。
消息处理函数位置作为和主题和tag定义作为一个唯一的group_id。
"""
import inspect
class Subscription:
def __init__(self, topic: str, expression: str,
handler: Callable[..., Awaitable[Any]],
depth: int = 0):
self._topic = topic
self._expression = expression
self._handler = handler
self._decorated_depth = depth
self._consumer = None
def subscribe(self, prefix, host, loop):
if not prefix.endswith("%"):
prefix += "%"
handler = self._handler
        ## The group_id should not include the function's line number, so that editing the code does not keep changing the group_id.
group_id = (
f"{prefix}{handler.__module__}%{handler.__qualname__}"
f"%{self._decorated_depth}"
)
group_id.replace(".", "%")
check_group_id(group_id)
consumer = PushConsumer(group_id=group_id)
consumer.set_thread_count(1)
consumer.set_name_server_address(host)
_subscribe_handler(loop, consumer, self._topic,
self._expression, handler)
consumer.start()
self._consumer = consumer
def unsubscribe(self):
if self._consumer:
self._consumer.shutdown()
self._consumer = None
_registry: Dict[Callable[..., Awaitable[Any]], List[Subscription]] = {}
class Channel:
def __init__(self, host: str = None) -> None:
self._name_srv_addrs = host
self._subscriptions = []
def topic(self, name: str):
check_topic_name(name)
return _TopicPort(self, name)
def subscribe(self, topic: str, expression: str = '*'):
def _decorator(handler: Callable[..., Awaitable[Any]]):
handler_subscriptions = _registry.get(handler)
if handler_subscriptions is None:
handler_subscriptions = []
_registry[handler] = handler_subscriptions
subscription = Subscription(topic, expression, handler, len(handler_subscriptions))
handler_subscriptions.append(subscription)
self._subscriptions.append(subscription)
return handler
return _decorator
async def start(self):
if len(self._subscriptions) == 0:
return
loop = asyncio.get_running_loop()
for listener in self._subscriptions:
listener.subscribe("demo", self._name_srv_addrs, loop)
print("started")
async def stop(self):
for listener in self._subscriptions:
listener.unsubscribe()
print("stopped")
async def __aenter__(self):
await self.start()
async def __aexit__(self, exec_type, value, traceback):
await self.stop()
class _TopicPort:
def __init__(self, channel: Channel, topic: str):
self._channel = channel
self._topic = topic
def __call__(self, expression: str = "*") -> Any:
def _decorator(handler: Callable[..., Awaitable[Any]]):
return self._channel.subscribe(self._topic, expression)(handler)
return _decorator
def _subscribe_handler(loop, consumer, topic, expression, handler):
def _callback(msg):
message_id = msg.id
message = msg.body
tags = msg.tags
try:
corou = handler(message_id, message)
future = run_coroutine_threadsafe(corou, loop)
_result = future.result()
return ConsumeStatus.CONSUME_SUCCESS
except Exception as exc:
return ConsumeStatus.RECONSUME_LATER
consumer.subscribe(topic, _callback, expression=expression)
import re
VALID_NAME_PATTERN = re.compile("^[%|a-zA-Z0-9_-]+$")
VALID_NAME_STR = (
"allowing only numbers, uppercase and lowercase letters,"
" '%', '|', '-' and '_' symbols"
)
class InvalidTopicName(Exception):
...
class InvalidGroupId(Exception):
...
def check_topic_name(name):
if not name:
raise InvalidTopicName("The topic name is empty")
if not VALID_NAME_PATTERN.match(name):
raise InvalidTopicName(f"the topic name '{name}' contains illegal characters, {VALID_NAME_STR}")
if len(name) > 127:
raise InvalidTopicName("the topic name is longer than name max length 127.")
def check_group_id(name):
if not name:
raise InvalidGroupId("The group_id is empty")
if not VALID_NAME_PATTERN.match(name):
raise InvalidGroupId(f"the group_id '{name}' contains illegal characters, {VALID_NAME_STR}")
if len(name) > 255:
raise InvalidGroupId("the group_id is longer than name max length 255.")
|
StarcoderdataPython
|
1695944
|
from freezegun import freeze_time
from openinghours.tests.tests import OpeningHoursTestCase
class FormsTestCase(OpeningHoursTestCase):
def setUp(self):
super(FormsTestCase, self).setUp()
def tearDown(self):
super(FormsTestCase, self).tearDown()
def test_hours_are_published(self):
response = self.client.get('/')
self.assertContains(response, '8:30am to 12:00pm')
self.assertContains(response, '10:00am to 1:00pm')
def test_edit_form(self):
self.tearDown()
post_data = {
'day1_1-opens': '11:30', 'day1_1-shuts': '17:30',
'day2_1-opens': '11:30', 'day2_1-shuts': '17:30',
'day3_1-opens': '11:30', 'day3_1-shuts': '17:30',
'day4_1-opens': '11:30', 'day4_1-shuts': '17:30',
'day5_1-opens': '11:30', 'day5_1-shuts': '17:30',
'day6_1-opens': '11:30', 'day6_1-shuts': '13:30',
'day7_1-opens': '00:00', 'day7_1-shuts': '00:00',
'day1_2-opens': '00:00', 'day1_2-shuts': '00:00',
'day2_2-opens': '00:00', 'day2_2-shuts': '00:00',
'day3_2-opens': '00:00', 'day3_2-shuts': '00:00',
'day4_2-opens': '00:00', 'day4_2-shuts': '00:00',
'day5_2-opens': '00:00', 'day5_2-shuts': '00:00',
'day6_2-opens': '00:00', 'day6_2-shuts': '00:00',
'day7_2-opens': '00:00', 'day7_2-shuts': '00:00',
}
post = self.client.post('/edit/1', post_data)
resp = self.client.get('/edit/1')
self.assertContains(resp, '<option value="11:30" selected', count=6)
self.assertContains(resp, '<option value="17:30" selected', count=5)
self.assertContains(resp, '<option value="00:00">', count=7*2*2)
resp2 = self.client.get('/')
self.assertContains(resp2, '11:30am')
self.assertContains(resp2, '5:30pm')
|
StarcoderdataPython
|
1771524
|
from __future__ import absolute_import
import numpy as np
from pyti import catch_errors
from six.moves import range
from numba import jit
@jit
def accumulation_distribution(close_data, high_data, low_data, volume):
"""
Accumulation/Distribution.
Formula:
    A/D = ((Ct - Lt) - (Ht - Ct)) / (Ht - Lt) * Vt + A/Dt-1
"""
catch_errors.check_for_input_len_diff(
close_data, high_data, low_data, volume
)
ad = np.zeros(len(close_data))
for idx in range(1, len(close_data)):
ad[idx] = (
(((close_data[idx] - low_data[idx]) -
(high_data[idx] - close_data[idx])) /
(high_data[idx] - low_data[idx]) *
volume[idx]) +
ad[idx-1]
)
return ad
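# Minimal usage sketch with made-up price and volume arrays; the values are
# illustrative only and simply show the expected equal-length inputs.
if __name__ == '__main__':
    close = np.array([10.0, 10.5, 10.2, 10.8])
    high = np.array([10.6, 10.9, 10.7, 11.0])
    low = np.array([9.8, 10.1, 10.0, 10.4])
    volume = np.array([1000.0, 1200.0, 900.0, 1500.0])
    # The first element stays 0; later values accumulate the money-flow terms.
    print(accumulation_distribution(close, high, low, volume))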
|
StarcoderdataPython
|
27565
|
"""Module defining DiagGGNPermute."""
from backpack.core.derivatives.permute import PermuteDerivatives
from backpack.extensions.secondorder.diag_ggn.diag_ggn_base import DiagGGNBaseModule
class DiagGGNPermute(DiagGGNBaseModule):
"""DiagGGN extension of Permute."""
def __init__(self):
"""Initialize."""
super().__init__(derivatives=PermuteDerivatives())
|
StarcoderdataPython
|
45221
|
<reponame>vaibhav162/Banking-Marketing-Project<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# # Importing Libraries and Dataset
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# In[2]:
bank= pd.read_csv(r"C:\Users\shruti\Desktop\Decodr\Project\Decodr Project\Bank marketing project\bank.csv", delimiter=";")
# In[3]:
bank.head()
# In[4]:
bank.tail()
# In[5]:
# Renaming "y" column with "deposit"
bank.rename(columns={"y":"deposit"}, inplace=True)
# In[6]:
bank.head()
# # Data Exploration
# In[7]:
# To get total number of rows
print("Bank Marketing Dataset contains {rows} rows.".format(rows=len(bank)))
# In[8]:
# To get percentage of missing values in each columns
missing_values= bank.isnull().mean()*100
missing_values.sum()
# ### Categorical Columns Exploration
# In[9]:
cat_columns = ['job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month','poutcome']
fig, axs = plt.subplots(3, 3, sharex=False, sharey=False, figsize=(10, 8))
counter = 0
for cat_column in cat_columns:
value_counts = bank[cat_column].value_counts()
trace_x = counter // 3
trace_y = counter % 3
x_pos = np.arange(0, len(value_counts))
axs[trace_x, trace_y].bar(x_pos, value_counts.values, tick_label = value_counts.index)
axs[trace_x, trace_y].set_title(cat_column)
for tick in axs[trace_x, trace_y].get_xticklabels():
tick.set_rotation(90)
counter += 1
plt.show()
# ### Numerical Columns Exploration
# In[10]:
num_columns = ['balance', 'day', 'duration', 'campaign', 'pdays', 'previous']
fig, axs = plt.subplots(2, 3, sharex=False, sharey=False, figsize=(10, 8))
counter = 0
for num_column in num_columns:
trace_x = counter // 3
trace_y = counter % 3
    # Plot the distribution of each numeric column.
    axs[trace_x, trace_y].hist(bank[num_column], bins=30)
axs[trace_x, trace_y].set_title(num_column)
counter += 1
plt.show()
# In[11]:
bank[["pdays", "campaign", "previous"]].describe()
# In[12]:
len(bank[bank["pdays"]> 400])/ len(bank)*100
# In[13]:
len(bank[bank["campaign"]> 34])/ len(bank)*100
# In[14]:
len(bank[bank["previous"]> 34])/ len(bank)*100
# ## Analysis of Categorical columns
# In[15]:
value_counts= bank["deposit"].value_counts()
value_counts.plot.bar(title= "Deposit value counts")
# In[16]:
# Plotting Deposit Vs Jobs
j_bank= pd.DataFrame()
j_bank["yes"]= bank[bank["deposit"] == "yes"]["job"].value_counts()
j_bank["no"]= bank[bank["deposit"] == "no"]["job"].value_counts()
j_bank.plot.bar(title= "Job & Deposit")
# In[17]:
# Plotting Deposit Vs Marital Status
j_bank= pd.DataFrame()
j_bank["yes"]= bank[bank["deposit"] == "yes"]["marital"].value_counts()
j_bank["no"]= bank[bank["deposit"] == "no"]["marital"].value_counts()
j_bank.plot.bar(title= "Marital Status & Deposit")
# In[18]:
# Plotting Deposite Vs Education
j_bank= pd.DataFrame()
j_bank["yes"]= bank[bank["deposit"] == "yes"]["education"].value_counts()
j_bank["no"]= bank[bank["deposit"] == "no"]["education"].value_counts()
j_bank.plot.bar(title= "Education & Deposit")
# In[19]:
# Plotting Deposit Vs Contact
j_bank= pd.DataFrame()
j_bank["yes"]= bank[bank["deposit"] == "yes"]["contact"].value_counts()
j_bank["no"]= bank[bank["deposit"] == "no"]["contact"].value_counts()
j_bank.plot.bar(title= "Contact & Deposit")
# ## Analysis of Numeric columns
# In[20]:
# Balance & Deposit
b_bank= pd.DataFrame()
b_bank['balance_yes'] = (bank[bank['deposit'] == 'yes'][['deposit','balance']].describe())['balance']
b_bank['balance_no'] = (bank[bank['deposit'] == 'no'][['deposit','balance']].describe())['balance']
b_bank
# In[21]:
b_bank.drop(["count", "25%", "50%", "75%"]).plot.bar(title= "Balance & Deposit Statistics")
# In[22]:
# Age & Deposit
b_bank= pd.DataFrame()
b_bank['age_yes'] = (bank[bank['deposit'] == 'yes'][['deposit','age']].describe())['age']
b_bank['age_no'] = (bank[bank['deposit'] == 'no'][['deposit','age']].describe())['age']
b_bank
# In[23]:
b_bank.drop(["count", "25%", "50%", "75%"]).plot.bar(title= "Age & Deposit Statistics")
# In[24]:
# Campaign & Deposit
b_bank= pd.DataFrame()
b_bank['campaign_yes'] = (bank[bank['deposit'] == 'yes'][['deposit','campaign']].describe())['campaign']
b_bank['campaign_no'] = (bank[bank['deposit'] == 'no'][['deposit','campaign']].describe())['campaign']
b_bank
# In[25]:
b_bank.drop(["count", "25%", "50%", "75%"]).plot.bar(title= "Campaign & Deposit Statistics")
# In[26]:
# Previous Campaign & Deposit
b_bank= pd.DataFrame()
b_bank['previous_yes'] = (bank[bank['deposit'] == 'yes'][['deposit','previous']].describe())['previous']
b_bank['previous_no'] = (bank[bank['deposit'] == 'no'][['deposit','previous']].describe())['previous']
b_bank
# In[27]:
b_bank.drop(["count", "25%", "50%", "75%"]).plot.bar(title= "Previous Campaign & Deposit Statistics")
# # Data Cleaning
# In[28]:
def get_dummy_from_bool(row, column_name):
"""Returns 0 if value in column_name is no, returns 1 if value in column_name is yes"""
return 1 if row[column_name] == "yes" else 0
def get_correct_values(row, column_name, threshold, bank):
"""Returns mean value if value in column_name is above threshold"""
if row[column_name] <= threshold:
return row[column_name]
else:
mean= bank[bank[column_name] <= threshold][column_name].mean()
return mean
def clean_data(bank):
'''
INPUT
df - pandas dataframe containing bank marketing campaign dataset
OUTPUT
df - cleaned dataset:
1. columns with 'yes' and 'no' values are converted into boolean variables;
2. categorical columns are converted into dummy variables;
3. drop irrelevant columns.
4. impute incorrect values
'''
cleaned_bank = bank.copy()
# Converting columns containing 'yes' and 'no' values to boolean variables and drop original columns
bool_columns = ['default', 'housing', 'loan', 'deposit']
for bool_col in bool_columns:
cleaned_bank[bool_col + '_bool'] = bank.apply(lambda row: get_dummy_from_bool(row, bool_col),axis=1)
cleaned_bank = cleaned_bank.drop(columns = bool_columns)
# Converting categorical columns to dummies
cat_columns = ['job', 'marital', 'education', 'contact', 'month', 'poutcome']
for col in cat_columns:
cleaned_bank = pd.concat([cleaned_bank.drop(col, axis=1),
pd.get_dummies(cleaned_bank[col], prefix=col, prefix_sep='_',
drop_first=True, dummy_na=False)], axis=1)
# Dropping irrelevant columns
cleaned_bank = cleaned_bank.drop(columns = ['pdays'])
# Imputing incorrect values and drop original columns
cleaned_bank['campaign_cleaned'] = bank.apply(lambda row: get_correct_values(row, 'campaign', 34, cleaned_bank),axis=1)
cleaned_bank['previous_cleaned'] = bank.apply(lambda row: get_correct_values(row, 'previous', 34, cleaned_bank),axis=1)
cleaned_bank = cleaned_bank.drop(columns = ['campaign', 'previous'])
return cleaned_bank
# In[29]:
cleaned_bank= clean_data(bank)
cleaned_bank.head()
# # Predicting Campaign Model
# ### Classification Model
# In[30]:
X= cleaned_bank.drop(columns= "deposit_bool")
y= cleaned_bank[["deposit_bool"]]
# In[31]:
TEST_SIZE = 0.3
RAND_STATE= 42
# In[41]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test= train_test_split(X, y, test_size= TEST_SIZE, random_state= RAND_STATE)
# In[42]:
# Install the xgboost package before running the next cell, e.g. "pip install xgboost"
# from a shell or "!pip install xgboost" inside Jupyter.
# In[43]:
import xgboost
import warnings
xgb = xgboost.XGBClassifier(n_estimators=100, learning_rate=0.08, gamma=0, subsample=0.75,
colsample_bytree=1, max_depth=7)
# In[44]:
xgb.fit(X_train, y_train.squeeze().values)
# In[45]:
y_train_preds= xgb.predict(X_train)
y_test_preds= xgb.predict(X_test)
# In[47]:
from sklearn.metrics import accuracy_score
print("XGB accuracy score for train data : %.3f and for test data : %.3f" % (accuracy_score(y_train, y_train_preds),
accuracy_score(y_test, y_test_preds)))
# # Get Feature Importance from Trained Model
# In[50]:
headers= ["name", "score"]
values= sorted(zip(X_train.columns, xgb.feature_importances_), key= lambda x: x[1]*-1)
xgb_feature_importances_=pd.DataFrame(values,columns=headers)
xgb_feature_importances_
# In[52]:
x_pos= np.arange(0, len(xgb_feature_importances_))
plt.figure(figsize=(10,8))
plt.bar(x_pos, xgb_feature_importances_["score"])
plt.xticks(x_pos, xgb_feature_importances_["name"])
plt.xticks(rotation=90)
plt.title("Feature Importance (XGB)")
plt.show()
# In[ ]:
|
StarcoderdataPython
|
3355148
|
from niaaml.classifiers.classifier import Classifier
from niaaml.classifiers.random_forest import RandomForest
from niaaml.classifiers.multi_layer_perceptron import MultiLayerPerceptron
from niaaml.classifiers.linear_svc import LinearSVC
from niaaml.classifiers.ada_boost import AdaBoost
from niaaml.classifiers.extremely_randomized_trees import ExtremelyRandomizedTrees
from niaaml.classifiers.bagging import Bagging
from niaaml.classifiers.decision_tree import DecisionTree
from niaaml.classifiers.k_neighbors import KNeighbors
from niaaml.classifiers.gaussian_process import GaussianProcess
from niaaml.classifiers.gaussian_naive_bayes import GaussianNB
from niaaml.classifiers.quadratic_driscriminant_analysis import QuadraticDiscriminantAnalysis
from niaaml.classifiers.utility import ClassifierFactory
__all__ = [
'Classifier',
'RandomForest',
'MultiLayerPerceptron',
'LinearSVC',
'AdaBoost',
'Bagging',
'ExtremelyRandomizedTrees',
'DecisionTree',
'KNeighbors',
'GaussianProcess',
'GaussianNB',
'QuadraticDiscriminantAnalysis',
'ClassifierFactory'
]
|
StarcoderdataPython
|
29098
|
<reponame>AugustinMascarelli/survol
"""
Computer system.
Scripts related to the class CIM_ComputerSystem.
"""
import sys
import socket
import lib_util
# This must be defined here, because dockit cannot load modules from here,
# and this ontology would not be defined.
def EntityOntology():
return ( ["Name"], )
import lib_common
from lib_properties import pc
# This returns a nice name given the parameter of the object.
def EntityName(entity_ids_arr):
entity_id = entity_ids_arr[0]
return entity_id
# We do not care about the entity_host as this is simply the machine from which
# this machine was detected, so nothing more than a computer on the same network.
def UniversalAlias(entity_ids_arr,entity_host,entity_class):
# TOO SLOW !!!
return "ThisComputer:"+entity_ids_arr[0].lower()
try:
# (entity_ids_arr=[u'desktop-ni99v8e'], entity_host='192.168.0.14', entity_class=u'CIM_ComputerSystem')
# might possibly throw:
# "[Errno 11004] getaddrinfo failed "
aHostName = lib_util.GlobalGetHostByName(entity_ids_arr[0])
except:
aHostName = entity_host
# Hostnames are case-insensitive, RFC4343 https://tools.ietf.org/html/rfc4343
return "ThisComputer:"+aHostName.lower()
# This adds the WBEM and WMI urls related to the entity.
def AddWbemWmiServers(grph,rootNode,entity_host, nameSpace, entity_type, entity_id):
DEBUG("AddWbemWmiServers entity_host=%s nameSpace=%s entity_type=%s entity_id=%s", entity_host,nameSpace,entity_type,entity_id)
if entity_host:
host_wbem_wmi = entity_host
else:
host_wbem_wmi = lib_util.currentHostname
# This receives a map and a RDF property, and must add the correspknding nodes to the rootNode
# int the given graph. The same callback signature is used elsewhere to generate HTML tables.
def AddWMap(theMap,propData):
for urlSubj in theMap:
grph.add( ( rootNode, propData, urlSubj ) )
for theProp, urlObj in theMap[urlSubj]:
grph.add( ( urlSubj, theProp, urlObj ) )
mapWbem = AddWbemServers(host_wbem_wmi, nameSpace, entity_type, entity_id)
AddWMap(mapWbem,pc.property_wbem_data)
mapWmi = AddWmiServers(host_wbem_wmi, nameSpace, entity_type, entity_id)
AddWMap(mapWmi,pc.property_wmi_data)
mapSurvol = AddSurvolServers(host_wbem_wmi, nameSpace, entity_type, entity_id)
AddWMap(mapSurvol,pc.property_survol_agent)
def AddWbemServers(entity_host, nameSpace, entity_type, entity_id):
DEBUG("AddWbemServers entity_host=%s nameSpace=%s entity_type=%s entity_id=%s",entity_host,nameSpace,entity_type,entity_id)
mapWbem = dict()
try:
# Maybe some of these servers are not able to display anything about this object.
import lib_wbem
wbem_servers_desc_list = lib_wbem.GetWbemUrlsTyped( entity_host, nameSpace, entity_type, entity_id )
# sys.stderr.write("wbem_servers_desc_list len=%d\n" % len(wbem_servers_desc_list))
for url_server in wbem_servers_desc_list:
# TODO: Filter only entity_host
# sys.stderr.write("url_server=%s\n" % str(url_server))
if lib_wbem.ValidClassWbem(entity_type):
wbemNode = lib_common.NodeUrl(url_server[0])
if entity_host:
txtLiteral = "WBEM url, host=%s class=%s"%(entity_host,entity_type)
else:
txtLiteral = "WBEM url, current host, class=%s"%(entity_type)
wbemHostNode = lib_common.gUriGen.HostnameUri( url_server[1] )
mapWbem[wbemNode] = [
( pc.property_information, lib_common.NodeLiteral(txtLiteral ) ),
( pc.property_host, wbemHostNode )
]
                # TODO: This could try to open an HTTP server on this machine, possibly on port 80.
# grph.add( ( wbemHostNode, pc.property_information, lib_common.NodeLiteral("Url to host") ) )
except ImportError:
pass
return mapWbem
def AddWmiServers(entity_host, nameSpace, entity_type, entity_id):
DEBUG("AddWmiServers entity_host=%s nameSpace=%s entity_type=%s entity_id=%s",entity_host,nameSpace,entity_type,entity_id)
# This will not work on Linux.
import lib_wmi
mapWmi = dict()
if lib_wmi.ValidClassWmi(entity_type):
# TODO: We may also loop on all machines which may describe this object.
wmiurl = lib_wmi.GetWmiUrl( entity_host, nameSpace, entity_type, entity_id )
# sys.stderr.write("wmiurl=%s\n" % str(wmiurl))
if wmiurl:
wmiNode = lib_common.NodeUrl(wmiurl)
if entity_host:
txtLiteral = "WMI url, host=%s class=%s"%(entity_host,entity_type)
else:
txtLiteral = "WMI url, current host, class=%s"%(entity_type)
mapWmi[wmiNode] = [
(pc.property_information, lib_common.NodeLiteral(txtLiteral))
]
if entity_host:
nodePortalWmi = lib_util.UrlPortalWmi(entity_host)
mapWmi[wmiNode].append(
(pc.property_rdf_data_nolist2, nodePortalWmi)
)
return mapWmi
def AddSurvolServers(entity_host, nameSpace, entity_type, entity_id):
DEBUG("AddSurvolServers entity_host=%s nameSpace=%s entity_type=%s entity_id=%s",entity_host,nameSpace,entity_type,entity_id)
mapSurvol = dict()
# TODO: Not implemented yet.
return mapSurvol
# g = geocoder.ip('172.16.17.32')
# g.json
# {'status': 'OK', 'city': u'Mountain View', 'ok': True, 'encoding': 'utf-8', 'ip': u'172.16.17.32',
# 'hostname': u'lhr35s10-in-f5.1e100.net', 'provider': 'ipinfo', 'state': u'California', 'location': '172.16.17.32',
# 'status_code': 200, 'country': u'US', 'lat': 37.4192, 'org': u'AS15169 Google Inc.', 'lng': -122.0574, 'postal': u'94043',
# 'address': u'Mountain View, California, US'}
#
# g = geocoder.ip('192.168.1.22')
# g.json
# {'status': 'ERROR - No results found', 'status_code': 200, 'encoding': 'utf-8', 'ip': u'192.168.1.22',
# 'location': '192.168.1.22', 'provider': 'ipinfo', 'ok': False}
def AddGeocoder(grph,node,ipv4):
try:
import geocoder
except ImportError:
return
try:
geoc = geocoder.ip(ipv4)
for jsonKey,jsonVal in geoc.json.iteritems():
# Conversion to str otherwise numbers are displayed as "float".
grph.add( ( node, lib_common.MakeProp(jsonKey), lib_common.NodeLiteral(str(jsonVal)) ) )
except Exception:
# This might be a simple time-out.
return
# The URL is hard-coded but very important because it allows visiting another host with WMI access.
def AddInfo(grph,node,entity_ids_arr):
theHostname = entity_ids_arr[0]
try:
ipv4 = lib_util.GlobalGetHostByName(theHostname)
except:
grph.add( ( node, pc.property_information, lib_common.NodeLiteral("Unknown machine") ) )
return
grph.add( ( node, lib_common.MakeProp("IP address"), lib_common.NodeLiteral(ipv4) ) )
fqdn = socket.getfqdn(theHostname)
grph.add( ( node, lib_common.MakeProp("FQDN"), lib_common.NodeLiteral(fqdn) ) )
# No need to do that, because it is done in entity.py if mode!=json.
# nameSpace = ""
# AddWbemWmiServers(grph,node,theHostname, nameSpace, "CIM_ComputerSystem", "Name="+theHostname)
AddGeocoder(grph,node,ipv4)
|
StarcoderdataPython
|
1651786
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from click.testing import CliRunner
from knipse import cli
class TestKnipse(unittest.TestCase):
def test_command_line_interface(self) -> None:
'''Test command line interface.'''
runner = CliRunner()
result = runner.invoke(cli.cli_knipse)
assert result.exit_code == 0
assert 'photo' in result.output
help_result = runner.invoke(cli.cli_knipse, ['--help'])
assert help_result.exit_code == 0
# number of spaces between `--help` and rest may change as further
# options are added to the cli (-> separate assertions)
assert '--help' in help_result.output
assert 'Show this message and exit.' in help_result.output
|
StarcoderdataPython
|
79621
|
<filename>test_integration/geopm_test_integration.py<gh_stars>0
#!/usr/bin/env python
#
# Copyright (c) 2015, 2016, 2017, 2018, 2019, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import sys
import unittest
import subprocess
import time
import pandas
import collections
import socket
import shlex
import json
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test_integration import util
from test_integration import geopm_test_launcher
import geopmpy.io
import geopmpy.launcher
def create_frequency_map_policy(min_freq, max_freq, frequency_map, use_env=False):
"""Create a frequency map to be consumed by the frequency map agent.
Arguments:
min_freq: Floor frequency for the agent
max_freq: Ceiling frequency for the agent
frequency_map: Dictionary mapping region names to frequencies
use_env: If true, apply the map to an environment variable, and return
the policy needed when the environment variable is in use.
Otherwise, clear the environment variable and return the policy
needed when the variable is not in use.
"""
policy = {'frequency_min': min_freq, 'frequency_max': max_freq}
known_hashes = {
'dgemm': 0x00000000a74bbf35,
'all2all': 0x000000003ddc81bf,
'stream': 0x00000000d691da00,
'sleep': 0x00000000536c798f,
'MPI_Barrier': 0x000000007b561f45,
'model-init': 0x00000000644f9787,
'unmarked-region': 0x00000000725e8066 }
if use_env:
os.environ['GEOPM_FREQUENCY_MAP'] = json.dumps(frequency_map)
else:
if 'GEOPM_FREQUENCY_MAP' in os.environ:
os.environ.pop('GEOPM_FREQUENCY_MAP')
for i, (region_name, frequency) in enumerate(frequency_map.items()):
region_hash = known_hashes[region_name]
policy['HASH_{}'.format(i)] = int(region_hash)
policy['FREQ_{}'.format(i)] = frequency
return policy
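# A brief illustration of the helper above (the frequency values are arbitrary
# examples). With the default use_env=False,
#   create_frequency_map_policy(1.0e9, 2.0e9, {'dgemm': 2.0e9, 'stream': 1.2e9})
# returns a policy dict of the form
#   {'frequency_min': 1.0e9, 'frequency_max': 2.0e9,
#    'HASH_0': 0xa74bbf35, 'FREQ_0': 2.0e9,
#    'HASH_1': 0xd691da00, 'FREQ_1': 1.2e9}
# (hash values taken from the known_hashes table, stored as plain ints), while
# use_env=True instead writes the map to the GEOPM_FREQUENCY_MAP environment
# variable as JSON and returns only the min/max fields.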
class TestIntegration(unittest.TestCase):
def setUp(self):
self.longMessage = True
self._agent = 'power_governor'
self._options = {'power_budget': 150}
self._tmp_files = []
self._output = None
self._power_limit = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
self._frequency = geopm_test_launcher.geopmread("MSR::PERF_CTL:FREQ board 0")
self._original_freq_map_env = os.environ.get('GEOPM_FREQUENCY_MAP')
def tearDown(self):
geopm_test_launcher.geopmwrite("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0 " + str(self._power_limit))
geopm_test_launcher.geopmwrite("MSR::PERF_CTL:FREQ board 0 " + str(self._frequency))
if sys.exc_info() == (None, None, None) and os.getenv('GEOPM_KEEP_FILES') is None:
if self._output is not None:
self._output.remove_files()
for ff in self._tmp_files:
try:
os.remove(ff)
except OSError:
pass
if self._original_freq_map_env is None:
if 'GEOPM_FREQUENCY_MAP' in os.environ:
os.environ.pop('GEOPM_FREQUENCY_MAP')
else:
os.environ['GEOPM_FREQUENCY_MAP'] = self._original_freq_map_env
def assertNear(self, a, b, epsilon=0.05, msg=''):
denom = a if a != 0 else 1
if abs((a - b) / denom) >= epsilon:
self.fail('The fractional difference between {a} and {b} is greater than {epsilon}. {msg}'.format(a=a, b=b, epsilon=epsilon, msg=msg))
def create_progress_df(self, df):
# Build a df with only the first region entry and the exit.
df = df.reset_index(drop=True)
last_index = 0
filtered_df = pandas.DataFrame()
row_list = []
progress_1s = df['REGION_PROGRESS'].loc[df['REGION_PROGRESS'] == 1]
for index, _ in progress_1s.iteritems():
row = df.loc[last_index:index].head(1)
row_list += [row[['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']]]
row = df.loc[last_index:index].tail(1)
row_list += [row[['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']]]
last_index = index + 1 # Set the next starting index to be one past where we are
filtered_df = pandas.concat(row_list)
return filtered_df
def test_report_and_trace_generation(self):
name = 'test_report_and_trace_generation'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
report = self._output.get_report_data(node_name=nn)
self.assertNotEqual(0, len(report))
trace = self._output.get_trace_data(node_name=nn)
self.assertNotEqual(0, len(trace))
def test_no_report_and_trace_generation(self):
name = 'test_no_report_and_trace_generation'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
@unittest.skipUnless('mr-fusion' in socket.gethostname(), "This test only enabled on known working systems.")
def test_report_and_trace_generation_pthread(self):
name = 'test_report_and_trace_generation_pthread'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.set_pmpi_ctl('pthread')
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
report = self._output.get_report_data(node_name=nn)
self.assertNotEqual(0, len(report))
trace = self._output.get_trace_data(node_name=nn)
self.assertNotEqual(0, len(trace))
@unittest.skipUnless(geopm_test_launcher.detect_launcher() != "aprun",
'ALPS does not support multi-application launch on the same nodes.')
@util.skip_unless_batch()
def test_report_and_trace_generation_application(self):
name = 'test_report_and_trace_generation_application'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.set_pmpi_ctl('application')
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
report = self._output.get_report_data(node_name=nn)
self.assertNotEqual(0, len(report))
trace = self._output.get_trace_data(node_name=nn)
self.assertNotEqual(0, len(trace))
@unittest.skipUnless(geopm_test_launcher.detect_launcher() == "srun" and os.getenv('SLURM_NODELIST') is None,
'Requires non-sbatch SLURM session for alloc\'d and idle nodes.')
def test_report_generation_all_nodes(self):
name = 'test_report_generation_all_nodes'
report_path = name + '.report'
num_node = 1
num_rank = 1
delay = 1.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
time.sleep(5) # Wait a moment to finish cleaning-up from a previous test
idle_nodes = launcher.get_idle_nodes()
idle_nodes_copy = list(idle_nodes)
alloc_nodes = launcher.get_alloc_nodes()
launcher.write_log(name, 'Idle nodes : {nodes}'.format(nodes=idle_nodes))
launcher.write_log(name, 'Alloc\'d nodes : {nodes}'.format(nodes=alloc_nodes))
node_names = []
for nn in idle_nodes_copy:
launcher.set_node_list(nn.split()) # Hack to convert string to list
try:
launcher.run(name)
node_names += nn.split()
except subprocess.CalledProcessError as e:
if e.returncode == 1 and nn not in launcher.get_idle_nodes():
launcher.write_log(name, '{node} has disappeared from the idle list!'.format(node=nn))
idle_nodes.remove(nn)
else:
launcher.write_log(name, 'Return code = {code}'.format(code=e.returncode))
raise e
ao = geopmpy.io.AppOutput(report_path, do_cache=False)
sleep_data = ao.get_report_data(node_name=nn, region='sleep')
app_data = ao.get_app_total_data(node_name=nn)
self.assertNotEqual(0, len(sleep_data))
self.assertNear(delay, sleep_data['runtime'].item())
self.assertGreater(app_data['runtime'].item(), sleep_data['runtime'].item())
self.assertEqual(1, sleep_data['count'].item())
self.assertEqual(len(node_names), len(idle_nodes))
def test_runtime(self):
name = 'test_runtime'
report_path = name + '.report'
num_node = 1
num_rank = 5
delay = 3.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
report = self._output.get_report_data(node_name=nn, region='sleep')
app_total = self._output.get_app_total_data(node_name=nn)
self.assertNear(delay, report['runtime'].item())
self.assertGreater(app_total['runtime'].item(), report['runtime'].item())
def test_runtime_epoch(self):
name = 'test_runtime_epoch'
report_path = name + '.report'
num_node = 1
num_rank = 5
delay = 3.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', delay)
app_conf.append_region('spin', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
spin_data = self._output.get_report_data(node_name=nn, region='spin')
sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
total_runtime = sleep_data['runtime'].item() + spin_data['runtime'].item()
self.assertNear(total_runtime, epoch_data['runtime'].item())
def test_epoch_data_valid(self):
name = 'test_epoch_data_valid'
report_path = name + '.report'
num_node = 1
num_rank = 1
big_o = 1.0
loop_count = 10
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('spin-unmarked', big_o)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
report = geopmpy.io.RawReport(report_path)
node_names = report.host_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
regions = report.region_names(nn)
self.assertTrue('model-init' not in regions)
totals = report.raw_totals(nn)
unmarked = report.raw_region(nn, 'unmarked-region')
epoch = report.raw_epoch(nn)
# Epoch has valid data
self.assertGreater(epoch['runtime (sec)'], 0)
self.assertGreater(epoch['sync-runtime (sec)'], 0)
self.assertGreater(epoch['package-energy (joules)'], 0)
self.assertGreater(epoch['dram-energy (joules)'], 0)
self.assertGreater(epoch['power (watts)'], 0)
self.assertGreater(epoch['frequency (%)'], 0)
self.assertGreater(epoch['frequency (Hz)'], 0)
self.assertEqual(epoch['count'], loop_count)
# Runtime
self.assertTrue(totals['runtime (sec)'] > unmarked['runtime (sec)'] >= epoch['runtime (sec)'],
'''The total runtime is NOT > the unmarked runtime or the unmarked runtime is NOT
>= the Epoch runtime.''')
# Package Energy (joules)
self.assertTrue(totals['package-energy (joules)'] >
unmarked['package-energy (joules)'] >=
epoch['package-energy (joules)'],
'''The total package energy (joules) is NOT > the unmarked package energy (joules)
or the unmarked package energy (joules) is NOT >= the Epoch package
energy (joules).''')
# DRAM Energy
self.assertTrue(totals['dram-energy (joules)'] >
unmarked['dram-energy (joules)'] >=
epoch['dram-energy (joules)'],
'''The total dram energy is NOT > the unmarked dram energy or the unmarked
dram energy is NOT >= the Epoch dram energy.''')
# Sync-runtime
self.assertTrue(unmarked['sync-runtime (sec)'] >= epoch['sync-runtime (sec)'],
'''The sync-runtime for the unmarked region is NOT >= the Epoch sync-runtime.''')
def test_runtime_nested(self):
name = 'test_runtime_nested'
report_path = name + '.report'
num_node = 1
num_rank = 1
delay = 1.0
loop_count = 2
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('nested-progress', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
spin_data = self._output.get_report_data(node_name=nn, region='spin')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
app_totals = self._output.get_app_total_data(node_name=nn)
# The spin sections of this region sleep for 'delay' seconds twice per loop.
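            # For example, with loop_count = 2 and delay = 1.0 the expected spin
            # runtime is 2 * 2 * 1.0 = 4.0 seconds, which is what is asserted below.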
self.assertNear(2 * loop_count * delay, spin_data['runtime'].item())
self.assertNear(spin_data['runtime'].item(), epoch_data['runtime'].item(), epsilon=0.01)
self.assertGreater(app_totals['network-time'].item(), 0)
self.assertGreater(0.1, app_totals['network-time'].item())
self.assertEqual(loop_count, spin_data['count'].item())
def test_trace_runtimes(self):
name = 'test_trace_runtimes'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
app_conf.append_region('dgemm', 1.0)
app_conf.append_region('all2all', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
trace_path, region_barrier=True)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
regions = self._output.get_region_names()
for nn in node_names:
trace = self._output.get_trace_data(node_name=nn)
app_totals = self._output.get_app_total_data(node_name=nn)
self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item(), msg='Application runtime failure, node_name={}.'.format(nn))
# Calculate runtime totals for each region in each trace, compare to report
tt = trace.reset_index(level='index') # move 'index' field from multiindex to columns
tt = tt.set_index(['REGION_HASH'], append=True) # add region_hash column to multiindex
tt_reg = tt.groupby(level=['REGION_HASH'])
for region_name in regions:
region_data = self._output.get_report_data(node_name=nn, region=region_name)
if (region_name not in ['unmarked-region', 'model-init', 'epoch'] and
not region_name.startswith('MPI_') and
region_data['sync_runtime'].item() != 0):
region_hash = region_data['id'].item()
trace_data = tt_reg.get_group(region_hash)
start_idx = trace_data.iloc[0]['index']
end_idx = trace_data.iloc[-1]['index'] + 1 # use time from sample after exiting region
start_time = tt.loc[tt['index'] == start_idx]['TIME'].item()
end_time = tt.loc[tt['index'] == end_idx]['TIME'].item()
trace_elapsed_time = end_time - start_time
msg = 'for region {rn} on node {nn}'.format(rn=region_name, nn=nn)
self.assertNear(trace_elapsed_time, region_data['sync_runtime'].item(), msg=msg)
#epoch
region_data = self._output.get_report_data(node_name=nn, region='epoch')
trace_elapsed_time = trace.iloc[-1]['TIME'] - trace['TIME'].loc[trace['EPOCH_COUNT'] == 0].iloc[0]
msg = 'for epoch on node {nn}'.format(nn=nn)
self.assertNear(trace_elapsed_time, region_data['runtime'].item(), msg=msg)
@util.skip_unless_config_enable('bloat')
def test_runtime_regulator(self):
name = 'test_runtime_regulator'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 4
loop_count = 20
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
sleep_big_o = 1.0
spin_big_o = 0.5
expected_region_runtime = {'spin': spin_big_o, 'sleep': sleep_big_o}
app_conf.append_region('sleep', sleep_big_o)
app_conf.append_region('spin', spin_big_o)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
regions = self._output.get_region_names()
for nn in node_names:
app_totals = self._output.get_app_total_data(node_name=nn)
trace = self._output.get_trace_data(node_name=nn)
self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item())
tt = trace.set_index(['REGION_HASH'], append=True)
tt = tt.groupby(level=['REGION_HASH'])
for region_name in regions:
region_data = self._output.get_report_data(node_name=nn, region=region_name)
if region_name not in ['unmarked-region', 'model-init', 'epoch'] and not region_name.startswith('MPI_') and region_data['runtime'].item() != 0:
trace_data = tt.get_group(region_data['id'].item())
filtered_df = self.create_progress_df(trace_data)
first_time = False
epsilon = 0.001 if region_name != 'sleep' else 0.05
for index, df in filtered_df.iterrows():
if df['REGION_PROGRESS'] == 1:
self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)
first_time = True
if first_time is True and df['REGION_PROGRESS'] == 0:
self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)
@util.skip_unless_run_long_tests()
@util.skip_unless_config_enable('bloat')
def test_region_runtimes(self):
name = 'test_region_runtimes'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
loop_count = 500
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('dgemm', 8.0)
app_conf.set_loop_count(loop_count)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, time_limit=900)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
# Calculate region times from traces
region_times = collections.defaultdict(lambda: collections.defaultdict(dict))
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn).set_index(['REGION_HASH'], append=True).groupby(level=['REGION_HASH'])
for region_hash, data in tt:
filtered_df = self.create_progress_df(data)
filtered_df = filtered_df.diff()
                # Progress values of 0 and 1 are not separated out before the diff, so only
                # rows where the REGION_PROGRESS diff equals 1 (a 0 -> 1 transition) matter here.
filtered_df = filtered_df.loc[filtered_df['REGION_PROGRESS'] == 1]
if len(filtered_df) > 1:
launcher.write_log(name, 'Region elapsed time stats from {} - {} :\n{}'\
.format(nn, region_hash, filtered_df['TIME'].describe()))
region_times[nn][region_hash] = filtered_df
launcher.write_log(name, '{}'.format('-' * 80))
# Loop through the reports to see if the region runtimes line up with what was calculated from the trace files above.
regions = self._output.get_region_names()
write_regions = True
for nn in node_names:
for region_name in regions:
rr = self._output.get_report_data(node_name=nn, region=region_name)
if (region_name != 'epoch' and
rr['id'].item() != 0 and
rr['count'].item() > 1):
if write_regions:
launcher.write_log(name, 'Region {} is {}.'.format(rr['id'].item(), region_name))
runtime = rr['sync_runtime'].item()
self.assertNear(runtime,
region_times[nn][rr['id'].item()]['TIME'].sum())
write_regions = False
# Test to ensure every region detected in the trace is captured in the report.
for nn in node_names:
report_ids = []
for region_name in regions:
rr = self._output.get_report_data(node_name=nn, region=region_name)
report_ids.append(rr['id'].item())
for region_hash in region_times[nn].keys():
self.assertTrue(region_hash in report_ids, msg='Report from {} missing region_hash {}'.format(nn, region_hash))
def test_progress(self):
name = 'test_progress'
report_path = name + '.report'
num_node = 1
num_rank = 4
delay = 3.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep-progress', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
app_total = self._output.get_app_total_data(node_name=nn)
self.assertNear(delay, sleep_data['runtime'].item())
self.assertGreater(app_total['runtime'].item(), sleep_data['runtime'].item())
self.assertEqual(1, sleep_data['count'].item())
def test_count(self):
name = 'test_count'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 4
delay = 0.01
loop_count = 100
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('spin', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
trace_data = self._output.get_trace_data(node_name=nn)
spin_data = self._output.get_report_data(node_name=nn, region='spin')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
self.assertNear(delay * loop_count, spin_data['runtime'].item())
self.assertEqual(loop_count, spin_data['count'].item())
self.assertEqual(loop_count, epoch_data['count'].item())
self.assertEqual(loop_count, trace_data['EPOCH_COUNT'][-1])
@util.skip_unless_run_long_tests()
def test_scaling(self):
"""
        This test will start at ${num_node} nodes and ranks. It will then call check_run() to
ensure that commands can be executed successfully on all of the allocated compute nodes.
Afterwards it will run the specified app config on each node and verify the reports. When
complete it will double num_node and run the steps again.
WARNING: This test can take a long time to run depending on the number of starting nodes and
the size of the allocation.
"""
name = 'test_scaling'
report_path = name + '.report'
num_node = 2
loop_count = 100
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('dgemm', 1.0)
app_conf.append_region('all2all', 1.0)
app_conf.set_loop_count(loop_count)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, time_limit=900)
check_successful = True
while check_successful:
launcher.set_num_node(num_node)
launcher.set_num_rank(num_node)
try:
launcher.check_run(name)
except subprocess.CalledProcessError as e:
                # If we exceed the available nodes in the allocation, ALPS/SLURM gives a return code of 1.
                # All other return codes are real errors.
if e.returncode != 1:
raise e
check_successful = False
if check_successful:
launcher.write_log(name, 'About to run on {} nodes.'.format(num_node))
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')
all2all_data = self._output.get_report_data(node_name=nn, region='all2all')
self.assertEqual(loop_count, dgemm_data['count'].item())
self.assertEqual(loop_count, all2all_data['count'].item())
self.assertGreater(dgemm_data['runtime'].item(), 0.0)
self.assertGreater(all2all_data['runtime'].item(), 0.0)
num_node *= 2
self._output.remove_files()
@util.skip_unless_run_long_tests()
def test_power_consumption(self):
name = 'test_power_consumption'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
loop_count = 500
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('dgemm', 8.0)
app_conf.set_loop_count(loop_count)
fam, mod = geopm_test_launcher.get_platform()
if fam == 6 and mod == 87:
# budget for KNL
self._options['power_budget'] = 130
else:
self._options['power_budget'] = 200
gov_agent_conf_path = name + '_gov_agent.config'
self._tmp_files.append(gov_agent_conf_path)
gov_agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
launcher = geopm_test_launcher.TestLauncher(app_conf, gov_agent_conf, report_path,
trace_path, time_limit=900)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.write_log(name, 'Power cap = {}W'.format(self._options['power_budget']))
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
all_power_data = {}
# Total power consumed will be Socket(s) + DRAM
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn)
first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]
epoch_dropped_data = tt[first_epoch_index:] # Drop all startup data
power_data = epoch_dropped_data.filter(regex='ENERGY')
power_data['TIME'] = epoch_dropped_data['TIME']
power_data = power_data.diff().dropna()
power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, inplace=True)
power_data = power_data.loc[(power_data != 0).all(axis=1)] # Will drop any row that is all 0's
pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]
dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]
power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']
pandas.set_option('display.width', 100)
launcher.write_log(name, 'Power stats from {} :\n{}'.format(nn, power_data.describe()))
all_power_data[nn] = power_data
for node_name, power_data in all_power_data.items():
# Allow for overages of 2% at the 75th percentile.
self.assertGreater(self._options['power_budget'] * 1.02, power_data['SOCKET_POWER'].quantile(.75))
# TODO Checks on the maximum power computed during the run?
# TODO Checks to see how much power was left on the table?
@util.skip_unless_run_long_tests()
@util.skip_unless_batch()
def test_power_balancer(self):
name = 'test_power_balancer'
num_node = 4
num_rank = 16
loop_count = 500
# Require that the balancer moves the maximum dgemm runtime at
# least 1/4 the distance to the mean dgemm runtime under the
# governor.
margin_factor = 0.25
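        # For example (illustrative numbers only): if the governor run's maximum dgemm
        # runtime is 100 s and its mean is 80 s, then margin = 0.25 * (100 - 80) = 5 s,
        # so the balancer's maximum dgemm runtime must come in below 100 - 5 = 95 s.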
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('dgemm-imbalance', 8.0)
app_conf.append_region('all2all', 0.05)
app_conf.set_loop_count(loop_count)
# Update app config with imbalance
alloc_nodes = geopm_test_launcher.TestLauncher.get_alloc_nodes()
for nn in range(len(alloc_nodes) // 2):
app_conf.append_imbalance(alloc_nodes[nn], 0.5)
fam, mod = geopm_test_launcher.get_platform()
if fam == 6 and mod == 87:
# budget for KNL
power_budget = 130
else:
power_budget = 200
self._options = {'power_budget': power_budget}
gov_agent_conf_path = name + '_gov_agent.config'
bal_agent_conf_path = name + '_bal_agent.config'
self._tmp_files.append(gov_agent_conf_path)
self._tmp_files.append(bal_agent_conf_path)
agent_list = ['power_governor', 'power_balancer']
path_dict = {'power_governor': gov_agent_conf_path, 'power_balancer': bal_agent_conf_path}
agent_runtime = dict()
for agent in agent_list:
agent_conf = geopmpy.io.AgentConf(path_dict[agent], agent, self._options)
run_name = '{}_{}'.format(name, agent)
report_path = '{}.report'.format(run_name)
trace_path = '{}.trace'.format(run_name)
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
trace_path, time_limit=2700)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.write_log(run_name, 'Power cap = {}W'.format(power_budget))
launcher.run(run_name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
power_limits = []
# Total power consumed will be Socket(s) + DRAM
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn)
first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]
epoch_dropped_data = tt[first_epoch_index:] # Drop all startup data
power_data = epoch_dropped_data.filter(regex='ENERGY')
power_data['TIME'] = epoch_dropped_data['TIME']
power_data = power_data.diff().dropna()
power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, inplace=True)
power_data = power_data.loc[(power_data != 0).all(axis=1)] # Will drop any row that is all 0's
pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]
dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]
power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']
pandas.set_option('display.width', 100)
launcher.write_log(name, 'Power stats from {} {} :\n{}'.format(agent, nn, power_data.describe()))
# Get final power limit set on the node
if agent == 'power_balancer':
power_limits.append(epoch_dropped_data['POWER_LIMIT'][-1])
if agent == 'power_balancer':
avg_power_limit = sum(power_limits) / len(power_limits)
self.assertTrue(avg_power_limit <= power_budget)
min_runtime = float('nan')
max_runtime = float('nan')
node_names = self._output.get_node_names()
runtime_list = []
for node_name in node_names:
epoch_data = self._output.get_report_data(node_name=node_name, region='dgemm')
runtime_list.append(epoch_data['runtime'].item())
if agent == 'power_governor':
mean_runtime = sum(runtime_list) / len(runtime_list)
max_runtime = max(runtime_list)
margin = margin_factor * (max_runtime - mean_runtime)
agent_runtime[agent] = max(runtime_list)
self.assertGreater(agent_runtime['power_governor'] - margin,
agent_runtime['power_balancer'],
"governor runtime: {}, balancer runtime: {}, margin: {}".format(
agent_runtime['power_governor'], agent_runtime['power_balancer'], margin))
def test_progress_exit(self):
"""
        Check that we always see progress exit before the next entry.
Make sure that progress only decreases when a new region is entered.
"""
name = 'test_progress_exit'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 16
loop_count = 100
big_o = 0.1
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('dgemm-progress', big_o)
app_conf.append_region('spin-progress', big_o)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn)
tt = tt.set_index(['REGION_HASH'], append=True)
tt = tt.groupby(level=['REGION_HASH'])
for region_hash, data in tt:
tmp = data['REGION_PROGRESS'].diff()
#@todo legacy branch?
# Look for changes in progress that are more negative
# than can be expected due to extrapolation error.
if region_hash == 8300189175:
negative_progress = tmp.loc[(tmp > -1) & (tmp < -0.1)]
launcher.write_log(name, '{}'.format(negative_progress))
self.assertEqual(0, len(negative_progress))
@util.skip_unless_run_long_tests()
@util.skip_unless_optimized()
def test_sample_rate(self):
"""
Check that sample rate is regular and fast.
"""
name = 'test_sample_rate'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 16
loop_count = 10
big_o = 10.0
region = 'dgemm-progress'
max_mean = 0.01 # 10 millisecond max sample period
max_nstd = 0.1 # 10% normalized standard deviation (std / mean)
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region(region, big_o)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn)
delta_t = tt['TIME'].diff()
delta_t = delta_t.loc[delta_t != 0]
self.assertGreater(max_mean, delta_t.mean())
# WARNING : The following line may mask issues in the sampling rate. To do a fine grained analysis, comment
# out the next line and do NOT run on the BSP. This will require modifications to the launcher or manual testing.
size_orig = len(delta_t)
delta_t = delta_t[(delta_t - delta_t.mean()) < 3*delta_t.std()] # Only keep samples within 3 stds of the mean
self.assertGreater(0.06, 1 - (float(len(delta_t)) / size_orig))
self.assertGreater(max_nstd, delta_t.std() / delta_t.mean())
def test_network_times(self):
name = 'test_network_times'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
app_conf.append_region('dgemm', 1.0)
app_conf.append_region('all2all', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
all2all_data = self._output.get_report_data(node_name=nn, region='all2all')
sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')
barrier_data = self._output.get_report_data(node_name=nn, region='MPI_Barrier')
unmarked_data = self._output.get_report_data(node_name=nn, region='unmarked-region')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
app_total = self._output.get_app_total_data(node_name=nn)
self.assertEqual(0, unmarked_data['count'].item())
            # Since MPI time is counted if any rank on a node is in
# an MPI call, but region time is counted only when all
# ranks on a node are in a region, we must use the
# unmarked-region time as our error term when comparing
# MPI time and all2all time.
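            # For example (illustrative numbers only): with 0.2 s of unmarked-region runtime
            # and 2.0 s of all2all network time, mpi_epsilon = max(0.2 / 2.0, 0.05) = 0.1,
            # which is used as the epsilon for the assertNear check below.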
mpi_epsilon = max(unmarked_data['runtime'].item() / all2all_data['network_time'].item(), 0.05)
self.assertNear(all2all_data['network_time'].item(), all2all_data['runtime'].item(), mpi_epsilon)
self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),
epoch_data['network_time'].item())
            # TODO: naming is inconsistent ('network_time' vs 'network-time'); can one form be used everywhere?
self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),
app_total['network-time'].item())
self.assertEqual(0, unmarked_data['network_time'].item())
self.assertEqual(0, sleep_data['network_time'].item())
self.assertEqual(0, dgemm_data['network_time'].item())
def test_ignore_runtime(self):
name = 'test_ignore_runtime'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('ignore', 1.0)
app_conf.append_region('dgemm', 1.0)
app_conf.append_region('all2all', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
ignore_data = self._output.get_report_data(node_name=nn, region='ignore')
app_data = self._output.get_app_total_data(node_name=nn)
self.assertNear(ignore_data['runtime'].item(),
app_data['ignore-runtime'].item(), 0.00005)
@util.skip_unless_config_enable('ompt')
def test_unmarked_ompt(self):
name = 'test_unmarked_ompt'
report_path = name + '.report'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('stream-unmarked', 1.0)
app_conf.append_region('dgemm-unmarked', 1.0)
app_conf.append_region('all2all-unmarked', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
stream_id = None
region_names = self._output.get_region_names()
stream_name = [key for key in region_names if key.lower().find('stream') != -1][0]
for nn in node_names:
stream_data = self._output.get_report_data(node_name=nn, region=stream_name)
found = False
for name in region_names:
if stream_name in name: # account for numbers at end of OMPT region names
found = True
self.assertTrue(found)
self.assertEqual(1, stream_data['count'].item())
if stream_id:
self.assertEqual(stream_id, stream_data['id'].item())
else:
stream_id = stream_data['id'].item()
ompt_regions = [key for key in region_names if key.startswith('[OMPT]')]
self.assertLessEqual(2, len(ompt_regions))
self.assertTrue(('MPI_Alltoall' in region_names))
gemm_region = [key for key in region_names if key.lower().find('gemm') != -1]
self.assertLessEqual(1, len(gemm_region))
def _test_agent_frequency_map(self, name, use_env=False):
min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
max_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MAX board 0")
sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")
self._agent = "frequency_map"
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 4
loop_count = 5
dgemm_bigo = 15.0
stream_bigo = 1.0
dgemm_bigo_jlse = 35.647
dgemm_bigo_quartz = 29.12
stream_bigo_jlse = 1.6225
stream_bigo_quartz = 1.7941
hostname = socket.gethostname()
if hostname.endswith('.alcf.anl.gov'):
dgemm_bigo = dgemm_bigo_jlse
stream_bigo = stream_bigo_jlse
elif hostname.startswith('mcfly'):
dgemm_bigo = 42.0
stream_bigo = 1.75
elif hostname.startswith('quartz'):
dgemm_bigo = dgemm_bigo_quartz
stream_bigo = stream_bigo_quartz
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('dgemm', dgemm_bigo)
app_conf.append_region('stream', stream_bigo)
app_conf.append_region('all2all', 1.0)
app_conf.write()
freq_map = {}
freq_map['dgemm'] = sticker_freq
freq_map['stream'] = sticker_freq - 2 * freq_step
freq_map['all2all'] = min_freq
self._options = create_frequency_map_policy(min_freq, max_freq, freq_map, use_env)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
trace_path, region_barrier=True, time_limit=900)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
regions = self._output.get_region_names()
for nn in node_names:
for region_name in regions:
region_data = self._output.get_report_data(node_name=nn, region=region_name)
if (region_name in ['dgemm', 'stream', 'all2all']):
                    # TODO: verify trace frequencies
                    # TODO: verify agent report augment frequencies
msg = region_name + " frequency should be near assigned map frequency"
self.assertNear(region_data['frequency'].item(), freq_map[region_name] / sticker_freq * 100, msg=msg)
def test_agent_frequency_map_env(self):
"""
Test of the FrequencyMapAgent, setting a map through GEOPM_FREQUENCY_MAP.
"""
self._test_agent_frequency_map('test_agent_frequency_map_env', use_env=True)
def test_agent_frequency_map_policy(self):
"""
Test of the FrequencyMapAgent, setting a map through the policy.
"""
self._test_agent_frequency_map('test_agent_frequency_map_policy', use_env=False)
def test_agent_energy_efficient_single_region(self):
"""
Test of the EnergyEfficientAgent against single region loop.
"""
name = 'test_energy_efficient_single_region'
min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")
self._agent = "energy_efficient"
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 4
loop_count = 100
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('spin', 0.1)
self._options = {'frequency_min': min_freq,
'frequency_max': sticker_freq}
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
regions = self._output.get_region_names()
for nn in node_names:
for region_name in regions:
report = geopmpy.io.RawReport(report_path)
if (region_name in ['spin']):
region = report.raw_region(nn, region_name)
msg = region_name + " frequency should be minimum frequency as specified by policy"
self.assertEqual(region['requested-online-frequency'], min_freq, msg=msg) # freq should reduce
@util.skip_unless_run_long_tests()
@util.skip_unless_cpufreq()
@util.skip_unless_batch()
def test_agent_energy_efficient(self):
"""
Test of the EnergyEfficientAgent.
"""
name = 'test_energy_efficient_sticker'
min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
max_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MAX board 0")
sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")
self._agent = "energy_efficient"
num_node = 1
num_rank = 4
loop_count = 200
dgemm_bigo = 15.0
stream_bigo = 1.0
dgemm_bigo_jlse = 35.647
dgemm_bigo_quartz = 29.12
stream_bigo_jlse = 1.6225
stream_bigo_quartz = 1.7941
hostname = socket.gethostname()
if hostname.endswith('.alcf.anl.gov'):
dgemm_bigo = dgemm_bigo_jlse
stream_bigo = stream_bigo_jlse
elif hostname.startswith('mcfly'):
dgemm_bigo = 42.0
stream_bigo = 1.75
elif hostname.startswith('quartz'):
dgemm_bigo = dgemm_bigo_quartz
stream_bigo = stream_bigo_quartz
run = ['_sticker', '_nan_nan']
for rr in run:
report_path = name + rr + '.report'
trace_path = name + rr + '.trace'
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('dgemm', dgemm_bigo)
app_conf.append_region('stream', stream_bigo)
app_conf.write()
if rr == '_sticker':
self._options = {'frequency_min': sticker_freq,
'frequency_max': sticker_freq}
freq = sticker_freq
else:
self._options = {'frequency_min': min_freq,
'frequency_max': sticker_freq}
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
trace_path, region_barrier=True, time_limit=900)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name + rr)
# compare the app_total runtime and energy and assert within bounds
report_path = name + run[0] + '.report'
trace_path = name + run[0] + '.trace'
sticker_out = geopmpy.io.AppOutput(report_path, trace_path + '*')
report_path = name + run[1] + '.report'
trace_path = name + run[1] + '.trace'
nan_out = geopmpy.io.AppOutput(report_path, trace_path + '*')
for nn in nan_out.get_node_names():
sticker_app_total = sticker_out.get_app_total_data(node_name=nn)
nan_app_total = nan_out.get_app_total_data(node_name=nn)
runtime_savings_epoch = (sticker_app_total['runtime'].item() - nan_app_total['runtime'].item()) / sticker_app_total['runtime'].item()
energy_savings_epoch = (sticker_app_total['energy-package'].item() - nan_app_total['energy-package'].item()) / sticker_app_total['energy-package'].item()
self.assertLess(-0.1, runtime_savings_epoch) # want -10% or better
self.assertLess(0.0, energy_savings_epoch)
class TestIntegrationGeopmio(unittest.TestCase):
''' Tests of geopmread and geopmwrite.'''
def setUp(self):
self.skip_warning_string = 'Incompatible CPU'
def check_output(self, args, expected):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for exp in expected:
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line:
line = proc.stdout.readline()
self.assertIn(exp.encode(), line)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_output_range(self, args, min_exp, max_exp):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() in line:
continue
if line.startswith(b'0x'):
value = int(line)
else:
value = float(line)
self.assertLessEqual(min_exp, value, msg="Value read for {} smaller than {}: {}.".format(args, min_exp, value))
self.assertGreaterEqual(max_exp, value, msg="Value read for {} larger than {}: {}.".format(args, max_exp, value))
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_no_error(self, args):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def test_geopmread_command_line(self):
'''
Check that geopmread commandline arguments work.
'''
self.exec_name = "geopmread"
# no args
self.check_no_error([])
# domain flag
self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',
'board_memory', 'package_memory',
'board_nic', 'package_nic',
'board_accelerator', 'package_accelerator'])
self.check_output(['--domain', 'TIME'], ['cpu'])
# read signal
self.check_no_error(['TIME', 'board', '0'])
# info
self.check_no_error(['--info'])
self.check_output(['--info', 'TIME'], ['Time in seconds'])
# errors
read_err = 'domain type and domain index are required'
self.check_output(['TIME'], [read_err])
self.check_output(['TIME', 'board'], [read_err])
self.check_output(['TIME', 'board', 'bad'], ['invalid domain index'])
self.check_output(['FREQUENCY', 'package', '111'], ['cannot read signal'])
self.check_output(['ENERGY_PACKAGE', 'cpu', '0'], ['cannot read signal'])
self.check_output(['INVALID', 'board', '0'], ['cannot read signal'])
self.check_output(['--domain', 'INVALID'], ['unable to determine signal type'])
self.check_output(['--domain', '--info'], ['info about domain not implemented'])
@util.skip_unless_batch()
def test_geopmread_all_signal_agg(self):
'''
Check that all reported signals can be read for board, aggregating if necessary.
'''
self.exec_name = "geopmread"
all_signals = []
try:
proc = subprocess.Popen([self.exec_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
all_signals.append(line.strip())
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
for sig in all_signals:
self.check_no_error([sig.decode(), 'board', '0'])
@util.skip_unless_batch()
def test_geopmread_signal_value(self):
'''
Check that some specific signals give a sane value.
'''
self.exec_name = "geopmread"
signal_range = {
"POWER_PACKAGE": (20, 400),
"FREQUENCY": (1.0e8, 5.0e9),
"TIME": (0, 10), # time in sec to start geopmread
"TEMPERATURE_CORE": (0, 100)
}
for signal, val_range in signal_range.items():
try:
self.check_no_error([signal, "board", "0"])
except:
                # Any failure to read the signal is re-raised rather than skipped.
                raise
else:
self.check_output_range([signal, "board", "0"], *val_range)
def test_geopmread_custom_msr(self):
'''
Check that MSRIOGroup picks up additional MSRs in path.
'''
self.exec_name = "geopmread"
path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.realpath(__file__))),
'examples/custom_msr/')
custom_env = os.environ.copy()
custom_env['GEOPM_PLUGIN_PATH'] = path
all_signals = []
try:
proc = subprocess.Popen([self.exec_name], env=custom_env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
all_signals.append(line.strip())
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
self.assertIn(b'MSR::CORE_PERF_LIMIT_REASONS#', all_signals)
def test_geopmwrite_command_line(self):
'''
Check that geopmwrite commandline arguments work.
'''
self.exec_name = "geopmwrite"
# no args
self.check_no_error([])
# domain flag
self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',
'board_memory', 'package_memory',
'board_nic', 'package_nic',
'board_accelerator', 'package_accelerator'])
self.check_no_error(['--domain', 'FREQUENCY'])
# info
self.check_no_error(['--info'])
self.check_output(['--info', 'FREQUENCY'], ['processor frequency'])
# errors
write_err = 'domain type, domain index, and value are required'
self.check_output(['FREQUENCY'], [write_err])
self.check_output(['FREQUENCY', 'board'], [write_err])
self.check_output(['FREQUENCY', 'board', '0'], [write_err])
self.check_output(['FREQUENCY', 'board', 'bad', '0'], ['invalid domain index'])
self.check_output(['FREQUENCY', 'board', '0', 'bad'], ['invalid write value'])
self.check_output(['FREQUENCY', 'package', '111', '0'], ['cannot write control'])
self.check_output(['FREQUENCY', 'board_nic', '0', '0'], ['cannot write control'])
self.check_output(['INVALID', 'board', '0', '0'], ['cannot write control'])
self.check_output(['--domain', 'INVALID'], ['unable to determine control type'])
self.check_output(['--domain', '--info'], ['info about domain not implemented'])
@util.skip_unless_batch()
def test_geopmwrite_set_freq(self):
'''
Check that geopmwrite can be used to set frequency.
'''
def read_stdout_line(stdout):
line = stdout.readline()
while self.skip_warning_string.encode() in line:
line = stdout.readline()
return line.strip()
def read_current_freq(domain, signal='FREQUENCY'):
read_proc = subprocess.Popen(['geopmread', signal, domain, '0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
freq = read_stdout_line(read_proc.stdout)
freq = float(freq)
return freq
def read_min_max_freq():
read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MIN', 'board', '0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
min_freq = read_stdout_line(read_proc.stdout)
min_freq = float(int(float(min_freq)/1e8)*1e8) # convert to multiple of 1e8
read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MAX', 'board', '0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
max_freq = read_stdout_line(read_proc.stdout)
max_freq = float(int(float(max_freq)/1e8)*1e8)
return min_freq, max_freq
self.exec_name = "geopmwrite"
read_proc = subprocess.Popen(['geopmread', '--domain', 'FREQUENCY'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
read_domain = read_stdout_line(read_proc.stdout).decode()
write_proc = subprocess.Popen([self.exec_name, '--domain', 'FREQUENCY'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
write_domain = read_stdout_line(write_proc.stdout).decode()
min_freq, max_freq = read_min_max_freq()
old_freq = read_current_freq(write_domain, 'MSR::PERF_CTL:FREQ')
self.assertLess(old_freq, max_freq * 2)
self.assertGreater(old_freq, min_freq - 1e8)
# set to min and check
self.check_no_error(['FREQUENCY', write_domain, '0', str(min_freq)])
result = read_current_freq(read_domain)
self.assertEqual(min_freq, result)
# set to max and check
self.check_no_error(['FREQUENCY', write_domain, '0', str(max_freq)])
result = read_current_freq(read_domain)
self.assertEqual(max_freq, result)
self.check_no_error(['FREQUENCY', write_domain, '0', str(old_freq)])
class TestIntegrationGeopmagent(unittest.TestCase):
''' Tests of geopmagent.'''
def setUp(self):
self.exec_name = 'geopmagent'
self.skip_warning_string = 'Incompatible CPU frequency driver/governor'
def check_output(self, args, expected):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for exp in expected:
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line or line == b'\n':
line = proc.stdout.readline()
self.assertIn(exp.encode(), line)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_json_output(self, args, expected):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line or line == b'\n':
line = proc.stdout.readline()
try:
out_json = json.loads(line.decode())
except ValueError:
self.fail('Could not convert json string: {}\n'.format(line))
self.assertEqual(expected, out_json)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
def check_no_error(self, args):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def test_geopmagent_command_line(self):
'''
Check that geopmagent commandline arguments work.
'''
# no args
agent_names = ['monitor', 'power_balancer', 'power_governor',
'energy_efficient', 'frequency_map']
self.check_output([], agent_names)
# help message
self.check_output(['--help'], ['Usage'])
# version
self.check_no_error(['--version'])
# agent policy and sample names
for agent in agent_names:
self.check_output(['--agent', agent],
['Policy', 'Sample'])
# policy file
self.check_json_output(['--agent', 'monitor', '--policy', 'None'],
{})
self.check_json_output(['--agent', 'power_governor', '--policy', '150'],
{'POWER_PACKAGE_LIMIT_TOTAL': 150})
# default value policy
self.check_json_output(['--agent', 'power_governor', '--policy', 'NAN'],
{'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
self.check_json_output(['--agent', 'power_governor', '--policy', 'nan'],
{'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,nan'],
{'FREQ_MIN': 'NAN', 'FREQ_MAX': 'NAN'})
self.check_json_output(['--agent', 'energy_efficient', '--policy', '1.2e9,nan'],
{'FREQ_MIN': 1.2e9, 'FREQ_MAX': 'NAN'})
self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,1.3e9'],
{'FREQ_MIN': 'NAN', 'FREQ_MAX': 1.3e9})
# unspecified policy values are accepted
self.check_json_output(['--agent', 'power_balancer', '--policy', '150'],
{'POWER_PACKAGE_LIMIT_TOTAL': 150})
# errors
self.check_output(['--agent', 'power_governor', '--policy', 'None'],
['not a valid floating-point number', 'Invalid argument'])
self.check_output(['--agent', 'monitor', '--policy', '300'],
['agent takes no parameters', 'Invalid argument'])
self.check_output(['--agent', 'energy_efficient', '--policy', '2.0e9,5.0e9,4.5e9,6.7,4.2'],
['Number of policies', 'Invalid argument'])
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3377760
|
class Solution:
def XXX(self, x: int) -> int:
INT_MIN, INT_MAX = -2**31, 2**31 - 1
if str(x).startswith('-'):
y = -int(str(x)[1:][::-1])
else:
y = int(str(x)[::-1])
if (y < INT_MIN or y > INT_MAX):
return 0
return y
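# Illustrative examples (not part of the original submission):
#   Solution().XXX(-123)        -> -321
#   Solution().XXX(120)         -> 21
#   Solution().XXX(1534236469)  -> 0   (9646324351 exceeds 2**31 - 1, so 0 is returned)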
|
StarcoderdataPython
|
1704281
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-21 14:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('falmer_auth', '0003_auto_20170417_2124'),
]
operations = [
migrations.AlterField(
model_name='falmeruser',
name='name',
field=models.CharField(blank=True, default='', max_length=128),
),
]
|
StarcoderdataPython
|
96057
|
import datetime
import time
import tweepy
from plyer import notification
from tweepy import OAuthHandler
import settings
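# Note (assumption): the local `settings` module is expected to define the four Twitter
# credential constants used below: CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN and ACCESS_SECRET.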
# List of keywords to monitor
words = ["GitHub", "AWS", "Slack", "Gmail", "障害"]
auth = OAuthHandler(settings.CONSUMER_KEY, settings.CONSUMER_SECRET)
auth.set_access_token(settings.ACCESS_TOKEN, settings.ACCESS_SECRET)
api = tweepy.API(auth)
lower_words = list(map(str.lower, words))
print("Start monitoring trends")
print(words)
hit_words = []
while True:
now = datetime.datetime.now().strftime("%Y/%m/%d %H:%M")
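    # 23424856 is the WOEID (Where On Earth ID) for Japan; get_place_trends() returns a
    # list whose first element holds the current trends for that location.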
trends = api.get_place_trends(23424856)[0]["trends"]
for i, trend in enumerate(trends):
rank = i + 1
lower_name = str.lower(trend["name"])
for _ in lower_words:
if _ in lower_name:
if trend['name'] not in hit_words:
hit_words.append(trend['name'])
print(f"{now} [{rank}位] {trend['name']}")
notification.notify(
title=f"{trend['name']}",
message=f"{rank}位",
)
hit_words = []
time.sleep(60)
|
StarcoderdataPython
|
4832108
|
<reponame>tlambert03/image-demos
"""
Displays covid19 data from omero
"""
# import s3fs
# import zarr
#
# s3 = s3fs.S3FileSystem(anon=True, client_kwargs={'endpoint_url': 'https://s3.embassy.ebi.ac.uk/'})
# store = s3fs.S3Map(root='idr/zarr/v0.1/9822151.zarr', s3=s3, check=False)
# root = zarr.group(store=store)
# resolutions = [root['/' + str(i)] for i in list(range(11))]
#
# for i, r in enumerate(resolutions):
# if i == 0:
# print('res ', i, len(resolutions))
# r.to_zarr('data/covid19/9822152/' + str(i) + '.zarr')
import napari
import dask.array as da
path = 'https://s3.embassy.ebi.ac.uk/idr/zarr/v0.1/9822151.zarr'
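# Note: components '0' through '10' are the levels of a multiscale pyramid; indexing with
# [0, 0, 0] selects the first entry along the three leading axes so that a list of 2D
# planes is passed to napari, which displays the list as a single multiscale image.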
resolutions = [da.from_zarr(path, component=str(i))[0, 0, 0] for i in list(range(11))]
with napari.gui_qt():
viewer = napari.Viewer()
viewer.add_image(resolutions)
|
StarcoderdataPython
|
1797105
|
def sequence():
    first = int(input("Please enter the first term of a sequence: "))
    diff = int(input("Please enter the common difference: "))
    terms = int(input("Please enter the amount of terms you would like to display: "))
    # Print the arithmetic sequence term by term.
    term = first
    for i in range(terms):
        print(f"Term {i + 1}: {term}")
        term += diff
if __name__ == "__main__":
sequence()
|
StarcoderdataPython
|
1738159
|
<gh_stars>1-10
from collections import deque
import numpy as np
class HistoryBuffer(object):
def __init__(self, history_len):
self.shapes = None
self._buffers = None
self._history_len = history_len
def update(self, *args):
if self.shapes is None:
self.shapes = [np.shape(arg) for arg in args]
self.reset()
assert len(args) == len(self._buffers) == len(self.shapes)
for arg, shape in zip(args, self.shapes):
assert arg.shape == shape
for arg, buffer in zip(args, self._buffers):
buffer += [arg]
def reset(self):
if self.shapes is not None:
def initialize(shape):
return deque(
[np.zeros(shape) for _ in range(self._history_len)],
maxlen=self._history_len)
self._buffers = list(map(initialize, self.shapes))
def get(self):
if self.shapes is None:
raise RuntimeError(
"Shapes not specified. Call `update` before calling `get`.")
return tuple(
np.concatenate(buffer, axis=-1) for buffer in self._buffers)
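# Minimal usage sketch (illustrative, not part of the original module): shapes are inferred
# from the first `update` call, unfilled history slots are zero-padded, and `get` returns
# the last `history_len` entries of each stream concatenated along the final axis.
if __name__ == "__main__":
    buf = HistoryBuffer(history_len=3)
    buf.update(np.array([1.0, 2.0]), np.array([0.5]))
    buf.update(np.array([3.0, 4.0]), np.array([1.5]))
    obs_hist, act_hist = buf.get()
    print(obs_hist)  # [0. 0. 1. 2. 3. 4.] -> zero padding, then the two updates
    print(act_hist)  # [0.  0.5 1.5]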
|
StarcoderdataPython
|
1615940
|
import argparse
import os
import shlex
import unittest
from gooey.gui import formatters
class TestFormatters(unittest.TestCase):
def test_counter_formatter(self):
"""
Should return the first option repeated N times
None if N is unspecified
Issue #316 - using long-form argument caused formatter to produce incorrect output
"""
expected_outputs = [
(['-v', '--verbose'], '-v', 1),
(['-v', '--verbose'], '-v -v', 2),
(['-v', '--verbose'], '-v -v -v', 3),
(['-v', '--verbose'], '', 0),
            # ensuring that long-forms are handled correctly
(['--verbose', '-v'], '--verbose', 1),
(['--verbose', '-v'], '--verbose --verbose', 2),
(['--verbose', '-v'], '--verbose --verbose --verbose', 3),
# single args
(['-v'], '-v', 1),
(['-v'], '-v -v', 2),
(['--verbose'], '--verbose', 1),
# bad inputs
(['-v'], None, None),
(['-v'], None, 'some-garbage'),
(['-v'], None, 'af3gd'),
]
        for commands, expected, verbosity_level in expected_outputs:
            result = formatters.counter({'commands': commands}, verbosity_level)
self.assertEqual(result, expected)
# make sure that argparse actually accepts it as valid.
if result:
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count')
parser.parse_args(result.split())
def test_multifilechooser_formatter(self):
"""
Should return files (quoted), separated by spaces if there is more
        than one, preceded by optional command if the argument is optional.
Assumes the argument has been created with some form of nargs, which
only makes sense for possibly choosing multiple values.
"""
# Helper function to generalize the variants we need to test
def multifilechooser_helper(names):
# Note that the MultiFileChooser widget produces a single string with
# paths separated by os.pathsep.
if names:
prefix = names[0] + ' '
else:
prefix = ''
expected_outputs = [
(names, None, ''),
(names, prefix + '"abc"', 'abc'),
(names, prefix + '"abc" "def"', os.pathsep.join(['abc', 'def'])),
# paths with spaces
(names, prefix + '"a b c"', 'a b c'),
(names, prefix + '"a b c" "d e f"', os.pathsep.join(['a b c', 'd e f'])),
]
for commands, expected, widget_result in expected_outputs:
result = formatters.multiFileChooser({'commands': commands}, widget_result)
self.assertEqual(result, expected)
# make sure that argparse actually accepts it as valid.
if result:
parser = argparse.ArgumentParser()
if not names:
names = ["file"]
parser.add_argument(names[0], nargs='+')
parser.parse_args(shlex.split(result))
# Positional argument, with nargs
multifilechooser_helper([])
# Optional argument, with nargs
multifilechooser_helper(["-f", "--file"])
|
StarcoderdataPython
|
88860
|
<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import csv
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from defs import Model
from lime import LIME
CLASSES = {
"p": False,
"e": True,
}
VALUES = [
{
"b": "bell",
"c": "conical",
"x": "convex",
"f": "flat",
"k": "knobbed",
"s": "sunken",
}, {
"f": "fibrous",
"g": "grooves",
"y": "scaly",
"s": "smooth",
}, {
"n": "brown",
"b": "buff",
"c": "cinnamon",
"g": "gray",
"r": "green",
"p": "pink",
"u": "purple",
"e": "red",
"w": "white",
"y": "yellow",
}, {
"t": "yes",
"f": "no",
}, {
"a": "almond",
"l": "anise",
"c": "creosote",
"y": "fishy",
"f": "foul",
"m": "musty",
"n": "none",
"p": "pungent",
"s": "spicy",
}, {
"a": "attached",
"d": "descending",
"f": "free",
"n": "notched",
}, {
"c": "close",
"w": "crowded",
"d": "distant",
}, {
"b": "broad",
"n": "narrow",
}, {
"k": "black",
"n": "brown",
"b": "buff",
"h": "chocolate",
"g": "gray",
"r": "green",
"o": "orange",
"p": "pink",
"u": "purple",
"e": "red",
"w": "white",
"y": "yellow",
}, {
"e": "enlarging",
"t": "tapering",
}, {
"b": "bulbous",
"c": "club",
"u": "cup",
"e": "equal",
"z": "rhizomorphs",
"r": "rooted",
}, {
"f": "fibrous",
"y": "scaly",
"k": "silky",
"s": "smooth",
}, {
"f": "fibrous",
"y": "scaly",
"k": "silky",
"s": "smooth",
}, {
"n": "brown",
"b": "buff",
"c": "cinnamon",
"g": "gray",
"o": "orange",
"p": "pink",
"e": "red",
"w": "white",
"y": "yellow",
}, {
"n": "brown",
"b": "buff",
"c": "cinnamon",
"g": "gray",
"o": "orange",
"p": "pink",
"e": "red",
"w": "white",
"y": "yellow",
}, {
"p": "partial",
"u": "universal",
}, {
"n": "brown",
"o": "orange",
"w": "white",
"y": "yellow",
}, {
"n": "none",
"o": "one",
"t": "two",
}, {
"c": "cobwebby",
"e": "evanescent",
"f": "flaring",
"l": "large",
"n": "none",
"p": "pendant",
"s": "sheathing",
"z": "zone",
}, {
"k": "black",
"n": "brown",
"b": "buff",
"h": "chocolate",
"r": "green",
"o": "orange",
"u": "purple",
"w": "white",
"y": "yellow",
}, {
"a": "abundant",
"c": "clustered",
"n": "numerous",
"s": "scattered",
"v": "several",
"y": "solitary",
}, {
"g": "grasses",
"l": "leaves",
"m": "meadows",
"p": "paths",
"u": "urban",
"w": "waste",
"d": "woods",
},
]
FEATURES = [
"cap-shape",
"cap-surface",
"cap-color",
"bruises",
"odor",
"gill-attachment",
"gill-spacing",
"gill-size",
"gill-color",
"stalk-shape",
"stalk-root",
"stalk-surface",
"stalk-surface",
"stalk-color",
"stalk-color",
"veil-type",
"veil-color",
"ring-number",
"ring-type",
"spore-print",
"population",
"habitat",
]
class MushroomModel(Model):
def __init__(self):
rng = np.random.RandomState(0)
        train_ratio = 0.1  # the task is easy, so a small fraction suffices for training
labels = []
rows = []
features = []
fix_lookup = {}
for (fix, f) in enumerate(FEATURES):
for (k, v) in VALUES[fix].items():
fix_lookup[(fix, k)] = len(features)
features.append("{0}={1}".format(f, v))
with open("example/mushroom/agaricus-lepiota.data", "r") as f_in:
for row in csv.reader(f_in):
labels.append(CLASSES[row[0].strip()])
cur = [ False for _ in features ]
for (fix, r) in enumerate(row[1:]):
if r.strip() == "?":
r = rng.choice(list(VALUES[fix].keys()))
cur[fix_lookup[(fix, r.strip())]] = True
rows.append(cur)
        labels = np.array(labels, dtype=bool)  # np.bool was removed in newer numpy
        rows = np.array(rows, dtype=bool)
ixs = list(range(rows.shape[0]))
rng.shuffle(ixs)
split = int(np.floor(train_ratio * rows.shape[0]))
train_ixs = ixs[:split]
test_ixs = ixs[split:]
model = RandomForestClassifier(random_state=rng)
model.fit(rows[train_ixs, :], labels[train_ixs])
self._cix = model.classes_.tolist().index(True)
train_pred = model.predict_proba(rows[train_ixs, :])[:, self._cix]
self._train_auc = roc_auc_score(labels[train_ixs], train_pred)
test_pred = model.predict_proba(rows[test_ixs, :])[:, self._cix]
self._test_auc = roc_auc_score(labels[test_ixs], test_pred)
self._x = rows[test_ixs, :]
self._y = labels[test_ixs]
self._features = features
self._threshold = self._get_threshold(labels[train_ixs], train_pred)
self._model = model
def _get_threshold(self, labels, preds):
th_pos = {}
th_neg = {}
total_neg = 0
# count labels
for (ix, p) in enumerate(preds.tolist()):
p = np.float64(p)
if p not in th_pos:
th_pos[p] = 0
if p not in th_neg:
th_neg[p] = 0
if labels[ix]:
th_pos[p] += 1
else:
total_neg += 1
th_neg[p] += 1
ths = sorted(th_pos.keys())
# first threshold == 0
fp = total_neg
fn = 0
best_t = None
best_v = None
for (ix, th) in enumerate(ths):
v = fp + fn
if best_v is None or v < best_v:
best_v = v
best_t = th
fp -= th_neg[th]
fn += th_pos[th]
return best_t
def test_auc(self):
"""Returns the area under ROC curve for the test data."""
return self._test_auc
def train_auc(self):
"""Returns the area under ROC curve for the training data."""
return self._train_auc
def shape(self):
"""Returns the shape of the test data."""
return self._x.shape
def features(self):
"""Returns the feature names as list."""
return self._features
def threshold(self):
"""The threshold for prediction scores."""
return self._threshold
def get_label(self, rix):
"""Returns the binary (True or False) label of the test data row with the given index."""
return self._y[rix]
def get_row(self, rix):
"""Returns the given row of the test data."""
return self._x[rix, :]
def predict_proba(self, X):
"""Returns the prediction scores for X. For each row one prediction
score must be returned (output shape is (X.shape[0],)).
Parameters:
-----------
X : np.matrix or np.array
The data to predict.
"""
return self._model.predict_proba(X)[:, self._cix]
def use_csr(self):
"""Whether to use CSR instead of CSV to store the matrix."""
return False # the dataset is small enough
def create_explainer(self):
return LIME(step=1.1, weight_th=2.1)
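# --- Editor's addition: a small smoke test exercising the public interface above.
# It assumes the agaricus-lepiota.data file referenced in __init__ is present;
# nothing here touches the LIME explainer.
if __name__ == "__main__":
    model = MushroomModel()
    print("train AUC: {0:.3f}, test AUC: {1:.3f}".format(model.train_auc(), model.test_auc()))
    print("decision threshold: {0:.3f}".format(model.threshold()))
    row = model.get_row(0).reshape(1, -1)
    score = model.predict_proba(row)[0]
    print("row 0: score {0:.3f}, label {1}".format(score, model.get_label(0)))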
|
StarcoderdataPython
|
1657308
|
<gh_stars>1-10
from django.conf import settings
import xmltodict
from openpersonen.utils.helpers import convert_empty_instances
def convert_response_to_verblijfs_titel_historie_dict(response):
dict_object = xmltodict.parse(response.content)
antwoord_dict_object = dict_object["soapenv:Envelope"]["soapenv:Body"][
"ns:npsLa01"
]["ns:antwoord"]["ns:object"]["ns:historieMaterieel"]
verblijfs_titel_dict = {
"aanduiding": {
"code": "0000",
"omschrijving": antwoord_dict_object["ns:vbt.aanduidingVerblijfstitel"],
},
"datumEinde": {
"dag": int(
antwoord_dict_object["StUF:tijdvakGeldigheid"]["StUF:beginGeldigheid"][
settings.OPENPERSONEN_DAY_START : settings.OPENPERSONEN_DAY_END
]
),
"datum": antwoord_dict_object["StUF:tijdvakGeldigheid"][
"StUF:beginGeldigheid"
],
"jaar": int(
antwoord_dict_object["StUF:tijdvakGeldigheid"]["StUF:beginGeldigheid"][
settings.OPENPERSONEN_YEAR_START : settings.OPENPERSONEN_YEAR_END
]
),
"maand": int(
antwoord_dict_object["StUF:tijdvakGeldigheid"]["StUF:beginGeldigheid"][
settings.OPENPERSONEN_MONTH_START : settings.OPENPERSONEN_MONTH_END
]
),
},
"datumIngang": {
"dag": int(
antwoord_dict_object["StUF:tijdvakGeldigheid"]["StUF:eindGeldigheid"][
settings.OPENPERSONEN_DAY_START : settings.OPENPERSONEN_DAY_END
]
),
"datum": antwoord_dict_object["StUF:tijdvakGeldigheid"][
"StUF:eindGeldigheid"
],
"jaar": int(
antwoord_dict_object["StUF:tijdvakGeldigheid"]["StUF:eindGeldigheid"][
settings.OPENPERSONEN_YEAR_START : settings.OPENPERSONEN_YEAR_END
]
),
"maand": int(
antwoord_dict_object["StUF:tijdvakGeldigheid"]["StUF:eindGeldigheid"][
settings.OPENPERSONEN_MONTH_START : settings.OPENPERSONEN_MONTH_END
]
),
},
"inOnderzoek": {
"aanduiding": bool(antwoord_dict_object["ns:vbt.aanduidingVerblijfstitel"]),
"datumEinde": bool(
antwoord_dict_object["StUF:tijdvakGeldigheid"]["StUF:eindGeldigheid"]
),
"datumIngang": bool(
antwoord_dict_object["StUF:tijdvakGeldigheid"]["StUF:beginGeldigheid"]
),
"datumIngangOnderzoek": {
"dag": 0,
"datum": "string",
"jaar": 0,
"maand": 0,
},
},
"geheimhoudingPersoonsgegevens": True,
}
convert_empty_instances(verblijfs_titel_dict)
return verblijfs_titel_dict
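# Editor's addition, usage sketch: `response` is expected to be an HTTP response whose
# body is a StUF npsLa01 SOAP envelope (as parsed above); the hypothetical call below
# only illustrates how the converter is meant to be used.
#
#     verblijfstitel = convert_response_to_verblijfs_titel_historie_dict(response)
#     print(verblijfstitel["datumIngang"]["datum"])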
|
StarcoderdataPython
|
109481
|
from django.db import models
class Pessoa(models.Model):
nome = models.CharField(max_length=100)
servico = models.CharField(max_length=100)
telefone = models.CharField(max_length=30)
def __str__(self):
return self.nome
|
StarcoderdataPython
|
12359
|
"""
Routines for the analysis of proton radiographs. These routines can be broadly
classified as either creating synthetic radiographs from prescribed fields or
methods of 'inverting' experimentally created radiographs to reconstruct the
original fields (under some set of assumptions).
"""
__all__ = [
"SyntheticProtonRadiograph",
]
import astropy.constants as const
import astropy.units as u
import numpy as np
import sys
import warnings
from tqdm import tqdm
from plasmapy import particles
from plasmapy.formulary.mathematics import rot_a_to_b
from plasmapy.particles import Particle
from plasmapy.plasma.grids import AbstractGrid
from plasmapy.simulation.particle_integrators import boris_push
def _coerce_to_cartesian_si(pos):
"""
Takes a tuple of `astropy.unit.Quantity` values representing a position
in space in either Cartesian, cylindrical, or spherical coordinates, and
returns a numpy array representing the same point in Cartesian
coordinates and units of meters.
"""
# Auto-detect geometry based on units
geo_units = [x.unit for x in pos]
if geo_units[2].is_equivalent(u.rad):
geometry = "spherical"
elif geo_units[1].is_equivalent(u.rad):
geometry = "cylindrical"
else:
geometry = "cartesian"
# Convert geometrical inputs between coordinates systems
pos_out = np.zeros(3)
if geometry == "cartesian":
x, y, z = pos
pos_out[0] = x.to(u.m).value
pos_out[1] = y.to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "cylindrical":
r, t, z = pos
r = r.to(u.m)
t = t.to(u.rad).value
z = z.to(u.m)
pos_out[0] = (r * np.cos(t)).to(u.m).value
pos_out[1] = (r * np.sin(t)).to(u.m).value
pos_out[2] = z.to(u.m).value
elif geometry == "spherical":
r, t, p = pos
r = r.to(u.m)
t = t.to(u.rad).value
p = p.to(u.rad).value
pos_out[0] = (r * np.sin(t) * np.cos(p)).to(u.m).value
pos_out[1] = (r * np.sin(t) * np.sin(p)).to(u.m).value
pos_out[2] = (r * np.cos(t)).to(u.m).value
return pos_out
class SyntheticProtonRadiograph:
r"""
Represents a charged particle radiography experiment with simulated or
calculated E and B fields given at positions defined by a grid of spatial
coordinates. The particle source and detector plane are defined by vectors
from the origin of the grid.
Parameters
----------
grid : `~plasmapy.plasma.grids.AbstractGrid` or subclass thereof
A Grid object containing the required quantities [E_x, E_y, E_z, B_x, B_y, B_z].
If any of these quantities are missing, a warning will be given and that
quantity will be assumed to be zero everywhere.
source : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the location
of the particle source. This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
detector : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center
of the detector plane. The vector from the source point to this
point defines the normal vector of the detector plane. This vector
can also be specified in cartesian, cylindrical, or spherical
coordinates (see the `source` keyword).
detector_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the detector plane. By default, the horizontal axis in the
detector plane is defined to be perpendicular to both the
source-to-detector vector and the z-axis (unless the source-to-detector axis
is parallel to the z axis, in which case the horizontal axis is the x-axis).
The detector vertical axis is then defined
to be orthogonal to both the source-to-detector vector and the
detector horizontal axis.
verbose : bool, optional
If true, updates on the status of the program will be printed
into the standard output while running.
"""
def __init__(
self,
grid: AbstractGrid,
source: u.m,
detector: u.m,
detector_hdir=None,
verbose=True,
):
# self.grid is the grid object
self.grid = grid
# self.grid_arr is the grid positions in si units. This is created here
        # so that it isn't continuously called later
self.grid_arr = grid.grid.to(u.m).value
self.verbose = verbose
# A list of wire meshes added to the grid with add_wire_mesh
# Particles that would hit these meshes will be removed at runtime
# by _apply_wire_mesh
self.mesh_list = []
# ************************************************************************
# Setup the source and detector geometries
# ************************************************************************
self.source = _coerce_to_cartesian_si(source)
self.detector = _coerce_to_cartesian_si(detector)
self._log(f"Source: {self.source} m")
self._log(f"Detector: {self.detector} m")
# Calculate normal vectors (facing towards the grid origin) for both
# the source and detector planes
self.src_n = -self.source / np.linalg.norm(self.source)
self.det_n = -self.detector / np.linalg.norm(self.detector)
# Vector directly from source to detector
self.src_det = self.detector - self.source
# Magnification
self.mag = 1 + np.linalg.norm(self.detector) / np.linalg.norm(self.source)
self._log(f"Magnification: {self.mag}")
# Check that source-detector vector actually passes through the grid
if not self.grid.vector_intersects(self.source * u.m, self.detector * u.m):
raise ValueError(
"The vector between the source and the detector "
"does not intersect the grid provided!"
)
# Determine the angle above which particles will not hit the grid
# these particles can be ignored until the end of the simulation,
# then immediately advanced to the detector grid with their original
# velocities
self.max_theta_hit_grid = self._max_theta_hit_grid()
# ************************************************************************
# Define the detector plane
# ************************************************************************
# Load or calculate the detector hdir
if detector_hdir is not None:
self.det_hdir = detector_hdir / np.linalg.norm(detector_hdir)
else:
self.det_hdir = self._default_detector_hdir()
# Calculate the detector vdir
ny = np.cross(self.det_hdir, self.det_n)
self.det_vdir = -ny / np.linalg.norm(ny)
# ************************************************************************
# Validate the E and B fields
# ************************************************************************
req_quantities = ["E_x", "E_y", "E_z", "B_x", "B_y", "B_z"]
self.grid.require_quantities(req_quantities, replace_with_zeros=True)
for rq in req_quantities:
# Check that there are no infinite values
if not np.isfinite(self.grid[rq].value).all():
raise ValueError(
f"Input arrays must be finite: {rq} contains "
"either NaN or infinite values."
)
# Check that the max values on the edges of the arrays are
# small relative to the maximum values on that grid
#
# Array must be dimensionless to re-assemble it into an array
# of max values like this
arr = np.abs(self.grid[rq]).value
edge_max = np.max(
np.array(
[
np.max(arr[0, :, :]),
np.max(arr[-1, :, :]),
np.max(arr[:, 0, :]),
np.max(arr[:, -1, :]),
np.max(arr[:, :, 0]),
np.max(arr[:, :, -1]),
]
)
)
if edge_max > 1e-3 * np.max(arr):
unit = grid.recognized_quantities[rq].unit
warnings.warn(
"Fields should go to zero at edges of grid to avoid "
f"non-physical effects, but a value of {edge_max:.2E} {unit} was "
f"found on the edge of the {rq} array. Consider applying a "
"envelope function to force the fields at the edge to go to "
"zero.",
RuntimeWarning,
)
def _default_detector_hdir(self):
"""
Calculates the default horizontal unit vector for the detector plane
(see __init__ description for details)
"""
# Create unit vectors that define the detector plane
# Define plane horizontal axis
if np.allclose(np.abs(self.det_n), np.array([0, 0, 1])):
nx = np.array([1, 0, 0])
else:
nx = np.cross(np.array([0, 0, 1]), self.det_n)
nx = nx / np.linalg.norm(nx)
return nx
def _max_theta_hit_grid(self):
r"""
Using the grid and the source position, compute the maximum particle
theta that will impact the grid. This value can be used to determine
which particles are worth tracking.
"""
ind = 0
theta = np.zeros([8])
for x in [0, -1]:
for y in [0, -1]:
for z in [0, -1]:
# Source to grid corner vector
vec = self.grid_arr[x, y, z, :] - self.source
# Calculate angle between vec and the source-to-detector
# axis, which is the central axis of the particle beam
theta[ind] = np.arccos(
np.dot(vec, self.src_det)
/ np.linalg.norm(vec)
/ np.linalg.norm(self.src_det)
)
ind += 1
return np.max(theta)
def _log(self, msg):
if self.verbose:
print(msg)
# Define some constants so they don't get constantly re-evaluated
_c = const.c.si.value
# *************************************************************************
# Create mesh
# *************************************************************************
def add_wire_mesh(
self, location, extent, nwires, wire_diameter, mesh_hdir=None, mesh_vdir=None
):
"""
Add a wire mesh grid between the particle source and the object grid
that blocks particles whose paths intersect the wires.
Parameters
----------
location : `~astropy.units.Quantity`, shape (3)
A vector pointing from the origin of the grid to the center of the
mesh grid. This location must be between the source and the
object grid.
This vector will be interpreted as
being in either cartesian, cylindrical, or spherical coordinates
based on its units. Valid geometries are:
* Cartesian (x,y,z) : (meters, meters, meters)
* cylindrical (r, theta, z) : (meters, radians, meters)
* spherical (r, theta, phi) : (meters, radians, radians)
In spherical coordinates theta is the polar angle.
extent : Tuple of 1 or 2 `~astropy.units.Quantity`
The size of the mesh grid (in the mesh plane). If one value
is provided, the mesh is circular and the value provided is
interpreted as the diameter. If two values are provided, the
            mesh is rectangular and the values are interpreted as the
width and height respectively.
nwires : Tuple of 1 or 2 ints, or a single int
The number of wires in the horizontal and vertical directions. If
only one value is provided, the number in the two directions is
assumed to be equal. Note that a wire will cross the center of the
mesh only when nwires is odd.
wire_diameter : `~astropy.units.Quantity`
The diameter of the wires.
mesh_hdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the horizontal
direction on the mesh plane. Modifying this vector can rotate the
mesh in the plane or tilt the mesh plane relative to the
source-detector axis. By default, `mesh_hdir` is set equal to
`detector_hdir` (see `detector_hdir` keyword in `__init__`).
mesh_vdir : `numpy.ndarray`, shape (3), optional
A unit vector (in Cartesian coordinates) defining the vertical
direction on the mesh plane. Modifying this vector can tilt the
mesh relative to the source-detector axis. By default, `mesh_vdir`
is defined to be perpendicular to `mesh_hdir` and the detector
plane normal (such that the mesh is parallel to the detector plane).
Raises
------
ValueError
Raises a ValueError if the provided mesh location is not
between the source and the object grid.
"""
location = _coerce_to_cartesian_si(location)
wire_radius = wire_diameter.si.value / 2
if not isinstance(extent, tuple):
extent = (extent,)
if len(extent) == 1:
radius = 0.5 * extent[0].si.value
width = extent[0].si.value
height = extent[0].si.value
elif len(extent) == 2:
radius = None
width = extent[0].si.value
height = extent[1].si.value
else:
raise ValueError(
"extent must be a tuple of 1 or 2 elements, but "
f"{len(extent)} elements were provided."
)
if not isinstance(nwires, tuple):
nwires = (nwires,)
if len(nwires) != 2:
nwires = (nwires[0], nwires[0])
# If no hdir/vdir is specified, calculate a default value
# If one is specified, make sure it is normalized
if mesh_hdir is None:
# Re-calculate the default here, in case the user
# specified a different det_hdir
mesh_hdir = self._default_detector_hdir()
else:
mesh_hdir = mesh_hdir / np.linalg.norm(mesh_hdir)
if mesh_vdir is None:
mesh_vdir = np.cross(mesh_hdir, self.det_n)
mesh_vdir = -mesh_vdir / np.linalg.norm(mesh_vdir)
else:
mesh_vdir = mesh_vdir / np.linalg.norm(mesh_vdir)
# Raise exception if mesh is AFTER the field grid
if np.linalg.norm(location - self.source) > np.linalg.norm(self.source):
raise ValueError(
f"The specified mesh location, {location},"
"is not between the source and the origin."
)
mesh_entry = {
"location": location,
"wire_radius": wire_radius,
"radius": radius,
"width": width,
"height": height,
"nwires": nwires,
"mesh_hdir": mesh_hdir,
"mesh_vdir": mesh_vdir,
}
self.mesh_list.append(mesh_entry)
def _apply_wire_mesh(
self,
location=None,
wire_radius=None,
radius=None,
width=None,
height=None,
nwires=None,
mesh_hdir=None,
mesh_vdir=None,
):
"""
Apply wire meshes that were added to self.mesh_list
"""
x = self._coast_to_plane(location, mesh_hdir, mesh_vdir)
# Particle positions in 2D on the mesh plane
xloc = np.dot(x - location, mesh_hdir)
yloc = np.dot(x - location, mesh_vdir)
# Create an array in which True indicates that a particle has hit a wire
# and False indicates that it has not
hit = np.zeros(self.nparticles, dtype=bool)
# Mark particles that overlap vertical or horizontal position with a wire
h_centers = np.linspace(-width / 2, width / 2, num=nwires[0])
for c in h_centers:
hit |= np.isclose(xloc, c, atol=wire_radius)
v_centers = np.linspace(-height / 2, height / 2, num=nwires[1])
for c in v_centers:
hit |= np.isclose(yloc, c, atol=wire_radius)
# Put back any particles that are outside the mesh boundaries
# First handle the case where the mesh is rectangular
if radius is None:
# Replace particles outside the x-boundary
hit[
np.logical_or(
xloc > np.max(h_centers) + wire_radius,
xloc < np.min(h_centers) - wire_radius,
)
] = False
# Replace particles outside the y-boundary
hit[
np.logical_or(
yloc > np.max(v_centers) + wire_radius,
yloc < np.min(v_centers) - wire_radius,
)
] = False
# Handle the case where the mesh is circular
else:
loc_rad = np.sqrt(xloc ** 2 + yloc ** 2)
hit[loc_rad > radius] = False
# In the case of a circular mesh, also create a round wire along the
# outside edge
hit[np.isclose(loc_rad, radius, atol=wire_radius)] = True
# Identify the particles that have hit something, then remove them from
# all of the arrays
keep_these_particles = ~hit
number_kept_particles = keep_these_particles.sum()
nremoved = self.nparticles - number_kept_particles
if self.nparticles - nremoved <= 0:
raise ValueError(
"The specified mesh is blocking all of the particles. "
f"The wire diameter ({2*wire_radius}) may be too large."
)
self.x = self.x[keep_these_particles, :]
self.v = self.v[keep_these_particles, :]
self.theta = self.theta[
keep_these_particles
] # Important to apply here to get correct grid_ind
self.nparticles = number_kept_particles
# *************************************************************************
# Particle creation methods
# *************************************************************************
def _angles_monte_carlo(self):
"""
Generates angles for each particle randomly such that the flux
per solid angle is uniform.
"""
# Create a probability vector for the theta distribution
# Theta must follow a sine distribution in order for the particle
# flux per solid angle to be uniform.
arg = np.linspace(0, self.max_theta, num=int(1e5))
prob = np.sin(arg)
prob *= 1 / np.sum(prob)
# Randomly choose theta's weighted with the sine probabilities
theta = np.random.choice(arg, size=self.nparticles, replace=True, p=prob)
# Also generate a uniform phi distribution
phi = np.random.uniform(high=2 * np.pi, size=self.nparticles)
return theta, phi
def _angles_uniform(self):
"""
Generates angles for each particle such that their velocities are
uniformly distributed on a grid in theta and phi. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
"""
# Calculate the approximate square root
n_per = np.floor(np.sqrt(self.nparticles)).astype(np.int32)
# Set new nparticles to be a perfect square
self.nparticles = n_per ** 2
# Create an imaginary grid positioned 1 unit from the source
# and spanning max_theta at the corners
extent = np.sin(self.max_theta) / np.sqrt(2)
arr = np.linspace(-extent, extent, num=n_per)
harr, varr = np.meshgrid(arr, arr, indexing="ij")
# calculate the angles from the source for each point in
# the grid.
theta = np.arctan(np.sqrt(harr ** 2 + varr ** 2))
phi = np.arctan2(varr, harr)
return theta.flatten(), phi.flatten()
@particles.particle_input
def create_particles(
self,
nparticles,
particle_energy,
max_theta=None,
particle: Particle = Particle("p+"),
distribution="monte-carlo",
):
r"""
Generates the angular distributions about the Z-axis, then
rotates those distributions to align with the source-to-detector axis.
By default, particles are generated over almost the entire pi/2. However,
if the detector is far from the source, many of these particles will
never be observed. The max_theta keyword allows these extraneous
particles to be neglected to focus computational resources on the
particles who will actually hit the detector.
nparticles : integer
The number of particles to include in the simulation. The default
is 1e5.
particle_energy : `~astropy.units.Quantity`
The energy of the particle, in units convertible to eV.
All particles are given the same energy.
max_theta : `~astropy.units.Quantity`, optional
The largest velocity vector angle (measured from the
source-to-detector axis) for which particles should be generated.
Decreasing this angle can eliminate particles that would never
reach the detector region of interest. If no value is given, a
guess will be made based on the size of the grid.
Units must be convertible to radians.
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
distribution: str
A keyword which determines how particles will be distributed
in velocity space. Options are:
- 'monte-carlo': velocities will be chosen randomly,
such that the flux per solid angle is uniform.
               - 'uniform': velocities will be distributed such that,
                    left unperturbed, they will form a uniform pattern
on the detection plane. This method
requires that `nparticles` be a perfect square. If it is not,
`nparticles` will be set as the largest perfect square smaller
than the provided `nparticles`.
Simulations run in the `uniform` mode will imprint a grid pattern
on the image, but will well-sample the field grid with a
smaller number of particles. The default is `monte-carlo`
"""
self._log("Creating Particles")
# Load inputs
self.nparticles = int(nparticles)
self.particle_energy = particle_energy.to(u.eV).value
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
# If max_theta is not specified, make a guess based on the grid size
if max_theta is None:
self.max_theta = np.clip(
1.5 * self.max_theta_hit_grid, 0.01, 0.99 * np.pi / 2
)
else:
self.max_theta = max_theta.to(u.rad).value
# Calculate the velocity corresponding to the particle energy
ER = self.particle_energy * 1.6e-19 / (self.m * self._c ** 2)
v0 = self._c * np.sqrt(1 - 1 / (ER + 1) ** 2)
if distribution == "monte-carlo":
theta, phi = self._angles_monte_carlo()
elif distribution == "uniform":
theta, phi = self._angles_uniform()
# Temporarily save theta to later determine which particles
# should be tracked
self.theta = theta
# Construct the velocity distribution around the z-axis
self.v = np.zeros([self.nparticles, 3])
self.v[:, 0] = v0 * np.sin(theta) * np.cos(phi)
self.v[:, 1] = v0 * np.sin(theta) * np.sin(phi)
self.v[:, 2] = v0 * np.cos(theta)
# Calculate the rotation matrix that rotates the z-axis
# onto the source-detector axis
a = np.array([0, 0, 1])
b = self.detector - self.source
rot = rot_a_to_b(a, b)
# Apply rotation matrix to calculated velocity distribution
self.v = np.matmul(self.v, rot)
# Place particles at the source
self.x = np.tile(self.source, (self.nparticles, 1))
@particles.particle_input
def load_particles(
self, x, v, particle: Particle = Particle("p+"),
):
r"""
Load arrays of particle positions and velocities
x : `~astropy.units.Quantity`, shape (N,3)
Positions for N particles
v: `~astropy.units.Quantity`, shape (N,3)
Velocities for N particles
particle : ~plasmapy.particles.Particle or string representation of same, optional
Representation of the particle species as either a `Particle` object
or a string representation. The default particle is protons.
"""
self.q = particle.charge.to(u.C).value
self.m = particle.mass.to(u.kg).value
if x.shape[0] != v.shape[0]:
raise ValueError(
"Provided x and v arrays have inconsistent numbers "
" of particles "
f"({x.shape[0]} and {v.shape[0]} respectively)."
)
else:
self.nparticles = x.shape[0]
self.x = x.to(u.m).value
self.v = v.to(u.m / u.s).value
self.theta = np.arccos(
np.inner(self.v, self.src_n) / np.linalg.norm(self.v, axis=-1)
)
n_wrong_way = np.sum(np.where(self.theta > np.pi / 2, 1, 0))
if n_wrong_way > 1:
warnings.warn(
f"{100*n_wrong_way/self.nparticles:.2f}% of particles "
"initialized are heading away from the grid. Check the orientation "
" of the provided velocity vectors.",
RuntimeWarning,
)
# *************************************************************************
# Run/push loop methods
# *************************************************************************
def _adaptive_dt(self, Ex, Ey, Ez, Bx, By, Bz):
r"""
Calculate the appropriate dt based on a number of considerations
including the local grid resolution (ds) and the gyroperiod of the
particles in the current fields.
"""
# If dt was explicitly set, skip the rest of this function
if self.dt.size == 1:
return self.dt
# Compute the timestep indicated by the grid resolution
ds = self.grid.grid_resolution.to(u.m).value
gridstep = 0.5 * (np.min(ds) / self.vmax)
# If not, compute a number of possible timesteps
# Compute the cyclotron gyroperiod
Bmag = np.max(np.sqrt(Bx ** 2 + By ** 2 + Bz ** 2)).to(u.T).value
# Compute the gyroperiod
if Bmag == 0:
gyroperiod = np.inf
else:
gyroperiod = 2 * np.pi * self.m / (self.q * np.max(Bmag))
# TODO: introduce a minimum timestep based on electric fields too!
# Create an array of all the possible time steps we computed
candidates = np.array([gyroperiod / 12, gridstep])
# Enforce limits on dt
candidates = np.clip(candidates, self.dt[0], self.dt[1])
# dt is the min of the remaining candidates
return np.min(candidates)
def _coast_to_grid(self):
r"""
Coasts all particles to the timestep when the first particle should
be entering the grid. Doing in this in one step (rather than pushing
the particles through zero fields) saves computation time.
"""
# Distance from the source to the nearest gridpoint
dist = np.min(np.linalg.norm(self.grid_arr - self.source, axis=3))
# Find the particle with the highest speed towards the grid
vmax = np.max(np.dot(self.v, self.src_n))
# Time for fastest possible particle to reach the grid.
t = dist / vmax
# Coast the particles to the advanced position
self.x = self.x + self.v * t
def _coast_to_plane(self, center, hdir, vdir, x=None):
"""
Calculates the positions where the current trajectories of each
particle impact a plane, described by the plane's center and
horizontal and vertical unit vectors.
Returns an [nparticles, 3] array of the particle positions in the plane
By default this function does not alter self.x. The optional keyword
x can be used to pass in an output array that will used to hold
the positions in the plane. This can be used to directly update self.x
as follows:
self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x = self.x)
"""
normal = np.cross(hdir, vdir)
# Calculate the time required to evolve each particle into the
# plane
t = np.inner(center[np.newaxis, :] - self.x, normal) / np.inner(self.v, normal)
# Calculate particle positions in the plane
if x is None:
# If no output array is provided, preallocate
x = np.empty_like(self.x)
x[...] = self.x + self.v * t[:, np.newaxis]
# Check that all points are now in the plane
# (Eq. of a plane is nhat*x + d = 0)
plane_eq = np.dot(x - center, normal)
assert np.allclose(plane_eq, 0, atol=1e-6)
return x
def _remove_deflected_particles(self):
r"""
Removes any particles that have been deflected away from the detector
plane (eg. those that will never hit the grid)
"""
dist_remaining = np.dot(self.x, self.det_n) + np.linalg.norm(self.detector)
v_towards_det = np.dot(self.v, -self.det_n)
# If particles have not yet reached the detector plane and are moving
# away from it, they will never reach the detector.
# So, we can remove them from the arrays
# Find the indices of all particles that we should keep:
# i.e. those still moving towards the detector.
ind = np.logical_not((v_towards_det < 0) & (dist_remaining > 0)).nonzero()[0]
# Drop the other particles
self.x = self.x[ind, :]
self.v = self.v[ind, :]
self.v_init = self.v_init[ind, :]
self.nparticles_grid = self.x.shape[0]
# Store the number of particles deflected
self.fract_deflected = (self.nparticles - ind.size) / self.nparticles
# Warn the user if a large number of particles are being deflected
if self.fract_deflected > 0.05:
warnings.warn(
f"{100*self.fract_deflected:.1f}% particles have been "
"deflected away from the detector plane. The fields "
"provided may be too high to successfully radiograph "
"with this particle energy.",
RuntimeWarning,
)
def _push(self):
r"""
Advance particles using an implementation of the time-centered
Boris algorithm
"""
# Get a list of positions (input for interpolator)
pos = self.x[self.grid_ind, :] * u.m
# Update the list of particles on and off the grid
self.on_grid = self.grid.on_grid(pos)
# entered_grid is zero at the end if a particle has never
# entered the grid
self.entered_grid += self.on_grid
# Estimate the E and B fields for each particle
# Note that this interpolation step is BY FAR the slowest part of the push
# loop. Any speed improvements will have to come from here.
if self.field_weighting == "volume averaged":
Ex, Ey, Ez, Bx, By, Bz = self.grid.volume_averaged_interpolator(
pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True,
)
elif self.field_weighting == "nearest neighbor":
Ex, Ey, Ez, Bx, By, Bz = self.grid.nearest_neighbor_interpolator(
pos, "E_x", "E_y", "E_z", "B_x", "B_y", "B_z", persistent=True,
)
# Create arrays of E and B as required by push algorithm
E = np.array(
[Ex.to(u.V / u.m).value, Ey.to(u.V / u.m).value, Ez.to(u.V / u.m).value]
)
E = np.moveaxis(E, 0, -1)
B = np.array([Bx.to(u.T).value, By.to(u.T).value, Bz.to(u.T).value])
B = np.moveaxis(B, 0, -1)
# Calculate the adaptive timestep from the fields currently experienced
# by the particles
        # If the user sets dt explicitly, that's handled in _adaptive_dt
dt = self._adaptive_dt(Ex, Ey, Ez, Bx, By, Bz)
# TODO: Test v/c and implement relativistic Boris push when required
# vc = np.max(v)/_c
x = self.x[self.grid_ind, :]
v = self.v[self.grid_ind, :]
boris_push(x, v, B, E, self.q, self.m, dt)
self.x[self.grid_ind, :] = x
self.v[self.grid_ind, :] = v
def _stop_condition(self):
r"""
The stop condition is that most of the particles have entered the grid
and almost all have now left it.
"""
# Count the number of particles who have entered, which is the
# number of non-zero entries in entered_grid
self.num_entered = np.nonzero(self.entered_grid)[0].size
# How many of the particles have entered the grid
self.fract_entered = np.sum(self.num_entered) / self.nparticles_grid
# Of the particles that have entered the grid, how many are currently
# on the grid?
# if/else avoids dividing by zero
if np.sum(self.num_entered) > 0:
still_on = np.sum(self.on_grid) / np.sum(self.num_entered)
else:
still_on = 0.0
if self.fract_entered > 0.1 and still_on < 0.001:
# Warn user if < 10% of the particles ended up on the grid
if self.num_entered < 0.1 * self.nparticles:
warnings.warn(
f"Only {100*self.num_entered/self.nparticles:.2f}% of "
"particles entered the field grid: consider "
"decreasing the max_theta to increase this "
"number.",
RuntimeWarning,
)
return True
else:
return False
def run(
self, dt=None, field_weighting="volume averaged",
):
r"""
Runs a particle-tracing simulation.
Timesteps are adaptively calculated based on the
local grid resolution of the particles and the electric and magnetic
fields they are experiencing. After all particles
have left the grid, they are advanced to the
detector plane where they can be used to construct a synthetic
diagnostic image.
Parameters
----------
dt : `~astropy.units.Quantity`, optional
            An explicitly set timestep in units convertible to seconds.
            Setting this optional keyword overrules the adaptive time step
            capability and forces the use of this timestep throughout. If a tuple
            of timesteps is provided, the adaptive timestep will be clamped
between the first and second values.
field_weighting : str
String that selects the field weighting algorithm used to determine
what fields are felt by the particles. Options are:
* 'nearest neighbor': Particles are assigned the fields on
the grid vertex closest to them.
* 'volume averaged' : The fields experienced by a particle are a
volume-average of the eight grid points surrounding them.
The default is 'volume averaged'.
Returns
-------
None.
"""
# Load and validate inputs
field_weightings = ["volume averaged", "nearest neighbor"]
if field_weighting in field_weightings:
self.field_weighting = field_weighting
else:
raise ValueError(
f"{field_weighting} is not a valid option for ",
"field_weighting. Valid choices are",
f"{field_weightings}",
)
if dt is None:
# Set dt as an infinite range by default (auto dt with no restrictions)
self.dt = np.array([0.0, np.inf]) * u.s
else:
self.dt = dt
self.dt = (self.dt).to(u.s).value
# Check to make sure particles have already been generated
if not hasattr(self, "x"):
raise ValueError(
"Either the create_particles or load_particles method must be "
"called before running the particle tracing algorithm."
)
# If meshes have been added, apply them now
for mesh in self.mesh_list:
self._apply_wire_mesh(**mesh)
# Store a copy of the initial velocity distribution in memory
# This will be used later to calculate the maximum deflection
self.v_init = np.copy(self.v)
# Calculate the maximum velocity
# Used for determining the grid crossing maximum timestep
self.vmax = np.max(np.linalg.norm(self.v, axis=-1))
# Determine which particles should be tracked
# This array holds the indices of all particles that WILL hit the grid
# Only these particles will actually be pushed through the fields
self.grid_ind = np.where(self.theta < self.max_theta_hit_grid)[0]
self.nparticles_grid = len(self.grid_ind)
self.fract_tracked = self.nparticles_grid / self.nparticles
        # Create flags for tracking particles during the simulation
        # on_grid -> zero if the particle is off the grid, one if it is on the grid
self.on_grid = np.zeros([self.nparticles_grid])
# Entered grid -> non-zero if particle EVER entered the grid
self.entered_grid = np.zeros([self.nparticles_grid])
# Generate a null distribution of points (the result in the absence of
# any fields) for statistical comparison
self.x0 = self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir)
# Advance the particles to the near the start of the grid
self._coast_to_grid()
# Initialize a "progress bar" (really more of a meter)
# Setting sys.stdout lets this play nicely with regular print()
pbar = tqdm(
initial=0,
total=self.nparticles_grid + 1,
disable=not self.verbose,
desc="Particles on grid",
unit="particles",
bar_format="{l_bar}{bar}{n:.1e}/{total:.1e} {unit}",
file=sys.stdout,
)
# Push the particles until the stop condition is satisfied
# (no more particles on the simulation grid)
while not self._stop_condition():
n_on_grid = np.sum(self.on_grid)
pbar.n = n_on_grid
pbar.last_print_n = n_on_grid
pbar.update()
self._push()
pbar.close()
# Remove particles that will never reach the detector
self._remove_deflected_particles()
# Advance the particles to the image plane
self._coast_to_plane(self.detector, self.det_hdir, self.det_vdir, x=self.x)
# Log a summary of the run
self._log("Run completed")
self._log("Fraction of particles tracked: " f"{self.fract_tracked*100:.1f}%")
self._log(
"Fraction of tracked particles that entered the grid: "
f"{self.fract_entered*100:.1f}%"
)
self._log(
"Fraction of tracked particles deflected away from the "
"detector plane: "
f"{self.fract_deflected*100}%"
)
@property
def max_deflection(self):
"""
The maximum deflection experienced by one of the particles, determined
        by comparing their initial and final velocity vectors.
This value can be used to determine the charged particle radiography regime
using the dimensionless number defined by Kugland et al. 2012
Returns
-------
max_deflection : float
The maximum deflection in radians
"""
# Normalize the initial and final velocities
v_norm = self.v / np.linalg.norm(self.v, axis=1, keepdims=True)
v_init_norm = self.v_init / np.linalg.norm(self.v_init, axis=1, keepdims=True)
# Compute the dot product
proj = np.sum(v_norm * v_init_norm, axis=1)
# In case of numerical errors, make sure the output is within the domain of
# arccos
proj = np.where(proj > 1, 1, proj)
max_deflection = np.max(np.arccos(proj))
return max_deflection * u.rad
# *************************************************************************
# Synthetic diagnostic methods (creating output)
# *************************************************************************
def synthetic_radiograph(
self, size=None, bins=[200, 200], ignore_grid=False, optical_density=False
):
r"""
Calculate a "synthetic radiograph" (particle count histogram in the
image plane).
Parameters
----------
size : `~astropy.units.Quantity`, shape (2,2)
The size of the detector array, specified as the minimum
and maximum values included in both the horizontal and vertical
directions in the detector plane coordinates. Shape is
            [[hmin,hmax], [vmin, vmax]]. Units must be convertible to meters.
bins : array of integers, shape (2)
The number of bins in each direction in the format [hbins, vbins].
The default is [200,200].
ignore_grid: bool
If True, returns the intensity in the image plane in the absence
of simulated fields.
optical_density: bool
If True, return the optical density rather than the intensity
.. math::
OD = -log_{10}(Intensity/I_0)
where I_O is the intensity on the detector plane in the absence of
simulated fields. Default is False.
Returns
-------
hax : `~astropy.units.Quantity` array shape (hbins,)
The horizontal axis of the synthetic radiograph in meters.
vax : `~astropy.units.Quantity` array shape (vbins, )
The vertical axis of the synthetic radiograph in meters.
intensity : ndarray, shape (hbins, vbins)
The number of particles counted in each bin of the histogram.
"""
# Note that, at the end of the simulation, all particles were moved
# into the image plane.
# If ignore_grid is True, use the predicted positions in the absence of
# simulated fields
if ignore_grid:
x = self.x0
else:
x = self.x
# Determine locations of points in the detector plane using unit
# vectors
xloc = np.dot(x - self.detector, self.det_hdir)
yloc = np.dot(x - self.detector, self.det_vdir)
if size is None:
# If a detector size is not given, choose lengths based on the
# dimensions of the grid
w = self.mag * np.max(
[
np.max(np.abs(self.grid.pts0.to(u.m).value)),
np.max(np.abs(self.grid.pts1.to(u.m).value)),
np.max(np.abs(self.grid.pts2.to(u.m).value)),
]
)
# The factor of 5 here is somewhat arbitrary: we just want a
# region a few times bigger than the image of the grid on the
# detector, since particles could be deflected out
size = 5 * np.array([[-w, w], [-w, w]]) * u.m
# Generate the histogram
intensity, h, v = np.histogram2d(
xloc, yloc, range=size.to(u.m).value, bins=bins
)
# h, v are the bin edges: compute the centers to produce arrays
# of the right length (then trim off the extra point)
h = ((h + np.roll(h, -1)) / 2)[0:-1]
v = ((v + np.roll(v, -1)) / 2)[0:-1]
# Throw a warning if < 50% of the particles are included on the
# histogram
percentage = np.sum(intensity) / self.nparticles
if percentage < 0.5:
warnings.warn(
f"Only {percentage:.2%} of the particles are shown "
"on this synthetic radiograph. Consider increasing "
"the size to include more.",
RuntimeWarning,
)
if optical_density:
# Generate the null radiograph
x, y, I0 = self.synthetic_radiograph(size=size, bins=bins, ignore_grid=True)
# Calculate I0 as the mean of the non-zero values in the null
            # histogram. Zeros are just outside of the illuminated area.
I0 = np.mean(I0[I0 != 0])
# Overwrite any zeros in intensity to avoid log10(0)
intensity[intensity == 0] = 1
# Calculate the optical_density
intensity = -np.log10(intensity / I0)
return h * u.m, v * u.m, intensity
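# --- Editor's addition: an end-to-end usage sketch, kept as comments because it
# depends on field data not defined in this module. It assumes a CartesianGrid
# populated via add_quantities(); the geometry, energy and particle count below
# are illustrative only.
#
#     from plasmapy.plasma.grids import CartesianGrid
#     grid = CartesianGrid(-1 * u.mm, 1 * u.mm, num=50)
#     grid.add_quantities(E_x=..., E_y=..., E_z=...)   # supply fields that fall to zero at the edges
#     sim = SyntheticProtonRadiograph(
#         grid,
#         source=(0 * u.mm, -10 * u.mm, 0 * u.mm),
#         detector=(0 * u.mm, 100 * u.mm, 0 * u.mm),
#     )
#     sim.create_particles(1e5, 3 * u.MeV, distribution="monte-carlo")
#     sim.run()
#     hax, vax, intensity = sim.synthetic_radiograph(bins=[200, 200])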
|
StarcoderdataPython
|
3367259
|
'''
middleware/getOS.py
- Gets running Operating System of user
'''
import platform
def handle():
OS = platform.system()
osList = []
tmpList = ['Windows', 'Darwin', 'Linux']
osList.append(OS)
for item in tmpList:
if item not in osList:
osList.append(item)
return osList
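# Editor's addition, usage sketch: handle() puts the running OS first, e.g. on a
# Linux host it returns ['Linux', 'Windows', 'Darwin'].
#
#     print(handle())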
|
StarcoderdataPython
|
1741368
|
import functools
import sys
import traceback
from gi.repository import Gtk
from . import window_creator
def show_error(exctype, value, tb, gtk_main=False):
"""
Shows window showing detailed information about critical error.
:param exctype: converted to string and displayed (may contain additional text)
:param value: converted to string and displayed
:param tb: converted to string and displayed
:param gtk_main: True if Gtk.main() should be called.
It can be used if program crashes before main loop is started elsewhere.
"""
def on_button_clicked(*_):
window.close()
window, objects = window_creator.create_window_error()
cancel_button = objects["cancel_button"]
cancel_button.connect("clicked", on_button_clicked)
exctype_textv = objects["textview1"]
value_textv = objects["textview2"]
traceback_textv = objects["textview3"]
exctype_textv.get_buffer().set_text("Exception type: " + str(exctype))
value_textv.get_buffer().set_text(str(value))
traceback_textv.get_buffer().set_text("".join(traceback.format_exception(exctype, value, tb)))
if gtk_main:
Gtk.main()
def catch_exception(function):
"""
Can be used to catch any exception and display error message. This shouldn't be used.
:param function: function to be decorated
:return: decorated function
"""
@functools.wraps(function)
def inner(*args, **kwargs):
try:
return function(*args, **kwargs)
except:
show_error(*sys.exc_info())
return inner
def catch_global_exception(exctype, value, tb):
"""
Shows window showing detailed information about critical error. Also puts standard error message into console.
It is expected that main Gtk loop is started.
This can be used as global exception hook.
See `show_error` function for more details.
:param exctype: exception type
:param value: value
:param tb: traceback
"""
show_error(exctype, value, tb, gtk_main=False)
sys.__excepthook__(exctype, value, tb)
def catch_global_exception_with_gtk_main(exctype, value, tb):
"""
Shows window showing detailed information about critical error. Also puts standard error message into console.
It is expected that main Gtk loop is not started.
This can be used as global exception hook.
See `show_error` function for more details.
:param exctype: exception type
:param value: value
:param tb: traceback
"""
show_error(exctype, value, tb, gtk_main=True)
sys.__excepthook__(exctype, value, tb)
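# --- Editor's addition: a usage sketch for installing these hooks. Which variant to
# use depends on whether the Gtk main loop is already running elsewhere.
#
#     import sys
#     sys.excepthook = catch_global_exception              # Gtk.main() started elsewhere
#     # or, if no main loop will be running when errors occur:
#     sys.excepthook = catch_global_exception_with_gtk_main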
|
StarcoderdataPython
|
168946
|
<reponame>pfe-everis/lcd
import torch
import torch.nn as nn
import torch.nn.functional as F
class PatchNetEncoder(nn.Module):
def __init__(self, embedding_size):
super(PatchNetEncoder, self).__init__()
self.embedding_size = embedding_size
self.conv1 = nn.Conv2d(3, 32, 4, 2, 1)
self.conv2 = nn.Conv2d(32, 64, 4, 2, 1)
self.conv3 = nn.Conv2d(64, 128, 4, 2, 1)
self.conv4 = nn.Conv2d(128, 256, 4, 2, 1)
self.conv5 = nn.Conv2d(256, embedding_size, 4, 4)
self.bn1 = nn.BatchNorm2d(32)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(256)
def forward(self, x):
batch_size = x.shape[0]
x = x.permute(0, 3, 1, 2)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = F.relu(self.bn4(self.conv4(x)))
x = self.conv5(x)
x = x.view(batch_size, -1)
return x
class PatchNetDecoder(nn.Module):
def __init__(self, embedding_size):
super(PatchNetDecoder, self).__init__()
self.embedding_size = embedding_size
self.deconv1 = nn.ConvTranspose2d(embedding_size, 256, 4, 4)
self.deconv2 = nn.ConvTranspose2d(256, 128, 4, 2, 1)
self.deconv3 = nn.ConvTranspose2d(128, 64, 4, 2, 1)
self.deconv4 = nn.ConvTranspose2d(64, 32, 4, 2, 1)
self.deconv5 = nn.ConvTranspose2d(32, 3, 4, 2, 1)
self.bn1 = nn.BatchNorm2d(256)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(64)
self.bn4 = nn.BatchNorm2d(32)
def forward(self, x):
batch_size = x.shape[0]
x = x.view(batch_size, self.embedding_size, 1, 1)
x = F.relu(self.bn1(self.deconv1(x)))
x = F.relu(self.bn2(self.deconv2(x)))
x = F.relu(self.bn3(self.deconv3(x)))
x = F.relu(self.bn4(self.deconv4(x)))
x = torch.sigmoid(self.deconv5(x))
x = x.permute(0, 2, 3, 1)
return x
class PatchNetAutoencoder(nn.Module):
def __init__(self, embedding_size, normalize=True):
super(PatchNetAutoencoder, self).__init__()
self.normalize = normalize
self.embedding_size = embedding_size
self.encoder = PatchNetEncoder(embedding_size)
self.decoder = PatchNetDecoder(embedding_size)
def forward(self, x):
z = self.encode(x)
y = self.decode(z)
return y, z
def encode(self, x):
z = self.encoder(x)
if self.normalize:
z = F.normalize(z)
return z
def decode(self, z):
y = self.decoder(z)
return y
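# --- Editor's addition: a shape check. The 64x64 patch size is implied by the five
# stride-2/stride-4 convolutions above (64 -> 32 -> 16 -> 8 -> 4 -> 1); batch and
# embedding sizes are arbitrary.
if __name__ == "__main__":
    model = PatchNetAutoencoder(embedding_size=256)
    patches = torch.rand(8, 64, 64, 3)       # channels-last, as expected by forward()
    recon, z = model(patches)
    print(recon.shape, z.shape)               # (8, 64, 64, 3), (8, 256)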
|
StarcoderdataPython
|
4837886
|
<filename>gui/blockify/blockifydbus.py
"""spotifydbus
Usage:
spotifydbus (toggle | next | prev | stop | play) [-v...] [options]
spotifydbus get [title | artist | length | status | all] [-v...] [options]
spotifydbus (openuri <uri> | seek <secs> | setpos <pos>) [-v...] [options]
Options:
-l, --log=<path> Enables logging to the logfile/-path specified.
-q, --quiet Don't print anything to stdout.
-v Verbosity of the logging module, up to -vvv.
-h, --help Show this help text.
--version Show current version of spotifydbus.
"""
import logging
import os
import re
import sys
import dbus
try:
from docopt import docopt
except ImportError:
print "ImportError: Please install docopt to use the DBus CLI."
log = logging.getLogger("dbus")
class BlockifyDBus(object):
"Wrapper for Spotify's DBus interface."
def __init__(self, bus=None):
self.obj_path = "/org/mpris/MediaPlayer2"
self.prop_path = "org.freedesktop.DBus.Properties"
self.player_path = "org.mpris.MediaPlayer2.Player"
self.spotify_path = None
if not bus:
bus = dbus.SessionBus()
self.session_bus = bus
for name in bus.list_names():
if re.match(r".*mpris.*spotify", name):
self.spotify_path = str(name)
if self.is_running():
self.proxy = self.session_bus.get_object(self.spotify_path,
self.obj_path)
self.properties = dbus.Interface(self.proxy, self.prop_path)
self.player = dbus.Interface(self.proxy, self.player_path)
else:
self.properties = None
self.player = None
self.proxy = None
log.error("Spotify not found in DBus session. Is it running?")
def is_running(self):
"TODO: Make this not redundant"
if self.spotify_path is None:
return False
return True
def get_property(self, key):
"Gets the value from any available property."
if self.is_running():
return self.properties.Get(self.player_path, key)
def set_property(self, key, value):
"Sets the value for any available property."
if self.is_running():
return self.properties.Set(self.player_path, key, value)
def playpause(self):
"Toggles the current song between Play and Pause."
if self.is_running():
can_pause = self.get_property("CanPause")
can_play = self.get_property("CanPlay")
if can_pause and can_play:
self.player.PlayPause()
else:
log.warn("Cannot Play/Pause")
def play(self):
"DEFUNCT: Tries to play the current title."
if self.is_running():
can_play = self.get_property("CanPlay")
if can_play:
self.player.Play()
else:
log.warn("Cannot Play")
def stop(self):
"Tries to stop playback. PlayPause is probably preferable."
if self.is_running():
self.player.Stop()
def next(self):
"Tries to skip to next song."
if self.is_running():
can_next = self.get_property("CanGoNext")
if can_next:
self.player.Next()
else:
log.warn("Cannot Go Next")
def prev(self):
"Tries to go back to last song."
if self.is_running():
can_prev = self.get_property("CanGoPrevious")
if can_prev:
self.player.Previous()
else:
log.warn("Cannot Go Previous.")
def set_position(self, track, position):
if self.is_running():
self.player.SetPosition(track, position)
def open_uri(self, uri):
if self.is_running():
self.player.OpenUri(uri)
def seek(self, seconds):
"DEFUNCT: Calls seek method."
if self.is_running():
can_seek = self.get_property("CanSeek")
if can_seek:
self.player.Seek(seconds)
else:
log.warn("Cannot Seek.")
def get_song_status(self):
"Get current PlaybackStatus (Paused/Playing...)."
if self.is_running():
return self.get_property("PlaybackStatus")
def get_song_length(self):
"Gets the length of current song from metadata (in seconds)."
if self.is_running():
metadata = self.get_property("Metadata")
if metadata:
return int(metadata["mpris:length"] / 1000000)
def get_song_title(self):
"Gets title of current song from metadata"
if self.is_running():
metadata = self.get_property("Metadata")
if metadata:
return metadata["xesam:title"].encode("utf-8")
def get_song_album(self):
"Gets album of current song from metadata"
if self.is_running():
metadata = self.get_property("Metadata")
if metadata:
return metadata["xesam:album"].encode("utf-8")
def get_song_artist(self):
"Gets the artist of current song from metadata"
if self.is_running():
metadata = self.get_property("Metadata")
if metadata:
return str(metadata["xesam:artist"][0])
def print_info(self):
"Print all the DBus info we can get our hands on."
try:
interfaces = self.properties.GetAll(self.player_path)
metadata = self.get_property("Metadata")
i_keys = list(map(str, interfaces.keys()))
i_keys.remove("Metadata")
i_keys.sort()
for i in i_keys:
if len(i) < 7:
print i, "\t\t= ", self.get_property(i)
else:
print i, "\t= ", self.get_property(i)
print ""
d_keys = list(metadata.keys())
d_keys.sort()
for k in d_keys:
d = k.split(":")[1]
if d == "artist":
print d, "\t\t= ", metadata[k][0]
# elif d == "length":
elif len(d) < 7:
print d, "\t\t= ", metadata[k]
else:
print d, "\t= ", metadata[k]
except AttributeError as e:
log.error("Could not get properties: {}".format(e))
def init_logger(logpath=None, loglevel=1, quiet=False):
"Initializes the logger for system messages."
logger = logging.getLogger()
# Set the loglevel.
if loglevel > 3:
loglevel = 3 # Cap at 3 to avoid index errors.
levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
logger.setLevel(levels[loglevel])
logformat = "%(asctime)-14s %(levelname)-8s %(message)s"
formatter = logging.Formatter(logformat, "%Y-%m-%d %H:%M:%S")
if not quiet:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
log.debug("Added logging console handler.")
log.info("Loglevel is {}.".format(levels[loglevel]))
if logpath:
try:
logfile = os.path.abspath(logpath)
file_handler = logging.FileHandler(logfile)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
log.debug("Added logging file handler: {}.".format(logfile))
except IOError:
log.error("Could not attach file handler.")
def main():
"Entry point for the CLI DBus interface."
args = docopt(__doc__, version="0.2")
init_logger(args["--log"], args["-v"], args["--quiet"])
spotify = BlockifyDBus()
if args["toggle"]:
spotify.playpause()
elif args["next"]:
spotify.next()
elif args["prev"]:
spotify.prev()
elif args["play"]:
spotify.play()
elif args["stop"]:
spotify.stop()
if args["openuri"]:
spotify.open_uri(args["<uri>"])
elif args["seek"]:
spotify.seek(args["<secs>"])
elif args["setpos"]:
spotify.set_pos(args["<pos>"])
if args["title"]:
print spotify.get_song_title()
elif args["artist"]:
print spotify.get_song_artist()
elif args["status"]:
print spotify.get_song_status()
elif args["all"]:
spotify.print_info()
elif args["get"]:
length = spotify.get_song_length()
m, s = divmod(length, 60)
if args["length"]:
print "{}m{}s ({})".format(m, s, length)
else:
rating = spotify.get_property("Metadata")["xesam:autoRating"]
artist = spotify.get_song_artist()
title = spotify.get_song_title()
album = spotify.get_song_album()
state = spotify.get_song_status()
print "{} - {} ({}), {}m{}s, {} ({})".format(artist, title, album,
m, s, rating, state)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1784679
|
<filename>predictionerror.py
import abc
import core
import torch.nn as nn
######################################################################################################
######################################################################################################
######################################################################################################
class DVAEpredictionError(metaclass=abc.ABCMeta):
def __init__(
self
):
"""
Compares input with output and returns the loss
This is an abstract class meant to be inherited
"""
pass
@abc.abstractmethod
def store_loss(
self,
input_x,
encoder_output,
loss_recorder: core.DVAEloss
):
pass
######################################################################################################
######################################################################################################
######################################################################################################
class DVAEpredictionErrorL2(DVAEpredictionError):
def __init__(self):
"""
        Returns loss based on the mean squared (L2) error between the input and the
        reconstruction. The input is expected to be a tensor of continuous values.
"""
super().__init__()
def store_loss(
self,
input_x,
encoder_output,
loss_recorder: core.DVAEloss
):
"""
Compute the loss
"""
loss = ((encoder_output - input_x)**2).mean()
loss_recorder.add_reconstruction_loss(loss)
######################################################################################################
######################################################################################################
######################################################################################################
class DVAEpredictionErrorCE(DVAEpredictionError):
def __init__(self):
"""
Returns loss based on cross entropy. The input is expected to be a tensor.
This loss can be used to predict classes, with values [0,1].
"""
super().__init__()
def store_loss(
self,
input_x,
encoder_output,
loss_recorder: core.DVAEloss
):
"""
Compute the loss
"""
        input_dim = encoder_output.shape[-1]  # infer the flattened feature dimension from the network output
loss = nn.CrossEntropyLoss(reduction='none')(encoder_output, input_x.reshape(-1, input_dim)).sum(-1).mean()
loss_recorder.add_reconstruction_loss(loss)
######################################################################################################
######################################################################################################
######################################################################################################
class DVAEpredictionErrorLogp(DVAEpredictionError):
def __init__(self):
"""
Returns loss based on -log(p), where p is based on a Distribution to be provided.
This loss is generally suitable whenever the distribution of values is explicitly modelled
"""
super().__init__()
def store_loss(
self,
input_x,
encoder_output,
loss_recorder: core.DVAEloss
):
"""
Compute the loss
"""
loss = -encoder_output.log_prob(input_x).sum(-1).mean()
loss_recorder.add_reconstruction_loss(loss)
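######################################################################################################
# Illustrative sketch (not part of the original module): how a prediction error object is meant to be
# driven. It assumes core.DVAEloss can be constructed without arguments and only relies on the
# add_reconstruction_loss() call already used above; the tensor shapes are arbitrary.
def _example_l2_reconstruction_loss():
    import torch
    loss_recorder = core.DVAEloss()          # assumed no-arg constructor
    error = DVAEpredictionErrorL2()
    x = torch.randn(8, 16)                   # a batch of inputs
    x_hat = torch.randn(8, 16)               # the corresponding network output
    error.store_loss(x, x_hat, loss_recorder)
    return loss_recorder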
|
StarcoderdataPython
|
107523
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
mods = ('clu.all',
'clu.abstract',
'clu.constants.consts',
'clu.constants.polyfills',
'clu.config.base',
'clu.config.settings',
'clu.config.ns',
'clu.csv',
'clu.fs.appdirectories',
'clu.fs.pypath',
'clu.keyvalue',
'clu.dispatch',
'clu.scripts.ansicolors',
'clu.scripts.boilerplate',
'clu.sanitizer')
class TestReplModules(object):
""" Run the tests for the “clu.repl.modules” module. """
def test_compare_module_lookups_for_all_things_no_args(self):
from clu.repl.modules import compare_module_lookups_for_all_things
from clu.repl.modules import Results, Mismatches
from clu.repl.modules import Result, Mismatch
results, mismatches = compare_module_lookups_for_all_things()
assert type(results) is Results
assert type(mismatches) is Mismatches
assert results.total > 500
assert results.total == sum(len(record.thingnames) for record in results.result_records)
assert len(mismatches.mismatch_records) == mismatches.total
assert len(mismatches.mismatch_records) < 50 # should be around 15 or 16
assert mismatches.failure_rate < 10.0 # last I checked this was ~2.841
assert mismatches.failure_rate == 100 * (float(mismatches.total) / float(results.total))
assert all(type(record) is Result for record in results.result_records)
assert all(type(record) is Mismatch for record in mismatches.mismatch_records)
def test_compare_module_lookups_for_all_things_variadic_args(self):
from clu.repl.modules import compare_module_lookups_for_all_things
from clu.repl.modules import Results, Mismatches
from clu.repl.modules import Result, Mismatch
modules = ('predicates', 'typology', 'mathematics', 'naming')
prefixd = tuple(f"clu.{nm}" for nm in modules)
results, mismatches = compare_module_lookups_for_all_things(*prefixd)
assert type(results) is Results
assert type(mismatches) is Mismatches
assert results.total > 100
assert results.total == sum(len(record.thingnames) for record in results.result_records)
assert len(mismatches.mismatch_records) == mismatches.total
assert len(mismatches.mismatch_records) < 50 # should be around 5 or 6
assert mismatches.failure_rate < 10.0 # last I checked this was ~2.841
assert mismatches.failure_rate == 100 * (float(mismatches.total) / float(results.total))
assert all(type(record) is Result for record in results.result_records)
assert all(type(record) is Mismatch for record in mismatches.mismatch_records)
@pytest.mark.parametrize('modulename', mods)
def test_modulemap(self, modulename):
from clu.naming import nameof, qualified_import
from clu.repl.modules import ModuleMap
from copy import copy, deepcopy
module = qualified_import(modulename)
try:
modmap = ModuleMap(module)
except ValueError as exc:
assert "one or more things" in str(exc)
else:
assert len(modmap) == len(module.__all__)
for thingname in dir(module):
assert modmap[thingname] == getattr(module, thingname)
for thingname in reversed(modmap):
assert modmap[thingname] == getattr(module, thingname)
# calculate “most”:
most = max(len(thingname) for thingname in dir(module))
assert most == modmap.most()
# set membership check for modmap.keys():
frozenthings = frozenset(module.__all__)
assert frozenthings.issuperset(modmap.keys())
assert frozenthings.issubset(modmap.keys())
# set membership check for modmap.items():
frozenpairs = frozenset(zip(modmap.keys(), modmap.values()))
assert frozenpairs.issuperset(modmap.items())
assert frozenpairs.issubset(modmap.items())
# repr checks:
assert repr(dict(modmap)) in repr(modmap)
assert repr(modmap).startswith(nameof(ModuleMap))
# copy/clone checks:
assert copy(modmap) == modmap
assert deepcopy(modmap) == modmap
assert modmap.clone() == modmap
def test_modulemap_error_conditions(self, consts):
from clu.repl.modules import ModuleMap
with pytest.raises(TypeError) as exc:
ModuleMap(None)
assert "valid module required" in str(exc.value)
with pytest.raises(TypeError) as exc:
ModuleMap(True)
assert "module instance required" in str(exc.value)
constmap = ModuleMap(consts)
# dunder attribute that exists in consts:
with pytest.raises(KeyError) as exc:
constmap['__dir__']
assert "__dir__" in str(exc.value)
# non-dunder attribute that does not exist in consts:
with pytest.raises(KeyError) as exc:
constmap['YO_DOGG']
assert "YO_DOGG" in str(exc.value)
|
StarcoderdataPython
|
3279762
|
# Copyright (c) 2019 <NAME>.
# Uranium is released under the terms of the LGPLv3 or higher.
import sys
import ctypes # type: ignore
from PyQt5.QtGui import QOpenGLVersionProfile, QOpenGLContext, QOpenGLFramebufferObject, QOpenGLBuffer
from PyQt5.QtWidgets import QMessageBox
from typing import Any, TYPE_CHECKING, cast
from UM.Logger import Logger
from UM.Version import Version
from UM.View.GL.FrameBufferObject import FrameBufferObject
from UM.View.GL.ShaderProgram import ShaderProgram
from UM.View.GL.ShaderProgram import InvalidShaderProgramError
from UM.View.GL.Texture import Texture
from UM.View.GL.OpenGLContext import OpenGLContext
from UM.i18n import i18nCatalog # To make dialogs translatable.
i18n_catalog = i18nCatalog("uranium")
if TYPE_CHECKING:
from UM.Mesh.MeshData import MeshData
## Convenience methods for dealing with OpenGL.
#
# This class simplifies dealing with OpenGL and different Python OpenGL bindings. It
# mostly describes an interface that should be implemented for dealing with basic OpenGL
# functionality using these different OpenGL bindings. Additionally, it provides singleton
# handling. The implementation-defined subclass must be set as singleton instance as soon
# as possible so that any calls to getInstance() return a proper object.
class OpenGL:
VertexBufferProperty = "__vertex_buffer"
IndexBufferProperty = "__index_buffer"
## Different OpenGL chipset vendors.
class Vendor:
NVidia = 1
AMD = 2
Intel = 3
Other = 4
def __init__(self) -> None:
if OpenGL.__instance is not None:
raise RuntimeError("Try to create singleton '%s' more than once" % self.__class__.__name__)
OpenGL.__instance = self
super().__init__()
profile = QOpenGLVersionProfile()
profile.setVersion(OpenGLContext.major_version, OpenGLContext.minor_version)
profile.setProfile(OpenGLContext.profile)
context = QOpenGLContext.currentContext()
if not context:
Logger.log("e", "Startup failed due to OpenGL context creation failing")
QMessageBox.critical(None, i18n_catalog.i18nc("@message", "Failed to Initialize OpenGL", "Could not initialize an OpenGL context. This program requires OpenGL 2.0 or higher. Please check your video card drivers."))
sys.exit(1)
self._gl = context.versionFunctions(profile) # type: Any #It's actually a protected class in PyQt that depends on the implementation of your graphics card.
if not self._gl:
Logger.log("e", "Startup failed due to OpenGL initialization failing")
QMessageBox.critical(None, i18n_catalog.i18nc("@message", "Failed to Initialize OpenGL", "Could not initialize OpenGL. This program requires OpenGL 2.0 or higher. Please check your video card drivers."))
sys.exit(1)
# It would be nice to be able to not necessarily need OpenGL FrameBuffer Object support, but
# due to a limitation in PyQt, currently glReadPixels or similar methods are not available.
# This means we can only get frame buffer contents through methods that indirectly call
# those methods, in this case primarily QOpenGLFrameBufferObject::toImage(), making us
# hard-depend on FrameBuffer Objects.
if not self.hasFrameBufferObjects():
Logger.log("e", "Startup failed, OpenGL does not support Frame Buffer Objects")
QMessageBox.critical(None, i18n_catalog.i18nc("Critical OpenGL Extensions Missing", "Critical OpenGL extensions are missing. This program requires support for Framebuffer Objects. Please check your video card drivers."))
sys.exit(1)
self._gl.initializeOpenGLFunctions()
self._gpu_vendor = OpenGL.Vendor.Other #type: int
vendor_string = self._gl.glGetString(self._gl.GL_VENDOR)
if vendor_string is None:
vendor_string = "Unknown"
vendor_string = vendor_string.lower()
if "nvidia" in vendor_string:
self._gpu_vendor = OpenGL.Vendor.NVidia
elif "amd" in vendor_string or "ati" in vendor_string:
self._gpu_vendor = OpenGL.Vendor.AMD
elif "intel" in vendor_string:
self._gpu_vendor = OpenGL.Vendor.Intel
self._gpu_type = "Unknown" # type: str
# WORKAROUND: Cura/#1117 Cura-packaging/12
        # Some Intel GPU chipsets return a string that cannot be decoded via PyQt5.
        # This workaround makes the code fall back to an "Unknown" renderer in these cases.
try:
self._gpu_type = self._gl.glGetString(self._gl.GL_RENDERER)
except UnicodeDecodeError:
Logger.log("e", "DecodeError while getting GL_RENDERER via glGetString!")
self._opengl_version = self._gl.glGetString(self._gl.GL_VERSION) #type: str
self._opengl_shading_language_version = Version("0.0") # type: Version
try:
self._opengl_shading_language_version = Version(self._gl.glGetString(self._gl.GL_SHADING_LANGUAGE_VERSION))
except:
self._opengl_shading_language_version = Version("1.0")
if not self.hasFrameBufferObjects():
Logger.log("w", "No frame buffer support, falling back to texture copies.")
Logger.log("d", "Initialized OpenGL subsystems.")
Logger.log("d", "OpenGL Version: %s", self._opengl_version)
Logger.log("d", "OpenGL Vendor: %s", self._gl.glGetString(self._gl.GL_VENDOR))
Logger.log("d", "OpenGL Renderer: %s", self._gpu_type)
Logger.log("d", "GLSL Version: %s", self._opengl_shading_language_version)
## Check if the current OpenGL implementation supports FrameBuffer Objects.
#
# \return True if FBOs are supported, False if not.
def hasFrameBufferObjects(self) -> bool:
return QOpenGLFramebufferObject.hasOpenGLFramebufferObjects()
## Get the current OpenGL version.
#
# \return Version of OpenGL
def getOpenGLVersion(self) -> str:
return self._opengl_version
## Get the current OpenGL shading language version.
#
# \return Shading language version of OpenGL
def getOpenGLShadingLanguageVersion(self) -> "Version":
return self._opengl_shading_language_version
## Get the current GPU vendor name.
#
# \return Name of the vendor of current GPU
def getGPUVendorName(self) -> str:
return self._gl.glGetString(self._gl.GL_VENDOR)
## Get the current GPU vendor.
#
# \return One of the items of OpenGL.Vendor.
def getGPUVendor(self) -> int:
return self._gpu_vendor
## Get a string describing the current GPU type.
#
# This effectively should return the OpenGL renderer string.
def getGPUType(self) -> str:
return self._gpu_type
## Get the OpenGL bindings object.
#
# This should return an object that has all supported OpenGL functions
# as methods and additionally defines all OpenGL constants. This object
# is used to make direct OpenGL calls so should match OpenGL as closely
# as possible.
def getBindingsObject(self) -> Any:
return self._gl
## Create a FrameBuffer Object.
#
    # This should return an implementation-specific FrameBufferObject subclass.
def createFrameBufferObject(self, width: int, height: int) -> FrameBufferObject:
return FrameBufferObject(width, height)
## Create a Texture Object.
#
# This should return an implementation-specific Texture subclass.
def createTexture(self) -> Texture:
return Texture(self._gl)
## Create a ShaderProgram Object.
#
    # This should return an implementation-specific ShaderProgram subclass.
def createShaderProgram(self, file_name: str) -> ShaderProgram:
shader = ShaderProgram()
# The version_string must match the keys in shader files.
if OpenGLContext.isLegacyOpenGL():
version_string = "" # Nothing is added to "fragment" and "vertex"
else:
version_string = "41core"
try:
shader.load(file_name, version = version_string)
except InvalidShaderProgramError:
# If the loading failed, it could be that there is no specific shader for this version.
# Try again without a version nr to get the generic one.
if version_string != "":
shader.load(file_name, version = "")
return shader
## Create a Vertex buffer for a mesh.
#
# This will create a vertex buffer object that is filled with the
# vertex data of the mesh.
#
# By default, the associated vertex buffer should be cached using a
# custom property on the mesh. This should use the VertexBufferProperty
# property name.
#
# \param mesh The mesh to create a vertex buffer for.
# \param kwargs Keyword arguments.
# Possible values:
# - force_recreate: Ignore the cached value if set and always create a new buffer.
def createVertexBuffer(self, mesh: "MeshData", **kwargs: Any) -> QOpenGLBuffer:
if not kwargs.get("force_recreate", False) and hasattr(mesh, OpenGL.VertexBufferProperty):
return getattr(mesh, OpenGL.VertexBufferProperty)
buffer = QOpenGLBuffer(QOpenGLBuffer.VertexBuffer)
buffer.create()
buffer.bind()
float_size = ctypes.sizeof(ctypes.c_float)
int_size = ctypes.sizeof(ctypes.c_int)
buffer_size = mesh.getVertexCount() * 3 * float_size # Vertex count * number of components * sizeof(float32)
if mesh.hasNormals():
buffer_size += mesh.getVertexCount() * 3 * float_size # Vertex count * number of components * sizeof(float32)
if mesh.hasColors():
buffer_size += mesh.getVertexCount() * 4 * float_size # Vertex count * number of components * sizeof(float32)
if mesh.hasUVCoordinates():
buffer_size += mesh.getVertexCount() * 2 * float_size # Vertex count * number of components * sizeof(float32)
for attribute_name in mesh.attributeNames():
attribute = mesh.getAttribute(attribute_name)
if attribute["opengl_type"] == "vector2f":
buffer_size += mesh.getVertexCount() * 2 * float_size
elif attribute["opengl_type"] == "vector4f":
buffer_size += mesh.getVertexCount() * 4 * float_size
elif attribute["opengl_type"] == "int":
buffer_size += mesh.getVertexCount() * int_size
elif attribute["opengl_type"] == "float":
buffer_size += mesh.getVertexCount() * float_size
else:
Logger.log(
"e", "Could not determine buffer size for attribute [%s] with type [%s]" % (attribute_name, attribute["opengl_type"]))
buffer.allocate(buffer_size)
offset = 0
vertices = mesh.getVerticesAsByteArray()
if vertices is not None:
buffer.write(0, vertices, len(vertices))
offset += len(vertices)
if mesh.hasNormals():
normals = cast(bytes, mesh.getNormalsAsByteArray())
buffer.write(offset, normals, len(normals))
offset += len(normals)
if mesh.hasColors():
colors = cast(bytes, mesh.getColorsAsByteArray())
buffer.write(offset, colors, len(colors))
offset += len(colors)
if mesh.hasUVCoordinates():
uvs = cast(bytes, mesh.getUVCoordinatesAsByteArray())
buffer.write(offset, uvs, len(uvs))
offset += len(uvs)
for attribute_name in mesh.attributeNames():
attribute = mesh.getAttribute(attribute_name)
attribute_byte_array = attribute["value"].tostring()
buffer.write(offset, attribute_byte_array, len(attribute_byte_array))
offset += len(attribute_byte_array)
buffer.release()
setattr(mesh, OpenGL.VertexBufferProperty, buffer)
return buffer
## Create an index buffer for a mesh.
#
# This will create an index buffer object that is filled with the
# index data of the mesh.
#
# By default, the associated index buffer should be cached using a
# custom property on the mesh. This should use the IndexBufferProperty
# property name.
#
# \param mesh The mesh to create an index buffer for.
# \param kwargs Keyword arguments.
# Possible values:
# - force_recreate: Ignore the cached value if set and always create a new buffer.
def createIndexBuffer(self, mesh: "MeshData", **kwargs: Any):
if not mesh.hasIndices():
return None
if not kwargs.get("force_recreate", False) and hasattr(mesh, OpenGL.IndexBufferProperty):
return getattr(mesh, OpenGL.IndexBufferProperty)
buffer = QOpenGLBuffer(QOpenGLBuffer.IndexBuffer)
buffer.create()
buffer.bind()
data = cast(bytes, mesh.getIndicesAsByteArray()) # We check for None at the beginning of the method
if 'index_start' in kwargs and 'index_stop' in kwargs:
buffer.allocate(data[4 * kwargs['index_start']:4 * kwargs['index_stop']], 4*(kwargs['index_stop'] - kwargs['index_start']))
else:
buffer.allocate(data, len(data))
buffer.release()
setattr(mesh, OpenGL.IndexBufferProperty, buffer)
return buffer
__instance = None # type: OpenGL
@classmethod
def getInstance(cls, *args, **kwargs) -> "OpenGL":
return cls.__instance
|
StarcoderdataPython
|
1687336
|
<filename>single_im_annotate.py
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import argparse, os
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="input file to predict objects")
parser.add_argument("-b", "--labels", type=str, default= "./annotations/label_map.pbtxt", help="path to label map")
parser.add_argument("-c", "--ckpt", type=str, default="./training/frozen_inference_graph_v4.pb", help="path to checkpoint inference graph")
parser.add_argument("-t", "--threshold", type=float, default=0.30, help="confidence threshold for annotations, betweem 0 and 1")
args = parser.parse_args()
PATH_TO_LABELS = args.labels
PATH_TO_CKPT = args.ckpt
CONF_THR = args.threshold
NUM_CLASSES = 1
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def save_image(data, filename):
sizes = np.shape(data)
fig = plt.figure(figsize=(1,1))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(data, cmap = plt.get_cmap("bone"))
plt.savefig(filename,dpi = 1200)
plt.close()
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
with detection_graph.as_default():
with tf.compat.v1.Session(graph=detection_graph) as sess:
image_np = plt.imread(args.file).copy()
if len(image_np.shape) == 2 or image_np.shape[2] == 1:
print("Converting grayscale image ...")
image_np = np.stack((image_np,)*3, axis=-1)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Extract image tensor
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Extract detection boxes
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Extract detection scores
scores = detection_graph.get_tensor_by_name('detection_scores:0')
# Extract detection classes
classes = detection_graph.get_tensor_by_name('detection_classes:0')
        # Extract number of detections
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=4,
max_boxes_to_draw=100,
min_score_thresh=CONF_THR)
save_image(image_np, args.file.split(".")[0] + "_annot.jpg")
|
StarcoderdataPython
|
3308483
|
<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import sqlalchemy
from aria.storage import (
ModelStorage,
sql_mapi
)
from aria import modeling
from aria.modeling.exceptions import ValueFormatException
from ..storage import (
release_sqlite_storage,
init_inmemory_model_storage
)
from . import MockModel
from ..mock import (
models,
context as mock_context
)
@pytest.fixture
def storage():
base_storage = ModelStorage(sql_mapi.SQLAlchemyModelAPI,
initiator=init_inmemory_model_storage)
base_storage.register(MockModel)
yield base_storage
release_sqlite_storage(base_storage)
@pytest.fixture(scope='module', autouse=True)
def module_cleanup():
modeling.models.aria_declarative_base.metadata.remove(MockModel.__table__) # pylint: disable=no-member
@pytest.fixture
def context(tmpdir):
ctx = mock_context.simple(str(tmpdir))
yield ctx
release_sqlite_storage(ctx.model)
def test_inner_dict_update(storage):
inner_dict = {'inner_value': 1}
mock_model = MockModel(model_dict={'inner_dict': inner_dict, 'value': 0})
storage.mock_model.put(mock_model)
storage_mm = storage.mock_model.get(mock_model.id)
assert storage_mm == mock_model
storage_mm.model_dict['inner_dict']['inner_value'] = 2
storage_mm.model_dict['value'] = -1
storage.mock_model.update(storage_mm)
storage_mm = storage.mock_model.get(storage_mm.id)
assert storage_mm.model_dict['inner_dict']['inner_value'] == 2
assert storage_mm.model_dict['value'] == -1
def test_inner_list_update(storage):
mock_model = MockModel(model_list=[0, [1]])
storage.mock_model.put(mock_model)
storage_mm = storage.mock_model.get(mock_model.id)
assert storage_mm == mock_model
storage_mm.model_list[1][0] = 'new_inner_value'
storage_mm.model_list[0] = 'new_value'
storage.mock_model.update(storage_mm)
storage_mm = storage.mock_model.get(storage_mm.id)
assert storage_mm.model_list[1][0] == 'new_inner_value'
assert storage_mm.model_list[0] == 'new_value'
def test_model_to_dict(context):
service = context.service
service = service.to_dict()
expected_keys = [
'description',
'created_at',
'updated_at'
]
for expected_key in expected_keys:
assert expected_key in service
def test_relationship_model_ordering(context):
service = context.model.service.get_by_name(models.SERVICE_NAME)
source_node = context.model.node.get_by_name(models.DEPENDENT_NODE_NAME)
target_node = context.model.node.get_by_name(models.DEPENDENCY_NODE_NAME)
new_node_template = modeling.models.NodeTemplate(
name='new_node_template',
type=source_node.type,
service_template=service.service_template
)
new_node = modeling.models.Node(
name='new_node',
type=source_node.type,
service=service,
version=None,
node_template=new_node_template,
state=modeling.models.Node.INITIAL,
)
source_node.outbound_relationships.append(modeling.models.Relationship(
source_node=source_node,
target_node=new_node,
))
new_node.outbound_relationships.append(modeling.models.Relationship( # pylint: disable=no-member
source_node=new_node,
target_node=target_node,
))
context.model.node_template.put(new_node_template)
context.model.node.put(new_node)
context.model.node.refresh(source_node)
context.model.node.refresh(target_node)
def flip_and_assert(node, direction):
"""
        Reverses the order of relationships and asserts the effects took place.
:param node: the node instance to operate on
:param direction: the type of relationships to flip (inbound/outbound)
:return:
"""
assert direction in ('inbound', 'outbound')
def get_relationships():
return getattr(node, direction + '_relationships')
relationships = get_relationships()
assert len(relationships) == 2
reversed_relationship = list(reversed(relationships))
assert relationships != reversed_relationship
relationships[:] = reversed_relationship
context.model.node.update(node)
assert get_relationships() == reversed_relationship
flip_and_assert(source_node, 'outbound')
flip_and_assert(target_node, 'inbound')
class StrictClass(modeling.models.aria_declarative_base, modeling.mixins.ModelMixin):
__tablename__ = 'strict_class'
strict_dict = sqlalchemy.Column(modeling.types.StrictDict(basestring, basestring))
strict_list = sqlalchemy.Column(modeling.types.StrictList(basestring))
def test_strict_dict():
strict_class = StrictClass()
def assert_strict(sc):
with pytest.raises(ValueFormatException):
sc.strict_dict = {'key': 1}
with pytest.raises(ValueFormatException):
sc.strict_dict = {1: 'value'}
with pytest.raises(ValueFormatException):
sc.strict_dict = {1: 1}
assert_strict(strict_class)
strict_class.strict_dict = {'key': 'value'}
assert strict_class.strict_dict == {'key': 'value'}
assert_strict(strict_class)
with pytest.raises(ValueFormatException):
strict_class.strict_dict['key'] = 1
with pytest.raises(ValueFormatException):
strict_class.strict_dict[1] = 'value'
with pytest.raises(ValueFormatException):
strict_class.strict_dict[1] = 1
def test_strict_list():
strict_class = StrictClass()
def assert_strict(sc):
with pytest.raises(ValueFormatException):
sc.strict_list = [1]
assert_strict(strict_class)
strict_class.strict_list = ['item']
assert strict_class.strict_list == ['item']
assert_strict(strict_class)
with pytest.raises(ValueFormatException):
strict_class.strict_list[0] = 1
|
StarcoderdataPython
|
3292590
|
<filename>scripts/parseCommonLogs.py<gh_stars>1-10
from glob import glob
import os
import datetime as dt
from parseOutputFile import parse_output
import math
def progressBar(current, total, barLength = 20):
percent = float(current) * 100 / total
arrow = '-' * int(percent/100 * barLength - 1) + '>'
spaces = ' ' * (barLength - len(arrow))
print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r')
def parse_common_logs(run_paths):
bcast_latencies_per_run = {}
bytes_per_second = []
dupes_per_second = []
sync_time_per_second = []
disk_usage_per_second = []
sync_start_times = {}
sync_end_times = {}
for run_path in run_paths: # RUNS
node_files = glob(run_path + "/*.log")
print(f"Found {len(node_files)} node files for {os.path.basename(run_path)}")
send_times = {}
reception_times = {}
run_start_time, first_dead_time, first_cooldown_time, first_message_time, last_start_time, catastrophe_start_time, \
churn_start_time, churn_end_time = parse_output(run_path)
if len(bytes_per_second) == 0:
for _ in range(math.ceil(first_dead_time)):
bytes_per_second.append(0)
dupes_per_second.append(0)
sync_time_per_second.append((0, 0))
disk_usage_per_second.append((0, 0))
idx = 0
for node_file in node_files: # NODES
progressBar(idx, len(node_files))
idx += 1
file = open(node_file, "r")
start_time = -1
for i in file:
line = i.split(" ")
if "Hello" in line[3]:
start_time = dt.datetime.strptime(line[1], '%d/%m/%Y-%H:%M:%S,%f').timestamp() - run_start_time
# BROADCAST LATENCY
elif line[3] == "SENT":
send_times[line[4]] = dt.datetime.strptime(line[1],
'%d/%m/%Y-%H:%M:%S,%f').timestamp() - run_start_time
if send_times[line[4]] > first_dead_time:
print(f"Erro não convergiu {line[4]}: send time {send_times[line[4]]}; first dead {first_dead_time}")
exit()
elif line[3] == "RECEIVED":
reception_time = dt.datetime.strptime(line[1], '%d/%m/%Y-%H:%M:%S,%f').timestamp() - run_start_time
msg_id = line[4]
if msg_id not in reception_times:
reception_times[msg_id] = []
reception_times[msg_id].append((start_time, reception_time))
# BYTES
elif len(line) > 6 and "BytesSent" in line[5]:
bytes_time = dt.datetime.strptime(line[1], '%d/%m/%Y-%H:%M:%S,%f').timestamp() - run_start_time
if bytes_time < first_dead_time:
bytes_per_second[math.floor(bytes_time)] += int(line[5].split("=")[1])
# DUPLICATES
elif line[3] == "DUPLICATE":
dupe_time = dt.datetime.strptime(line[1], '%d/%m/%Y-%H:%M:%S,%f').timestamp() - run_start_time
if dupe_time < first_dead_time:
dupes_per_second[math.floor(dupe_time)] += 1
# SYNC TIMES
elif line[3] == "STARTED_SYNC":
sync_start_times[line[4]] = dt.datetime.strptime(line[1],
'%d/%m/%Y-%H:%M:%S,%f').timestamp() - run_start_time
elif line[3] == "ENDED_SYNC":
sync_end_times[line[4]] = dt.datetime.strptime(line[1],
'%d/%m/%Y-%H:%M:%S,%f').timestamp() - run_start_time
# DISK USAGE
elif "DiskUsage" in line[3]:
disk_usage_time = dt.datetime.strptime(line[1], '%d/%m/%Y-%H:%M:%S,%f').timestamp() - run_start_time
if disk_usage_time < first_dead_time:
n_print_sec, disk_usage_sec = disk_usage_per_second[math.floor(disk_usage_time)]
disk_usage_per_second[math.floor(disk_usage_time)] = (n_print_sec + 1, disk_usage_sec + int(line[3].split("=")[1]))
# AFTER NODES
# BROADCAST LATENCY
broadcast_latencies = []
for msg_id, send_time in send_times.items():
if msg_id in reception_times:
last_reception = -1
for start_time, reception_time in reception_times[msg_id]:
if start_time < send_time and reception_time > last_reception:
last_reception = reception_time
broadcast_latencies.append((send_time, last_reception - send_time))
bcast_latencies_per_run[run_path] = broadcast_latencies
# AFTER RUNS
# BROADCAST LATENCY
n_latencies = 0
total_latency = 0
for run_latencies in bcast_latencies_per_run.values():
for send_time, bcast_latency in run_latencies:
n_latencies += 1
total_latency += bcast_latency
avg_broadcast_latency = total_latency / n_latencies
latencies_per_second = []
for _ in range(math.ceil(first_dead_time)):
latencies_per_second.append((0, 0))
for run_latencies in bcast_latencies_per_run.values():
for send_time, bcast_latency in run_latencies:
if send_time > first_dead_time:
print("Erro não convergiu")
exit()
n_ops_sec, bcast_latency_sec = latencies_per_second[math.floor(send_time)]
latencies_per_second[math.floor(send_time)] = (n_ops_sec + 1, bcast_latency_sec + bcast_latency)
avg_latencies_per_second = []
for n_ops_sec, bcast_latency_sec in latencies_per_second:
if n_ops_sec == 0:
avg_latencies_per_second.append(0)
else:
avg_latencies_per_second.append(bcast_latency_sec/n_ops_sec)
# BYTES
total_bytes_per_second = []
for bytes_sec in bytes_per_second:
total_bytes_per_second.append(bytes_sec/len(run_paths))
total_bytes = sum(total_bytes_per_second) / len(run_paths)
# DUPLICATES
total_dupes_per_second = []
for dupes_sec in dupes_per_second:
total_dupes_per_second.append(dupes_sec / len(run_paths))
total_dupes = sum(total_dupes_per_second) / len(run_paths)
# SYNC TIMES
for msg_id, start_time in sync_start_times.items():
if start_time < first_dead_time and msg_id in sync_end_times:
n_sync_sec, sync_time_sec = sync_time_per_second[math.floor(start_time)]
sync_time_per_second[math.floor(start_time)] = (n_sync_sec + 1, sync_time_sec + sync_end_times[msg_id] - start_time)
avg_sync_time_per_second = []
avg_n_syncs_per_second = []
total_sync_time = 0
n_syncs = 0
for n_sync_sec, sync_time_sec in sync_time_per_second:
total_sync_time += sync_time_sec
n_syncs += n_sync_sec
if n_sync_sec == 0:
avg_sync_time_per_second.append(0)
else:
avg_sync_time_per_second.append(sync_time_sec / n_sync_sec)
avg_n_syncs_per_second.append(n_sync_sec / len(run_paths))
if n_syncs > 0:
avg_sync_time = total_sync_time / n_syncs
else:
avg_sync_time = -1
total_sync_time = total_sync_time / len(run_paths)
n_syncs = n_syncs / len(run_paths)
# DISK USAGE
avg_disk_usage_per_second = []
for n_print_sec, disk_usage_sec in disk_usage_per_second:
if n_print_sec == 0:
avg_disk_usage_per_second.append(0)
else:
avg_disk_usage_per_second.append(disk_usage_sec / n_print_sec)
# TREE STABILIZATION IN CATASTROPHE
tree_stabilization_time = -1
last_idx = -1
if catastrophe_start_time != -1:
        # Use a separate loop variable so the n_syncs total returned below is not overwritten.
        for idx, syncs_in_sec in enumerate(avg_n_syncs_per_second):
            if syncs_in_sec != 0:
                last_idx = idx
tree_stabilization_time = last_idx - catastrophe_start_time
return {"AVG_BCAST_LATENCY": avg_broadcast_latency,
"AVG_LATENCIES_PER_SECOND": avg_latencies_per_second,
"TOTAL_BYTES": total_bytes,
"TOTAL_BYTES_PER_SECOND": total_bytes_per_second,
"TOTAL_DUPES": total_dupes,
"TOTAL_DUPES_PER_SECOND": total_dupes_per_second,
"AVG_SYNC_TIME_PER_SECOND": avg_sync_time_per_second,
"AVG_N_SYNCS_PER_SECOND": avg_n_syncs_per_second,
"AVG_SYNC_TIME": avg_sync_time,
"N_SYNCS": n_syncs,
"TOTAL_SYNC_TIME": total_sync_time,
"AVG_DISK_USAGE_PER_SECOND": avg_disk_usage_per_second,
"TREE_STABILIZATION_TIME": tree_stabilization_time,
"FIRST_NODE_DEAD": first_dead_time,
"FIRST_NODE_COOLDOWN": first_cooldown_time,
"FIRST_MESSAGE": first_message_time,
"LAST_NODE_START": last_start_time,
"START_CATASTROPHE": catastrophe_start_time,
"START_CHURN": churn_start_time,
"END_CHURN": churn_end_time}
|
StarcoderdataPython
|