max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
inn/inn_hotels/report/room_discrepancy/room_discrepancy.py | vinhnguyent090/front-desk | 4 | 12797251 | <gh_stars>1-10
# Copyright (c) 2013, Core Initiative and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from datetime import datetime
def execute(filters=None):
columns = [
{
'fieldname': 'date',
'label': 'Date',
'fieldtype': 'Date',
},
{
'fieldname': 'room',
'label': 'Room',
'fieldtype': 'Link',
'options': 'Inn Room',
},
{
'fieldname': 'type',
'label': 'Type',
'fieldtype': 'Link',
'options': 'Inn Room Type',
},
{
'fieldname': 'system_fo',
'label': 'System FO',
'fieldtype': 'Data',
},
{
'fieldname': 'actual_hk',
'label': 'Actual HK',
'fieldtype': 'Data',
},
]
data = get_data()
print(data)
return columns, data
def get_rooms():
return frappe.db.sql("""
select number as room, room_type as type, room_status as system_fo
from `tabInn Room` order by number""", as_dict=True)
def get_data():
rooms = get_rooms()
now = datetime.date(datetime.now())
for room in rooms:
room['date'] = now
words = room['system_fo'].split()
letters = [word[0] for word in words]
room['system_fo'] = "".join(letters)
return rooms | 2.359375 | 2 |
examples/recipes/prax_shake_head.py | pi-top/pi-top-Python-SDK | 28 | 12797252 | from time import sleep
from pitop import TiltRollHeadController
# Create a head controller object
head = TiltRollHeadController()
# Initialize the servo angles
head.roll.target_angle = 0
head.tilt.target_angle = 50
sleep(1)
# Nod 6 times at max speed 5 degrees either side of current angle. Blocks program execution until finished.
head.nod(times=6, angle=5, speed=100, block=True)
# Shake 4 times at half speed 10 degrees either side of current angle. Blocks program execution until finished.
head.shake(times=4, angle=10, speed=50, block=True)
# Shake and nod at the same time with default speed and angle
# Setting nod with block=False ensures the program continues to the next command
head.nod(times=6, block=False)
head.shake(times=6, block=True)
| 3.296875 | 3 |
python/unit_test/get_data_test.py | aaronlam88/cmpe295 | 5 | 12797253 | import sys
sys.path.insert(0, '../models')
from get_data import GetData
# from python.ultilities.get_data import GetData
import unittest
import csv
class TestGetData(unittest.TestCase):
def test_getAllFeatures1(self):
getData = GetData()
features = getData.getAllFeatures()
self.assertIsNotNone(features)
def test_getAllFeatures2(self):
getData = GetData(101)
features = getData.getAllFeatures()
self.assertIsNotNone(features)
self.assertEqual(len(features), 100)
def test_getAllFeatures3(self):
getData = GetData(5)
features = getData.getAllFeatures('open', 'close')
self.assertIsNotNone(features)
self.assertEqual(len(features[0][0]), 2)
if __name__ == '__main__':
unittest.main() | 2.75 | 3 |
tests/test_cli.py | wizardsoftheweb/gitflow-easyrelease | 0 | 12797254 | # pylint: disable=missing-docstring
from __future__ import print_function
from mock import patch
from gitflow_easyrelease import cli
@patch('gitflow_easyrelease.cli_file.ColorOutput')
@patch('gitflow_easyrelease.cli_file.Subcommand')
@patch('gitflow_easyrelease.cli_file.Application')
def test_execution(mock_app, mock_sub, mock_color):
mock_color.assert_not_called()
mock_sub.assert_not_called()
mock_app.assert_not_called()
cli()
mock_color.assert_called_once()
assert 1 <= mock_sub.call_count
mock_app.assert_called_once()
| 2.109375 | 2 |
lib/command.py | GrimHacker/hashcatter | 2 | 12797255 | <reponame>GrimHacker/hashcatter
'''
.1111... | Title: command
.10000000000011. .. | Author: <NAME>
.00 000... | Email: <EMAIL>
1 01.. | Description:
.. | executes a command as a subprocess
.. |
GrimHacker .. |
.. |
grimhacker.com .. |
@_grimhacker .. |
--------------------------------------------------------------------------------
Created on 22 Sep 2013
@author: GrimHacker
'''
import logging
from subprocess import CalledProcessError
from lib.async_subprocess import AsyncPopen, PIPE
class Command():
def __init__(self):
self.log = logging.getLogger(__name__)
def _stdout(self, out):
"""
print line from stdout of executed command
"""
for line in out.split("\n"):
if line != "": # output anything that isn't a blank line
self.log.info("{0}".format(line))
def _stderr(self, err):
"""
print line from stderr of executed command
"""
for line in err.split("\n"):
if line != "": # output anything that isn't a blank line
self.log.warning("{0}".format(line))
def _execute(self, cmd):
"""
run the specified command as a subprocess
"""
self.log.debug("running: '{0}'".format(cmd))
try:
proc = AsyncPopen(cmd, stdout=PIPE, stderr=PIPE)
while proc.poll() is None: # while subprocess hasn't finished
out, err = proc.communicate("s")
if err is not None:
self._stderr(err)
if out is not None:
self._stdout(out)
#line = proc.stdout.readline().strip("\n")
#self._stdout(line)
# line = proc.stderr.readline().strip("\n") # waits for stderr. #TODO: need to put this in a thread
# self._stderr(line)
# when we get to here the subprocess has finished running
        except CalledProcessError as e:
            self.log.error("command '{0}' failed with return code {1}".format(e.cmd, e.returncode))
#return "{0}: {1}".format(e.errno, e.strerror) | 2.5 | 2 |
kontaktmap.py | muellermartin/kontaktmap | 0 | 12797256 | import os
from flask import Flask, render_template, request, json
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/save', methods=['POST'])
def save():
with open('data.json', 'w+') as f:
f.write(json.dumps(request.get_json()))
return ''
@app.route('/load')
def load():
result = '{ "markers": [] }'
if os.path.isfile('data.json'):
with open('data.json', 'r') as f:
result = f.read()
return json.jsonify(result)
if __name__ == '__main__':
app.run()
| 2.796875 | 3 |
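A quick way to exercise the two endpoints above is a small client script. This is only a sketch, assuming the app is running locally on Flask's default port and that the `requests` package is installed; the marker fields shown are illustrative, not part of the app's schema:

```python
import requests

BASE = "http://127.0.0.1:5000"  # assumed local development address

# /save writes the posted JSON body to data.json on the server side.
requests.post(BASE + "/save", json={"markers": [{"lat": 52.52, "lng": 13.405}]})

# /load returns the stored JSON, or an empty marker list if no file exists yet.
print(requests.get(BASE + "/load").text)
```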
relaax/common/metrics.py | j0k/relaax | 4 | 12797257 | from __future__ import print_function
class Metrics(object):
def scalar(self, name, y, x=None):
raise NotImplementedError
| 1.90625 | 2 |
server/insctructions/aggregator.py | Ashkan-Agc/web-time-tracker | 5 | 12797258 | <reponame>Ashkan-Agc/web-time-tracker<gh_stars>1-10
from django.db.models import Sum, Avg, Max, Min
from TimeTracker.models import Sites
from datetime import datetime
from .dataFunc import DataInstruct
class Aggregators():
def __init__(self, user_id):
self.now = datetime.now()
self.date = self.now.strftime("%Y-%m-%d")
self.userSites = Sites.objects.filter(user_id=user_id).all()
self.userSitesDay = self.userSites.filter(date=self.date).all()
def get_data(self, query, agg):
time = DataInstruct()
url = self.userSitesDay.filter(time=query['dailyTime__{}'.format(agg)])
m = ""
for i in url:
m = i.url
data = {
"url": m,
"agg": time.convert_to_time(query['dailyTime__{}'.format(agg)])
}
return data
def max(self):
q = self.userSitesDay.aggregate(Max('dailyTime'))
return self.get_data(q, 'max')
def min(self):
q = self.userSitesDay.aggregate(Min('dailyTime'))
return self.get_data(q, 'min')
def average(self):
q = self.userSitesDay.aggregate(Avg('dailyTime'))
return self.get_data(q, 'avg')
def sum(self):
q = self.userSitesDay.aggregate(Sum('dailyTime'))
return self.get_data(q, 'sum')
| 2.34375 | 2 |
qvi/misc/plot.py | amirdib/quantized-variational-inference | 0 | 12797259 | <gh_stars>0
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors
from scipy.spatial import Voronoi, voronoi_plot_2d
from scipy.signal import savgol_filter
from qvi.core.experiments import compute_traces_from_multiple_trainning
class _TFColor(object):
"""Enum of colors used in TF docs."""
red = '#F15854'
blue = '#5DA5DA'
orange = '#FAA43A'
green = '#60BD68'
pink = '#F17CB0'
brown = '#B2912F'
purple = '#B276B2'
yellow = '#DECF3F'
gray = '#4D4D4D'
qmc = '#33a02c'
rqmc = '#fdae61'
qvi = '#2c7bb6'
rqvi = '#abd9e9'
mc = '#d7191c'
def __getitem__(self, i):
return [
self.red,
self.orange,
self.green,
self.blue,
self.pink,
self.brown,
self.purple,
self.yellow,
self.gray,
][i % 9]
TFColor = _TFColor()
def plot2d(array, ax=None, **kwargs):
''' Two dimension plot of an array.
Args:
array: 2D array. The right-most index is the dimension index.
ax: Matplotlib Axes. If None, one would be created.
kwargs: key word argument for the scatter plot.
'''
ax = plt.subplot() if ax is None else ax
X = array[:, 0]
Y = array[:, 1]
ax.scatter(X, Y, **kwargs)
def plot_experiments(experiments, axes, dataset, limits=None, gradylimit=None, dataset_name=None, vi_type=['all'], abscissa='epochs', num_burnin_steps=0, norm=1, **kwargs):
"""Plotting MCVI, QVI and RQVI Experiment
Args:
experiments: Iterable of Experiment instances.
axes: Matplotlib Axe.
dataset: Dataset Number. Must be comptatible with axes.
limites: Python `tuple` for xlim and ylim for each plot in Axes.
name: Python `str` name dataset name for title display.
name: Iterable of Python `str`: 'All', 'mc', 'QVI', 'RQVI'.
abscissa: Python `str`: `time` or `epochs`.
num_burnin_steps: Python `int`: Number of step to ignore for display.
"""
for k, experiment in enumerate(sorted(experiments, key=lambda e: e.optimizer_params['learning_rate'])):
line = 2*dataset
elboax = axes[line, k]
gradax = axes[line+1, k]
qmc_ls = 'x'
rqmc_ls = 'v'
qvi_ls = ''
rqvi_ls = '.'
mc_ls = '^'
every = 70
if 'all' in vi_type or 'rqvi' in vi_type:
plot(elboax, gradax, experiment.rqvitraces, abscissa=abscissa,marker=rqvi_ls,markevery=every,
name='RQVI', log_scale=False, c=TFColor.rqvi, num_burnin_steps=num_burnin_steps, norm=norm,**kwargs)
if 'all' in vi_type or 'qmc' in vi_type:
plot(elboax, gradax, experiment.qmcvitraces, abscissa=abscissa,marker=qmc_ls,markevery=every,
name='QMCVI', log_scale=False, c=TFColor.qmc,num_burnin_steps=num_burnin_steps, gradaxalpha=.6, norm=norm,**kwargs)
if 'all' in vi_type or 'rqmc' in vi_type:
plot_multiple_traces(elboax, gradax, experiment.rqmcvitraces, color=TFColor.rqmc,marker=rqmc_ls,markevery=every,
abscissa=abscissa, name='RQMC multiple', log_scale=True, num_burnin_steps=num_burnin_steps, norm=norm,**kwargs)
if 'all' in vi_type or 'qvi' in vi_type:
plot(elboax, gradax, experiment.qvitraces, abscissa=abscissa,marker=qvi_ls,markevery=every,
name='QVI', log_scale=False, c=TFColor.qvi, num_burnin_steps=num_burnin_steps,gradaxalpha=.6, norm=norm,**kwargs)
if 'all' in vi_type or 'mc' in vi_type:
plot_multiple_traces(elboax, gradax, experiment.mcvitraces,color=TFColor.red,marker=mc_ls,markevery=every,
abscissa=abscissa, name='MC VI Multiple', log_scale=True, num_burnin_steps=num_burnin_steps, norm=norm,**kwargs)
elboax.set_xticks(ticks=[])
if abscissa == 'time':
xlabel = 'time(s)'
elif abscissa == 'epochs':
xlabel = 'iterations'
xlabel = None
else:
xlabel = None
gradax.set_xlabel(xlabel)
elboax.set_yscale('symlog')
gradax.set_yscale('symlog')
if limits is not None:
xlim, ylim = limits
elboax.set_xlim(xlim)
gradax.set_xlim(xlim)
elboax.set_ylim(ylim)
if gradylimit is not None:
xlim, _ = limits
gradax.set_xlim(xlim)
gradax.set_ylim(*gradylimit)
if k != 0:
gradax.set_yticks(ticks=[])
elboax.set_yticks(ticks=[])
gradax.tick_params(axis=u'y', which=u'both', length=0)
elboax.tick_params(axis=u'both', which=u'both', length=0)
if k == 0:
elboax.set_ylabel('ELBO')
gradax.set_ylabel(r'$\mathbb{E}|g|_{2}$')
elboax.set_title('{}'.format(
dataset_name) + r'$(\alpha=${:.0e})'.format(experiment.optimizer_params['learning_rate']))
def plot(elboax, gradax, trace, abscissa='time', name='', log_scale=False, c=None, num_burnin_steps=0, alpha=1, gradaxalpha=1,norm=1, **kwargs):
loss, timestamps, grads = trace
if abscissa == 'time':
x = timestamps - timestamps[num_burnin_steps]
elif abscissa == 'epochs':
x = np.arange(0, len(loss))
grads = tf.reduce_sum(tf.square(grads), axis=-1)/norm
if log_scale is True:
grads = tf.math.log(grads)
loss = tf.math.log(loss)
elboax.plot(x[num_burnin_steps:], -
loss[num_burnin_steps:], c=c, label=name, alpha=alpha,**kwargs)
gradax.plot(x[num_burnin_steps:],
grads[num_burnin_steps:], c=c, label=name, alpha=gradaxalpha,**kwargs)
def plot_multiple_traces(elboax, gradax, traces, abscissa='time', name='', log_scale=False, num_burnin_steps=0,color='red', norm=1,**kwargs):
losses, timestamps, grads = compute_traces_from_multiple_trainning(traces)
if abscissa == 'time':
x = timestamps - timestamps[num_burnin_steps]
elif abscissa == 'epochs':
x = np.arange(0, len(losses))
mean_var_ts(ts=losses[num_burnin_steps:], x=x[num_burnin_steps:],
ax=elboax, label='MCVI', log_scale=log_scale, coeff=-1, color=color, norm=1,**kwargs)
mean_var_ts(ts=grads[num_burnin_steps:], x=x[num_burnin_steps:],
ax=gradax, label='MCVI', log_scale=log_scale,color=color,norm=norm,**kwargs)
def mean_var_ts(ts, x, ax, label=None, log_scale=False,color='red', coeff=1, norm=1, lw=.7,**kwargs):
window_length_mean = 51
window_length_var = 51
mean = tf.reduce_mean(ts/norm, axis=-1)
variance = tf.math.reduce_std(ts/norm, axis=-1)
smoothed_mean = savgol_filter(mean, window_length_mean, 2)
smoothed_variance = savgol_filter(variance, window_length_var, 3)
edgecolor = '#CC4F1B'
alpha = .6
if log_scale is True:
logmean = tf.math.log(smoothed_mean)
logvariance = smoothed_variance/smoothed_mean
ax.fill_between(x, coeff*tf.math.exp(logmean-logvariance), coeff*tf.math.exp(logmean+logvariance),
alpha=alpha, edgecolor=edgecolor, facecolor=color)
ax.plot(x, coeff*smoothed_mean, c=color, label=label, lw=lw,**kwargs)
else:
ax.plot(x, coeff*smoothed_mean, c=color, label=label, lw=lw,**kwargs)
ax.fill_between(x, coeff*smoothed_mean-smoothed_variance, coeff*smoothed_mean+smoothed_variance,
alpha=alpha, edgecolor=edgecolor, facecolor=color)
def scatter_plot_voronoi(qdist,n, ax, title=''):
q_samples = qdist.sample(n)
speed = qdist.weights
minima = min(speed)
maxima = max(speed)
norm = colors.Normalize(vmin=minima, vmax=maxima, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap=cm.Reds)
vor = Voronoi(q_samples.numpy())
voronoi_plot_2d(vor, show_points=False, show_vertices=False, s=1,ax=ax)
for r in range(len(vor.point_region)):
if r == 12:
pass
else:
region = vor.regions[vor.point_region[r]]
if not -1 in region:
polygon = [vor.vertices[i] for i in region]
plt.fill(*zip(*polygon), color=mapper.to_rgba(speed[r]))
ax.plot(q_samples[:,0], q_samples[:,1], 'ko', markersize=2)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.title.set_text(title)
def plot_heatmap_2d(dist, xmin=-4.0, xmax=4.0, ymin=-4.0, ymax=4.0, mesh_count=1000, name=None):
plt.figure()
x = tf.linspace(xmin, xmax, mesh_count)
y = tf.linspace(ymin, ymax, mesh_count)
X, Y = tf.meshgrid(x, y)
concatenated_mesh_coordinates = tf.transpose(tf.stack([tf.reshape(Y, [-1]), tf.reshape(X, [-1])]))
prob = dist.prob(concatenated_mesh_coordinates)
#plt.hexbin(concatenated_mesh_coordinates[:,0], concatenated_mesh_coordinates[:,1], C=prob, cmap='rainbow')
prob = prob.numpy()
plt.imshow(tf.transpose(tf.reshape(prob, (mesh_count, mesh_count))), origin="lower")
plt.xticks([0, mesh_count * 0.25, mesh_count * 0.5, mesh_count * 0.75, mesh_count], [xmin, xmin/2, 0, xmax/2, xmax])
plt.yticks([0, mesh_count * 0.25, mesh_count * 0.5, mesh_count * 0.75, mesh_count], [ymin, ymin/2, 0, ymax/2, ymax])
if name:
plt.savefig(name + ".png", format="png")
| 2.296875 | 2 |
keychain/keychain_urls.py | osgee/keychainserver | 2 | 12797260 | <reponame>osgee/keychainserver
from django.conf.urls import url, include
from keychain import keychain_client_urls
from keychain import keychain_web_urls
from keychain.views import appview
app_name = 'keychain'
urlpatterns = [
url(r'^web/', include(keychain_web_urls)),
url(r'^client/', include(keychain_client_urls)),
url(r'^app/$', appview.ListView.as_view(), name='app_list'),
url(r'^app/signup/$', appview.signup, name='app_signup'),
url(r'^app/service/(?P<app_id>\w{32})/$', appview.service, name='app_service'),
url(r'^app/service/(?P<app_id>\w{32})/(?P<service_id>\w{32})/$', appview.query, name='service_query'),
]
| 1.960938 | 2 |
tools/fake_server.py | uActor/uActor | 1 | 12797261 | <filename>tools/fake_server.py
import socket
import argparse
import struct
import msgpack
parser = argparse.ArgumentParser()
parser.add_argument("host")
parser.add_argument("port", type=int)
parser.add_argument("node_id")
arguments = parser.parse_args()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((arguments.host, arguments.port))
s.listen()
connection, remote = s.accept()
print(remote)
with connection:
sub_message = {}
sub_message["sender_node_id"] = arguments.node_id
sub_message["sender_actor_type"] = "test_actor"
sub_message["sender_instance_id"] = "1"
sub_message["type"] = "subscription_update"
sub_message["subscription_node_id"] = arguments.node_id
sub_msg = msgpack.packb(sub_message)
connection.send(struct.pack("!i", len(sub_msg)))
connection.send(sub_msg)
sub_message_size_data = connection.recv(4, socket.MsgFlag.MSG_WAITALL)
sub_message_size = struct.unpack("!i", sub_message_size_data)[0]
sub_message_data = connection.recv(sub_message_size)
sub_message = msgpack.unpackb(sub_message_data, raw=False)
print(sub_message)
assert "subscription_node_id" in sub_message
spawn_message = {}
spawn_message["sender_node_id"] = arguments.node_id
spawn_message["sender_actor_type"] = "test_actor"
spawn_message["spawn_node_id"] = sub_message["subscription_node_id"]
spawn_message["spawn_actor_type"] = "remotely_spawned_actor"
spawn_message["spawn_instance_id"] = arguments.node_id
spawn_message["spawn_code"] = f"""
function receive(message)
print(message.sender_node_id.."."..message.sender_actor_type.."."..message.sender_instance_id.." -> "..node_id.."."..actor_type.."."..instance_id);
if(message["type"] == "init" or message["type"] == "periodic_timer") then
send({{node_id="{arguments.node_id}", actor_type="test_actor", instance_id="1", message="ping"}});
delayed_publish({{node_id=node_id, actor_type=actor_type, instance_id=instance_id, type="periodic_timer"}}, 5000);
end
if(message["type"] == "init") then
delayed_publish({{node_id=node_id, actor_type=actor_type, instance_id=instance_id, type="exit"}}, 20000);
end
if(message["type"] == "exit") then
send({{node_id="{arguments.node_id}", actor_type="test_actor", instance_id="1", message="goodbye"}});
end
end"""
spawn_message["node_id"] = sub_message["subscription_node_id"]
spawn_message["instance_id"] = "1"
spawn_message["actor_type"] = "lua_runtime"
spawn_msg = msgpack.packb(spawn_message)
connection.send(struct.pack("!i", len(spawn_msg)))
connection.send(spawn_msg)
while True:
res = connection.recv(4, socket.MsgFlag.MSG_WAITALL)
if not res:
break
size = struct.unpack("!i", res)[0]
data = connection.recv(size)
if not data:
break
message = msgpack.unpackb(data, raw=False)
print(message)
| 2.328125 | 2 |
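The wire format used above is a 4-byte big-endian length prefix followed by a msgpack payload. As a sketch (the helper names are hypothetical and `msgpack` is assumed to be installed), the same framing can be factored into two small functions that a peer implementation could reuse:

```python
import socket
import struct

import msgpack


def send_msg(sock: socket.socket, payload: dict) -> None:
    # Pack the dict with msgpack and prepend a 4-byte big-endian length.
    data = msgpack.packb(payload)
    sock.sendall(struct.pack("!i", len(data)) + data)


def recv_msg(sock: socket.socket) -> dict:
    # Read the length prefix, then exactly that many payload bytes.
    size = struct.unpack("!i", sock.recv(4, socket.MSG_WAITALL))[0]
    return msgpack.unpackb(sock.recv(size, socket.MSG_WAITALL), raw=False)
```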
tests/schema/test_visitor.py | mabrains/ALIGN-public | 119 | 12797262 | import pytest
from align.schema.types import BaseModel, Optional, List, Dict
from align.schema.visitor import Visitor, Transformer, cache
@pytest.fixture
def dummy():
class DummyModel(BaseModel):
arg1: str
arg2: Optional[str]
arg3: List[str]
arg4: List[Optional[str]]
arg5: Dict[str, str]
arg6: Dict[str, Optional[str]]
arg7: "Optional[DummyModel]"
arg8: "Optional[List[DummyModel]]"
DummyModel.update_forward_refs()
base = DummyModel(
arg1 = 'arg1',
arg3 = ['arg3_1', 'arg3_2'],
arg4 = [],
arg5 = {'arg5_k': 'arg5_v'},
arg6 = {'arg6_k': None}
)
dummy = DummyModel(
arg1 = 'arg1',
arg3 = ['arg3_1', 'arg3_2'],
arg4 = [],
arg5 = {'arg5_k': 'arg5_v'},
arg6 = {'arg6_k': None},
arg7 = base,
arg8 = [base, base]
)
return dummy
def test_visitor_no_output(dummy):
assert Visitor().visit(dummy) == []
def test_visitor_raw_output(dummy):
class StrValVisitor(Visitor):
def visit_str(self, node):
return node
assert StrValVisitor().visit(dummy) == [
'arg1',
'arg3_1',
'arg3_2',
'arg5_v',
'arg1',
'arg3_1',
'arg3_2',
'arg5_v',
'arg1',
'arg3_1',
'arg3_2',
'arg5_v',
'arg1',
'arg3_1',
'arg3_2',
'arg5_v',
]
def test_visitor_processed_output(dummy):
class DummyCounter(Visitor):
'''Simply counts the number of times the dummy class is encountered'''
def visit_DummyModel(self, node):
return sum(self.generic_visit(node)) + 1
assert DummyCounter().visit(dummy) == 4
def test_transformer_no_visitor(dummy):
assert Transformer().visit(dummy.arg1) is dummy.arg1
assert Transformer().visit(dummy.arg2) is dummy.arg2
assert Transformer().visit(dummy.arg3) is dummy.arg3
assert Transformer().visit(dummy.arg4) is dummy.arg4
assert Transformer().visit(dummy.arg5) is dummy.arg5
assert Transformer().visit(dummy.arg6) is dummy.arg6
assert Transformer().visit(dummy.arg7) is dummy.arg7
assert Transformer().visit(dummy.arg8) is dummy.arg8
assert Transformer().visit(dummy) is dummy
def test_transformer_string_visitor(dummy):
class AddStringPrefix(Transformer):
def visit_str(self, node):
return 'prefix_' + node
transformed = AddStringPrefix().visit(dummy)
assert isinstance(transformed, dummy.__class__)
# String in subtree
assert transformed.arg1 == 'prefix_arg1'
assert transformed.arg1 is not dummy.arg1
# No string in subtree
assert transformed.arg2 == None
assert transformed.arg2 is dummy.arg2
# String in subtree
assert transformed.arg3 == ['prefix_arg3_1', 'prefix_arg3_2']
assert transformed.arg3 is not dummy.arg3
# No string in subtree
assert transformed.arg4 == []
assert transformed.arg4 is dummy.arg4, f'old:({id(dummy.arg4)}, {dummy.arg4}), new:({id(transformed.arg4)}, {transformed.arg4})'
# String in subtree
assert transformed.arg5 == {'arg5_k': 'prefix_arg5_v'}
assert transformed.arg5 is not dummy.arg5
# No string in subtree
assert transformed.arg6 == {'arg6_k': None}
assert transformed.arg6 is dummy.arg6
# Expected result for arg7 and arg8
basedict = {'arg1': 'prefix_arg1',
'arg2': None,
'arg3': ['prefix_arg3_1',
'prefix_arg3_2'],
'arg4': [],
'arg5': {'arg5_k': 'prefix_arg5_v'},
'arg6': {'arg6_k': None},
'arg7': None,
'arg8': None}
# String in subtree
assert transformed.arg7 == basedict
assert transformed.arg7 is not dummy.arg7
# String in subtree
assert transformed.arg8 == [basedict, basedict]
assert transformed.arg8 is not dummy.arg8
# Ensure cache is working for generic_visitor
assert transformed.arg7 is transformed.arg8[0]
assert transformed.arg8[0] is transformed.arg8[1]
def test_cache(dummy):
class UncachedTransformer(Transformer):
def visit_DummyModel(self, node):
if not hasattr(self, 'top'):
self.top = node
return self.generic_visit(node)
else:
return node.copy()
control = UncachedTransformer().visit(dummy)
assert control.arg7 is not control.arg8[0]
assert control.arg8[0] is not control.arg8[1]
class CachedTransformer(Transformer):
@cache # DO THIS FOR MOST VISITORS
def visit_DummyModel(self, node):
if not hasattr(self, 'top'):
self.top = node
return self.generic_visit(node)
else:
return node.copy()
transformed = CachedTransformer().visit(dummy)
assert transformed.arg7 is transformed.arg8[0]
assert transformed.arg8[0] is transformed.arg8[1]
| 2.234375 | 2 |
code/02_modeling/02_model_creation/DataReader.py | suryappal/MachineLearningSamples-BiomedicalEntityExtraction | 40 | 12797263 | <gh_stars>10-100
from keras.preprocessing import sequence
import numpy as np
import nltk
from nltk.tokenize import sent_tokenize
import _pickle as cPickle
class DataReader:
def __init__ (self, input_resources_pickle_file =None):
# Some constants
self.num_classes = 0
self.num_embedding_features = 0
self.max_sentence_len_train =0
# Other stuff
self.wordvecs = None
self.word_to_ix_map = {}
self.n_sentences_all = 0
self.tag_to_vector_map = {}
self.vector_to_tag_map = {}
if not (input_resources_pickle_file is None):
self.load_resources_pickle_file (input_resources_pickle_file)
##################################################
# decode_prediction_sequence
##################################################
def decode_prediction_sequence (self, pred_seq):
pred_tags = []
for class_prs in pred_seq:
class_vec = np.zeros(self.num_classes, dtype=np.int32)
class_vec[np.argmax(class_prs)] = 1
if tuple(class_vec.tolist()) in self.vector_to_tag_map:
pred_tags.append(self.vector_to_tag_map[tuple(class_vec.tolist())])
else:
print(tuple(class_vec.tolist()))
return pred_tags
##################################################
# load_resources_pickle_file
##################################################
def load_resources_pickle_file (self, input_resources_pickle_file):
print("Loading the resources pickle file {}".format(input_resources_pickle_file))
with open(input_resources_pickle_file, 'rb') as f:
pickle_content = cPickle.load(f, encoding='bytes')
self.word_to_ix_map = pickle_content["word_to_ix_map"]
self.wordvecs = pickle_content["wordvecs"]
self.num_embedding_features = pickle_content["num_embedding_features"]
self.num_classes = pickle_content["num_classes"]
self.max_sentence_len_train = pickle_content["max_sentence_len_train"]
self.tag_to_vector_map = pickle_content["tag_to_vector_map"]
self.vector_to_tag_map = pickle_content["vector_to_tag_map"]
self.zero_vec_pos = pickle_content["zero_vec_pos"]
##################################################
# load_embedding_lookup_table
##################################################
def load_embedding_lookup_table (self, embeddings_file):
###Load the Word2Vec Model###
print("Loading the W2V model from file {}".format(embeddings_file))
#W2V_model = cPickle.load(open(embeddings_file, "rb"))
with open(embeddings_file, 'rb') as f:
W2V_model = cPickle.load(f, encoding='bytes')
vocab = list(W2V_model.keys())
self.word_to_ix_map = {}
self.wordvecs = []
###Create LookUp Table for words and their word vectors###
print("Creating the lookup table")
for index, word in enumerate(vocab):
self.word_to_ix_map[word] = index
self.wordvecs.append(W2V_model[vocab[index]])
self.wordvecs = np.array(self.wordvecs)
print("Number of entries in the lookup table = {}".format(len(self.wordvecs)))
self.num_embedding_features = len(self.wordvecs[0])
print("embedding size = {}".format(self.num_embedding_features))
# Add a zero vector for the Paddings
self.wordvecs = np.vstack((self.wordvecs, np.zeros(self.num_embedding_features)))
self.zero_vec_pos = self.wordvecs.shape[0] - 1
print("Done")
return (self.wordvecs)
##################################################
## read_and_parse_training_data
##################################################
def read_and_parse_training_data (self, train_file, output_resources_pickle_file):
print("Loading the training data from file {}".format(train_file))
with open(train_file, 'r') as f_train:
self.tag_to_vector_map = {} # For storing one hot vector notation for each Tag
self.vector_to_tag_map = {}
self.num_classes = 0 # Used to put 1 in the one hot vector notation
raw_data_train = []
raw_words_train = []
raw_tags_train = []
# Process all lines in the file
for line in f_train:
line = line.strip()
if not line:
raw_data_train.append( (tuple(raw_words_train), tuple(raw_tags_train)))
raw_words_train = []
raw_tags_train = []
continue
word, tag = line.split('\t')
raw_words_train.append(word)
raw_tags_train.append(tag)
if tag not in self.tag_to_vector_map:
self.tag_to_vector_map[tag] = None
self.num_classes += 1
print("number of training examples = " + str(len(raw_data_train)))
all_tags = sorted(list(self.tag_to_vector_map.keys()))
for tag_class_id, tag in enumerate(all_tags):
one_hot_vec = np.zeros(self.num_classes +1, dtype=np.int32)
one_hot_vec[tag_class_id] = 1
self.tag_to_vector_map[tag] = tuple(one_hot_vec)
self.vector_to_tag_map[tuple(one_hot_vec)] = tag
#Adding a None Tag
one_hot_vec = np.zeros(self.num_classes +1, dtype=np.int32)
one_hot_vec[self.num_classes] = 1
self.tag_to_vector_map['NONE'] = tuple(one_hot_vec)
self.vector_to_tag_map[tuple(one_hot_vec)] = 'NONE'
self.num_classes += 1
self.n_sentences_all = len(raw_data_train)
# Find the maximum sequence length for Training data
self.max_sentence_len_train = 0
for seq in raw_data_train:
if len(seq[0]) > self.max_sentence_len_train:
self.max_sentence_len_train = len(seq[0])
############## Create Train Vectors################
all_X_train, all_Y_train = [], []
unk_words = []
count = 0
for word_seq, tag_seq in raw_data_train:
elem_wordvecs, elem_tags = [], []
for ix in range(len(word_seq)):
w = word_seq[ix]
t = tag_seq[ix]
w = w.lower()
if w in self.word_to_ix_map :
count += 1
elem_wordvecs.append(self.word_to_ix_map[w])
elem_tags.append(self.tag_to_vector_map[t])
elif "UNK" in self.word_to_ix_map :
unk_words.append(w)
elem_wordvecs.append(self.word_to_ix_map["UNK"])
elem_tags.append(self.tag_to_vector_map[t])
else:
unk_words.append(w)
w = "UNK"
new_wv = 2 * np.random.randn(self.num_embedding_features) - 1 # sample from normal distribution
norm_const = np.linalg.norm(new_wv)
new_wv /= norm_const
self.wordvecs = np.vstack((self.wordvecs, new_wv))
self.word_to_ix_map[w] = self.wordvecs.shape[0] - 1
elem_wordvecs.append(self.word_to_ix_map[w])
elem_tags.append(list(self.tag_to_vector_map[t]))
# Pad the sequences for missing entries to make them all the same length
nil_X = self.zero_vec_pos
nil_Y = np.array(self.tag_to_vector_map['NONE'])
pad_length = self.max_sentence_len_train - len(elem_wordvecs)
all_X_train.append( ((pad_length)*[nil_X]) + elem_wordvecs)
all_Y_train.append( ((pad_length)*[nil_Y]) + elem_tags)
all_X_train = np.array(all_X_train)
all_Y_train = np.array(all_Y_train)
print("UNK WORD COUNT = " + str(len(unk_words)))
print("Found WORDS COUNT = " + str(count))
print("TOTAL WORDS COUNT= " + str(count+len(unk_words)))
self.save_resources(output_resources_pickle_file)
print("Done")
return (all_X_train, all_Y_train)
##################################################
# save_resources
##################################################
def save_resources(self, output_resources_pickle_file):
print("saving the resources into the file {}".format(output_resources_pickle_file))
pickle_content = {}
pickle_content["word_to_ix_map"] = self.word_to_ix_map
pickle_content["wordvecs"] = self.wordvecs
pickle_content["num_embedding_features"] = self.num_embedding_features
pickle_content["num_classes"] = self.num_classes
pickle_content["max_sentence_len_train"] = self.max_sentence_len_train
pickle_content["tag_to_vector_map"] = self.tag_to_vector_map
pickle_content["vector_to_tag_map"] = self.vector_to_tag_map
pickle_content["zero_vec_pos"] = self.zero_vec_pos
cPickle.dump(pickle_content, open(output_resources_pickle_file, "wb"))
print("Done")
##################################################
# read_and_parse_test_data
##################################################
def read_and_parse_test_data (self, test_file):
print("Loading test data from file {}".format(test_file))
with open(test_file, 'r') as f_test:
data_set = []
sentence_words = []
sentence_tags = []
# Process all lines in the file
for line in f_test:
line = line.strip()
if not line:
data_set.append( (tuple(sentence_words), tuple(sentence_tags)))
sentence_words = []
sentence_tags = []
continue
word, tag = line.split('\t')
sentence_words.append(word)
sentence_tags.append(tag)
print("number of test examples = " + str(len(data_set)))
self.n_sentences_all = len(data_set)
#Create TEST feature vectors
all_X_test, all_Y_test = [], []
num_tokens_list = []
unk_words = []
count = 0
for word_seq, tag_seq in data_set:
if len(word_seq) > self.max_sentence_len_train:
print("skip the extra words in the long sentence")
word_seq = word_seq[:self.max_sentence_len_train]
tag_seq = tag_seq[:self.max_sentence_len_train]
elem_wordvecs, elem_tags = [], []
for ix in range(len(word_seq)):
w = word_seq[ix]
w = w.lower()
t = tag_seq[ix]
#ignore the word if it has uncovered ground truth entity type
if not (t in self.tag_to_vector_map):
continue
if w in self.word_to_ix_map:
count += 1
elem_wordvecs.append(self.word_to_ix_map[w])
elem_tags.append(self.tag_to_vector_map[t])
elif "UNK" in self.word_to_ix_map :
unk_words.append(w)
elem_wordvecs.append(self.word_to_ix_map["UNK"])
elem_tags.append(self.tag_to_vector_map[t])
else:
unk_words.append(w)
w = "UNK"
self.word_to_ix_map[w] = self.wordvecs.shape[0] - 1
elem_wordvecs.append(self.word_to_ix_map[w])
elem_tags.append(self.tag_to_vector_map[t])
# Pad the sequences for missing entries to make all the sentences the same length
nil_X = self.zero_vec_pos
nil_Y = np.array(self.tag_to_vector_map['NONE'])
num_tokens_list.append(len(elem_wordvecs))
pad_length = self.max_sentence_len_train - len(elem_wordvecs)
all_X_test.append( ((pad_length)*[nil_X]) + elem_wordvecs)
all_Y_test.append( ((pad_length)*[nil_Y]) + elem_tags)
all_X_test = np.array(all_X_test)
all_Y_test = np.array(all_Y_test)
print("UNK WORD COUNT = " + str(len(unk_words)))
print("Found WORDS COUNT = " + str(count))
print("TOTAL WORDS COUNT = " + str(count+len(unk_words)))
print("Done")
return (all_X_test, all_Y_test, data_set, num_tokens_list)
##################################################
# get_feature_vectors_2
##################################################
def get_feature_vectors_2 (self, data_file):
print("Loading unlabeled data from file {}".format(data_file))
with open(data_file, 'r') as f_data:
all_sentences_words = []
# Process all lines in the file
for line in f_data:
text = line.strip()
#break the input text into sentences before tokenization
sentences = sent_tokenize(text)
for sent in sentences:
sentence_words = nltk.word_tokenize(sent)
all_sentences_words.append( tuple(sentence_words) )
self.n_sentences_all = len(all_sentences_words)
print("number of unlabeled examples = {}".format(self.n_sentences_all))
return self.create_feature_vectors(all_sentences_words)
##################################################
# get_feature_vectors_1
##################################################
def get_feature_vectors_1 (self, data_list):
print("Reading unlabeled data from dataframe")
# list of list of tokens
all_sentences_words = []
# Process all lines in the file
for line in data_list:
text = line.strip()
#break the input text into sentences before tokenization
sentences = sent_tokenize(text)
for sent in sentences:
sentence_words = nltk.word_tokenize(sent)
all_sentences_words.append( tuple(sentence_words) )
self.n_sentences_all = len(all_sentences_words)
print("number of unlabeled examples = {}".format(self.n_sentences_all))
return self.create_feature_vectors(all_sentences_words)
##################################################
# create_feature_vectors
##################################################
def create_feature_vectors(self, all_sentences_words):
all_X_data = []
word_seq_list = []
num_tokens_list = []
unk_words = []
count = 0
for word_seq in all_sentences_words:
if len(word_seq) > self.max_sentence_len_train:
print("skip the extra words in the long sentence")
word_seq = word_seq[:self.max_sentence_len_train]
word_seq_list.append(word_seq)
elem_wordvecs = []
for ix in range(len(word_seq)):
w = word_seq[ix]
w = w.lower()
if w in self.word_to_ix_map:
count += 1
elem_wordvecs.append(self.word_to_ix_map[w])
elif "UNK" in self.word_to_ix_map :
unk_words.append(w)
elem_wordvecs.append(self.word_to_ix_map["UNK"])
else:
unk_words.append(w)
w = "UNK"
self.word_to_ix_map[w] = self.wordvecs.shape[0] - 1
elem_wordvecs.append(self.word_to_ix_map[w])
# Pad the sequences for missing entries to make them all the same length
nil_X = self.zero_vec_pos
num_tokens_list.append(len(elem_wordvecs))
pad_length = self.max_sentence_len_train - len(elem_wordvecs)
all_X_data.append( ((pad_length)*[nil_X]) + elem_wordvecs)
all_X_data = np.array(all_X_data)
print("UNK WORD COUNT = " + str(len(unk_words)))
print("Found WORDS COUNT = " + str(count))
print("TOTAL WORDS = " + str(count+len(unk_words)))
print("Done")
return (all_X_data, word_seq_list, num_tokens_list)
| 2.65625 | 3 |
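A sketch of the intended call order for `DataReader` (file names are placeholders; NLTK's sentence and word tokenizers are assumed to be available for the unlabeled-data path):

```python
# First run: build the embedding lookup table, vectorize train/test data, and
# persist the word/tag bookkeeping so later runs can reuse it.
reader = DataReader()
reader.load_embedding_lookup_table("w2v_model.pkl")
X_train, Y_train = reader.read_and_parse_training_data("train.tsv", "resources.pkl")
X_test, Y_test, test_sentences, test_lengths = reader.read_and_parse_test_data("test.tsv")

# Later run: restore the saved resources and vectorize unlabeled text.
scorer = DataReader(input_resources_pickle_file="resources.pkl")
X_new, sentences, lengths = scorer.get_feature_vectors_1(["Aspirin relieves mild headaches."])
```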
elasticlog/customlogger.py | gustavohenrique/elasticlog | 1 | 12797264 | # coding: utf-8
import sys
import logging
import settings
logFormatter = logging.Formatter('%(asctime)s [%(levelname)-5.5s] %(message)s')
logger = logging.getLogger()
fileHandler = logging.FileHandler('{0}'.format(settings.LOG_FILE_PATH))
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler) | 2.328125 | 2 |
phr/ciudadano/migrations/0006_auto_20170131_1114.py | richardqa/django-ex | 0 | 12797265 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-01-31 11:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ciudadano', '0005_auto_20170127_1841'),
]
operations = [
migrations.RemoveField(
model_name='ciudadano',
name='uuid',
),
migrations.AlterField(
model_name='ciudadano',
name='numero_documento',
field=models.CharField(blank=True, max_length=11, null=True, unique=True,
verbose_name='Número de documento'),
),
]
| 1.484375 | 1 |
src/train/kuka_reach/with_image/train_with_rllib.py | borninfreedom/deep-rl-with-robots | 0 | 12797266 | <filename>src/train/kuka_reach/with_image/train_with_rllib.py
import time
import ray
import ray.rllib.agents.ppo as ppo
from ray.tune.logger import pretty_print
from env import CustomSkipFrame, KukaCamReachEnv
from ray import tune
from ray.tune import grid_search
from ray.rllib.env.env_context import EnvContext
from ray.tune.registry import register_env, register_trainable
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.agents.impala import ImpalaTrainer
if __name__=='__main__':
ray.shutdown()
ray.init(ignore_reinit_error=True)
# env_config={
# "is_render":False,
# "is_good_view":False,
# "max_steps_one_episode":1000,
# }
# env=KukaCamReachEnv(env_config)
# env=CustomSkipFrame(env)
register_env("kuka_env",lambda config: CustomSkipFrame(KukaCamReachEnv(config)))
#register_env("kuka_env",lambda config: KukaCamReachEnv(config))
config = {
"env": "kuka_env",
"model":{
"conv_filters":[[32,[3,3],2],[32,[3,3],2],[32,[3,3],2],[32,[3,3],2],[1152,[6,6],1]],
# "conv_filters":"relu",
"post_fcnet_hiddens":[512,251],
"post_fcnet_activation":"relu",
},
"env_config":{
"is_render":False,
"is_good_view":False,
"max_steps_one_episode":1000,
},
"num_workers":10,
"num_gpus":1,
"framework":"torch",
# "render_env":False,
# "num_gpus_per_worker":0,
# "num_envs_per_worker":5,
# "rollout_fragment_length":1000,
# "train_batch_size":4000,
# "batch_mode":"complete_episodes",
#"lr":0.0001,
# "lr":grid_search([5e-5,0.0001])
}
config_for_trainer = {
"env": "kuka_env",
# "model":{
# "conv_filters":[[32,[3,3],2],[32,[3,3],2],[32,[3,3],2],[32,[3,3],2],[1152,[6,6],1]],
# # "conv_filters":"relu",
# "post_fcnet_hiddens":[512,251],
# "post_fcnet_activation":"relu",
# },
"env_config":{
"is_render":False,
"is_good_view":False,
"max_steps_one_episode":1000,
},
"num_workers":1,
"num_gpus":1,
"framework":"torch",
}
stop = {
"episode_reward_mean": 0.99,
"training_iteration":200,
}
# trainer=PPOTrainer(config=config_for_trainer)
# print(trainer.get_policy().model)
#
# trainer=ImpalaTrainer(config=config_for_trainer)
# print(trainer.get_policy().model)
results = tune.run(
"SAC", # Specify the algorithm to train
config=config,
stop=stop,
checkpoint_freq=1,
)
metric="episode_reward_mean"
best_trial = results.get_best_trial(metric=metric, mode="max", scope="all")
best_checkpoint=results.get_best_checkpoint(best_trial,metric=metric,mode="max")
print('best checkpoint: ',best_checkpoint)
ray.shutdown() | 1.992188 | 2 |
tests/test_graphs.py | jodahoney/pyinterview | 2 | 12797267 | <reponame>jodahoney/pyinterview
import pytest
from collections import deque
from pyinterview.graphs import (
undirected_adj_list,
directed_adj_list,
inbound_degrees,
find_sources,
)
@pytest.mark.parametrize(
("edges", "expected"),
[
([("A", "B"), ("A", "C")], {"A": ["B", "C"], "B": ["A"], "C": ["A"]}),
([("A", "B"), ("B", "C")], {"A": ["B"], "B": ["A", "C"], "C": ["B"]}),
],
)
def test_undirected_adj_list(edges, expected):
assert undirected_adj_list(edges) == expected
@pytest.mark.parametrize(
("edges", "expected"),
[
([("A", "B"), ("A", "C")], {"A": ["B", "C"], "B": [], "C": []}),
([("A", "B"), ("B", "C")], {"A": ["B"], "B": ["C"], "C": []}),
],
)
def test_directed_adj_list(edges, expected):
assert directed_adj_list(edges) == expected
@pytest.mark.parametrize(
("adj_list", "expected"),
[
({"A": ["B", "C"], "B": [], "C": []}, {"A": 0, "B": 1, "C": 1}),
({"A": ["B"], "B": ["C"], "C": []}, {"A": 0, "B": 1, "C": 1}),
({"A": ["B"], "B": ["A"]}, {"A": 1, "B": 1}),
],
)
def test_inbound_degrees(adj_list, expected):
assert inbound_degrees(adj_list) == expected
@pytest.mark.parametrize(
("inbounnd_degrees", "expected"),
[
({"A": 0, "B": 1, "C": 1}, deque(["A"])),
({"A": 0, "B": 1, "C": 1}, deque(["A"])),
({"A": 1, "B": 1}, deque([])),
],
)
def test_find_sources(inbound_degrees, expected):
    assert find_sources(inbound_degrees) == expected
| 2.515625 | 3 |
web/premises/views.py | beratdogan/arguman.org | 0 | 12797268 | # -*- coding:utf-8 -*-
import json
from datetime import timedelta
from markdown2 import markdown
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.db.models import Max
from django.utils.timezone import now
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.views.generic import DetailView, TemplateView, CreateView, View
from django.views.generic.edit import UpdateView
from django.db.models import Count
from blog.models import Post
from premises.utils import int_or_zero
from premises.models import Contention, Premise
from premises.forms import (ArgumentCreationForm, PremiseCreationForm,
PremiseEditForm, ReportForm)
from premises.signals import (added_premise_for_premise,
added_premise_for_contention, reported_as_fallacy,
supported_a_premise)
from premises.templatetags.premise_tags import check_content_deletion
from newsfeed.models import Entry
class ContentionDetailView(DetailView):
template_name = "premises/contention_detail.html"
model = Contention
def get_context_data(self, **kwargs):
contention = self.get_object()
view = ("list-view" if self.request.GET.get("view") == "list"
else "tree-view")
edit_mode = (
self.request.user.is_superuser or
self.request.user.is_staff or
contention.user == self.request.user)
return super(ContentionDetailView, self).get_context_data(
view=view,
path=contention.get_absolute_url(),
edit_mode=edit_mode,
**kwargs)
class ContentionJsonView(DetailView):
model = Contention
def render_to_response(self, context, **response_kwargs):
contention = self.get_object(self.get_queryset())
return HttpResponse(json.dumps({
"nodes": self.build_tree(contention, self.request.user),
}), content_type="application/json")
def build_tree(self, contention, user):
return {
"name": contention.title,
"parent": None,
"pk": contention.pk,
"owner": contention.owner,
"sources": contention.sources,
"is_singular": self.is_singular(contention),
"children": self.get_premises(contention, user)
}
def get_premises(self, contention, user, parent=None):
children = [{
"pk": premise.pk,
"name": premise.text,
"parent": parent.text if parent else None,
"reportable_by_authenticated_user": self.user_can_report(premise, user),
"report_count": premise.reports.count(),
"user": {
"id": premise.user.id,
"username": premise.user.username,
"absolute_url": reverse("auth_profile",
args=[premise.user.username])
},
"sources": premise.sources,
"premise_type": premise.premise_class(),
"children": (self.get_premises(contention, user, parent=premise)
if premise.published_children().exists() else [])
} for premise in contention.published_premises(parent)]
return children
def user_can_report(self, premise, user):
if user.is_authenticated() and user != premise.user:
return not premise.reported_by(user)
return False
def is_singular(self, contention):
result = (contention
.premises
.all()
.aggregate(max_sibling=Max('sibling_count')))
return result['max_sibling'] <= 1
class HomeView(TemplateView):
template_name = "index.html"
tab_class = "featured"
paginate_by = 20
def get_context_data(self, **kwargs):
contentions = self.get_contentions()
if self.request.user.is_authenticated():
notifications_qs = self.get_unread_notifications()
notifications = list(notifications_qs)
self.mark_as_read(notifications_qs)
else:
notifications = None
return super(HomeView, self).get_context_data(
next_page_url=self.get_next_page_url(),
tab_class=self.tab_class,
notifications=notifications,
has_next_page=self.has_next_page(),
announcements=self.get_announcements(),
contentions=contentions, **kwargs)
def get_announcements(self):
return Post.objects.filter(is_announcement=True)
def get_offset(self):
return int_or_zero(self.request.GET.get("offset"))
def get_limit(self):
return self.get_offset() + self.paginate_by
def has_next_page(self):
total = self.get_contentions(paginate=False).count()
return total > (self.get_offset() + self.paginate_by)
def get_next_page_url(self):
offset = self.get_offset() + self.paginate_by
return '?offset=%(offset)s' % {
"offset": offset
}
def get_unread_notifications(self):
return (self.request.user
.notifications
.filter(is_read=False)
[:5])
def mark_as_read(self, notifications):
pks = notifications.values_list("id", flat=True)
(self.request.user
.notifications
.filter(id__in=pks)
.update(is_read=True))
def get_contentions(self, paginate=True):
contentions = (Contention
.objects
.featured())
if paginate:
contentions = (contentions[self.get_offset(): self.get_limit()])
return contentions
class NotificationsView(HomeView):
template_name = "notifications.html"
def get_context_data(self, **kwargs):
notifications_qs = self.request.user.notifications.all()[:40]
notifications = list(notifications_qs)
self.mark_as_read(notifications_qs)
return super(HomeView, self).get_context_data(
notifications=notifications,
**kwargs)
class SearchView(HomeView):
tab_class = 'search'
def get_context_data(self, **kwargs):
return super(SearchView, self).get_context_data(
keywords=self.get_keywords(),
**kwargs
)
def get_keywords(self):
return self.request.GET.get('keywords') or ""
def get_next_page_url(self):
offset = self.get_offset() + self.paginate_by
return '?offset=%(offset)s&keywords=%(keywords)s' % {
"offset": offset,
"keywords": self.get_keywords()
}
def get_contentions(self, paginate=True):
keywords = self.request.GET.get('keywords')
if not keywords or len(keywords) < 2:
result = Contention.objects.none()
else:
result = (Contention
.objects
.filter(title__icontains=keywords))
if paginate:
result = result[self.get_offset():self.get_limit()]
return result
class NewsView(HomeView):
tab_class = "news"
def get_contentions(self, paginate=True):
contentions = Contention.objects.filter(
is_published=True)
if paginate:
contentions = contentions[self.get_offset():self.get_limit()]
return contentions
class UpdatedArgumentsView(HomeView):
tab_class = "updated"
def get_contentions(self, paginate=True):
contentions = (Contention
.objects
.filter(is_published=True)
.order_by('-date_modification'))
if paginate:
contentions = contentions[self.get_offset():self.get_limit()]
return contentions
class ControversialArgumentsView(HomeView):
tab_class = "controversial"
def get_contentions(self, paginate=True):
last_week = now() - timedelta(days=3)
contentions = (Contention
.objects
.annotate(num_children=Count('premises'))
.order_by('-num_children')
.filter(date_modification__gte=last_week))
if paginate:
return contentions[self.get_offset():self.get_limit()]
return contentions
class AboutView(TemplateView):
template_name = "about.html"
def get_context_data(self, **kwargs):
content = markdown(render_to_string("about.md"))
return super(AboutView, self).get_context_data(
content=content, **kwargs)
class TosView(TemplateView):
template_name = "tos.html"
def get_context_data(self, **kwargs):
content = markdown(render_to_string("tos.md"))
return super(TosView, self).get_context_data(
content=content, **kwargs)
class ArgumentCreationView(CreateView):
template_name = "premises/new_contention.html"
form_class = ArgumentCreationForm
def form_valid(self, form):
form.instance.user = self.request.user
form.instance.ip_address = self.request.META['REMOTE_ADDR']
response = super(ArgumentCreationView, self).form_valid(form)
form.instance.update_sibling_counts()
return response
class ArgumentUpdateView(UpdateView):
template_name = "premises/edit_contention.html"
form_class = ArgumentCreationForm
def get_queryset(self):
contentions = Contention.objects.all()
if self.request.user.is_superuser:
return contentions
return contentions.filter(user=self.request.user)
def form_valid(self, form):
form.instance.user = self.request.user
response = super(ArgumentUpdateView, self).form_valid(form)
form.instance.update_sibling_counts()
return response
class ArgumentPublishView(DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
if contention.premises.exists():
contention.is_published = True
contention.save()
messages.info(request, u"Argüman yayına alındı.")
else:
messages.info(request, u"Argümanı yayına almadan önce en az 1 "
u"önerme ekleyin.")
return redirect(contention)
class ArgumentUnpublishView(DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
contention.is_published = False
contention.save()
messages.info(request, u"Argüman yayından kaldırıldı.")
return redirect(contention)
class ArgumentDeleteView(DetailView):
def get_queryset(self):
return Contention.objects.filter(user=self.request.user)
def post(self, request, slug):
contention = self.get_object()
if check_content_deletion(contention):
# remove notification
Entry.objects.delete(contention.get_newsfeed_type(), contention.id)
contention.delete()
messages.info(request, u"Argümanınız silindi.")
return redirect("home")
else:
messages.info(request, u"Argümanınız silinecek durumda değil.")
return redirect(contention)
delete = post
class PremiseEditView(UpdateView):
template_name = "premises/edit_premise.html"
form_class = PremiseEditForm
def get_queryset(self):
premises = Premise.objects.all()
if self.request.user.is_superuser:
return premises
return premises.filter(user=self.request.user)
def form_valid(self, form):
response = super(PremiseEditView, self).form_valid(form)
form.instance.argument.update_sibling_counts()
return response
def get_context_data(self, **kwargs):
return super(PremiseEditView, self).get_context_data(
#contention=self.get_contention(),
**kwargs)
class PremiseCreationView(CreateView):
template_name = "premises/new_premise.html"
form_class = PremiseCreationForm
def get_context_data(self, **kwargs):
return super(PremiseCreationView, self).get_context_data(
contention=self.get_contention(),
parent=self.get_parent(),
**kwargs)
def form_valid(self, form):
contention = self.get_contention()
form.instance.user = self.request.user
form.instance.argument = contention
form.instance.parent = self.get_parent()
form.instance.is_approved = True
form.instance.ip_address = self.request.META['REMOTE_ADDR']
form.save()
contention.update_sibling_counts()
if form.instance.parent:
added_premise_for_premise.send(sender=self,
premise=form.instance)
else:
added_premise_for_contention.send(sender=self,
premise=form.instance)
contention.date_modification = timezone.now()
contention.save()
return redirect(contention)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
def get_parent(self):
parent_pk = self.kwargs.get("pk")
if parent_pk:
return get_object_or_404(Premise, pk=parent_pk)
class PremiseSupportView(View):
def get_premise(self):
premises = Premise.objects.exclude(user=self.request.user)
return get_object_or_404(premises, pk=self.kwargs['pk'])
def post(self, request, *args, **kwargs):
premise = self.get_premise()
premise.supporters.add(self.request.user)
supported_a_premise.send(sender=self, premise=premise,
user=self.request.user)
return redirect(self.get_contention())
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
class PremiseUnsupportView(PremiseSupportView):
def delete(self, request, *args, **kwargs):
premise = self.get_premise()
premise.supporters.remove(self.request.user)
return redirect(self.get_contention())
post = delete
class PremiseDeleteView(View):
def get_premise(self):
if self.request.user.is_staff:
premises = Premise.objects.all()
else:
premises = Premise.objects.filter(user=self.request.user)
return get_object_or_404(premises,
pk=self.kwargs['pk'])
def delete(self, request, *args, **kwargs):
premise = self.get_premise()
premise.delete()
premise.update_sibling_counts()
contention = self.get_contention()
if not contention.premises.exists():
contention.is_published = False
contention.save()
return redirect(contention)
post = delete
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
class ReportView(CreateView):
form_class = ReportForm
template_name = "premises/report.html"
def get_context_data(self, **kwargs):
return super(ReportView, self).get_context_data(
premise=self.get_premise(),
**kwargs)
def get_contention(self):
return get_object_or_404(Contention, slug=self.kwargs['slug'])
def get_premise(self):
return get_object_or_404(Premise, pk=self.kwargs['pk'])
def get_initial(self):
return {
'contention': self.get_contention(),
'premise': self.get_premise(),
'reporter': self.request.user
}
def form_valid(self, form):
contention = self.get_contention()
premise = self.get_premise()
form.instance.contention = contention
form.instance.premise = premise
form.instance.reporter = self.request.user
form.save()
reported_as_fallacy.send(sender=self, report=form.instance)
return redirect(contention)
| 1.929688 | 2 |
tensorbank/tf/points.py | pshved/tensorbank | 1 | 12797269 | <reponame>pshved/tensorbank
"""Point operations
===================
Batch operations on points in D-dimensional Eucledian space R^D.
"""
import tensorflow as tf
def pairwise_l2_distance(a, b, sqrt=True):
"""Compute pairwise L2 distance between all points in A and B.
L2 norm is ``sqrt(sum(|x_i - y_i| ^ 2))``
Args:
a (Tensor [N x K x D]): point coordinates. N is batch size, K is the
number of points in a batch, D is the dimension of the Euclidian
space.
b (Tensor [N x M x D]): point coordinates, N is batch size, M is the
number of points in a batch, D is the dimension of the euclidian
space.
sqrt (bool, optional): whether take the square root. Defaults to True.
Returns:
Tensor [N x K x M]: pairwise L2 distance between each pair of points.
"""
a = tf.convert_to_tensor(a)
b = tf.convert_to_tensor(b)
# (a_i - b_j)^2 = a_i^2 + b_j^2 - 2 * a_i * b_j
a_ext = tf.expand_dims(a, 2) # N x K x 1 x D
b_ext = tf.expand_dims(b, 1) # N x 1 x M x D
a2 = tf.reduce_sum(a_ext * a_ext, axis=3) # N x K x 1
b2 = tf.reduce_sum(b_ext * b_ext, axis=3) # N x 1 x M
ab = tf.matmul(a, tf.transpose(b, (0, 2, 1))) # N x K x M
L2_square = a2 + b2 - 2 * ab # N x K x M
if sqrt:
return L2_square ** 0.5
else:
return L2_square
def pairwise_l1_distance(a, b):
"""Compute pairwise L1 distance between all points in A and B.
L1 norm is ``sum(|x_i - y_i|)``
Args:
a (Tensor [N x K x D]): point coordinates. N is batch size, K is the
number of points in a batch, D is the dimension of the Euclidian
space.
b (Tensor [N x M x D]): point coordinates, N is batch size, M is the
number of points in a batch, D is the dimension of the euclidian
space.
Returns:
Tensor [N x K x M]: pairwise L1 distance between each pair of points.
"""
a = tf.convert_to_tensor(a)
b = tf.convert_to_tensor(b)
a_ext = tf.expand_dims(a, 2) # N x K x 1 x D
b_ext = tf.expand_dims(b, 1) # N x 1 x M x D
return tf.reduce_sum(tf.abs(a_ext - b_ext), axis=3) # N x K x M
def pairwise_l_inf_distance(a, b):
"""Compute pairwise L∞ distance between all points in A and B.
L-infinity is ``max(|x_i - y_i|)``
Args:
a (Tensor [N x K x D]): point coordinates. N is batch size, K is the
number of points in a batch, D is the dimension of the Euclidian
space.
b (Tensor [N x M x D]): point coordinates, N is batch size, M is the
number of points in a batch, D is the dimension of the euclidian
space.
Returns:
Tensor [N x K x M]: pairwise L-infinity distance between each pair of
points.
"""
a = tf.convert_to_tensor(a)
b = tf.convert_to_tensor(b)
a_ext = tf.expand_dims(a, 2) # N x K x 1 x D
b_ext = tf.expand_dims(b, 1) # N x 1 x M x D
return tf.reduce_max(tf.abs(a_ext - b_ext), axis=3) # N x K x M
| 3.015625 | 3 |
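A toy usage sketch for the three helpers above (assuming they are in scope, e.g. imported from `tensorbank.tf.points`); the expected values in the comments are approximate:

```python
import tensorflow as tf

# One batch (N=1) with K=2 query points and M=2 reference points in R^2.
a = tf.constant([[[0.0, 0.0], [1.0, 0.0]]])  # shape [1, 2, 2]
b = tf.constant([[[0.0, 3.0], [4.0, 0.0]]])  # shape [1, 2, 2]

print(pairwise_l2_distance(a, b))     # [[[3.0, 4.0], [3.162, 3.0]]]
print(pairwise_l1_distance(a, b))     # [[[3.0, 4.0], [4.0, 3.0]]]
print(pairwise_l_inf_distance(a, b))  # [[[3.0, 4.0], [3.0, 3.0]]]
```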
odd_collector/adapters/kafka/mappers/parser/types.py | opendatadiscovery/odd-collector | 0 | 12797270 | from typing import Dict, Any
RawSchema = Dict[str, Any]
Field = Dict[str, Any]
| 2.03125 | 2 |
backend_sqlalchemy/backend_app/common/contents.py | jiz148/medical_app | 0 | 12797271 | FORGET_MY_PASSWORD_SUBJECT = """Password Change Confirmation"""
FORGET_MY_PASSWORD_CONTENT = """hello {user},\n
Please click the following link to change your password:\n
http://jinchispace.com:5001/newpass?token={token}\n
This link will be expired in {minutes} minutes."""
| 1.601563 | 2 |
blaze/command/replay.py | henry1jin/alohamora | 5 | 12797272 | """ Implements the commands for viewing and manipulating the training manifest """
import json
import time
import os
from blaze.action import Policy
from blaze.logger import logger as log
from blaze.mahimahi.server import start_server
from . import command
@command.argument("replay_dir", help="The directory containing the save files captured by mahimahi")
@command.argument("--policy", help="The file path to a JSON-formatted push policy to serve")
@command.argument("--cert_path", help="Location of the server certificate")
@command.argument("--key_path", help="Location of the server key")
@command.argument(
"--cache_time", help="Do not cache objects which expire in less than this time (in seconds)", type=int, default=None
)
@command.argument(
"--extract_critical_requests",
help="true or false to specify if server should inject critical request extractor",
action="store_true",
)
@command.command
def replay(args):
"""
Starts a replay environment for the given replay directory, including setting up interfaces, running
a DNS server, and configuring and running an nginx server to serve the requests
"""
policy = None
cert_path = os.path.abspath(args.cert_path) if args.cert_path else None
key_path = os.path.abspath(args.key_path) if args.key_path else None
if args.policy:
log.debug("reading policy", push_policy=args.policy)
with open(args.policy, "r") as policy_file:
policy_dict = json.load(policy_file)
policy = Policy.from_dict(policy_dict)
with start_server(
args.replay_dir,
cert_path,
key_path,
policy,
cache_time=args.cache_time,
extract_critical_requests=args.extract_critical_requests,
):
while True:
time.sleep(86400)
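# Hedged CLI sketch (illustrative only): the flags come from the decorators above,
# but the executable name and all file paths are assumptions.
#   blaze replay ./recorded_site --policy push_policy.json --cert_path cert.pem --key_path key.pem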
| 2.421875 | 2 |
examples/example_publisher_subscriber/consumer.py | vladcalin/pymicroservice | 2 | 12797273 | <gh_stars>1-10
import gemstone
from gemstone.event.transport import RabbitMqEventTransport
class ConsumerService(gemstone.MicroService):
name = "consumer"
port = 8000
event_transports = [
RabbitMqEventTransport("192.168.1.71", 5672, username="admin", password="<PASSWORD>")
]
@gemstone.event_handler("test")
def broadcast_msg(self, message):
print(message)
if __name__ == '__main__':
ConsumerService().start()
| 2.40625 | 2 |
amocrm_asterisk_ng/crm/amocrm/kernel/calls/call_records/file_converters/impl/PydubFileConverter.py | iqtek/amocrn_asterisk_ng | 0 | 12797274 | import os
import aiofiles
from pydub import AudioSegment
from amocrm_asterisk_ng.domain import File
from amocrm_asterisk_ng.domain import Filetype
from ..core import IFileConverter
from ...CallRecordsConfig import CallRecordsConfig
__all__ = [
"PydubFileConverter",
]
class PydubFileConverter(IFileConverter):
__slots__ = (
"__config",
)
def __init__(
self,
config: CallRecordsConfig,
) -> None:
self.__config = config
async def __get_content_from_file(
self,
path: str
) -> bytes:
async with aiofiles.open(path, mode='rb') as f:
content = await f.read()
return content
async def convert(self, file: File, new_filetype: Filetype) -> File:
if file.type == new_filetype:
return file
if not os.path.exists(self.__config.tmp_directory):
try:
os.makedirs(self.__config.tmp_directory)
except OSError as exc:
raise Exception(
f"FileConverter: conversion directory error: `{exc!r}`."
)
filepath = os.path.join(self.__config.tmp_directory, file.name)
async with aiofiles.open(filepath, mode='wb') as f:
await f.write(file.content)
if file.type == Filetype.MP3:
audio = AudioSegment.from_mp3(filepath)
elif file.type == Filetype.WAV:
audio = AudioSegment.from_wav(filepath)
elif file.type == Filetype.WAVE:
            audio = AudioSegment.from_wav(filepath)  # WAVE files are plain WAV data; pydub has no from_WAVE helper
else:
raise Exception(f"Non-convertible type: `{file.type}`.")
new_filepath = os.path.join(
self.__config.tmp_directory,
"converted_" + file.name,
)
if new_filetype == Filetype.MP3:
new_format = "mp3"
elif new_filetype == Filetype.WAV:
new_format = "wav"
elif new_filetype == Filetype.WAVE:
new_format = "wave"
else:
raise Exception(
f"Non-convertible type: `{new_filetype}`."
)
audio.export(
new_filepath,
format=new_format,
bitrate='16k'
)
content = await self.__get_content_from_file(new_filepath)
os.remove(filepath)
os.remove(new_filepath)
return File(
name=file.name,
type=new_filetype,
content=content,
)
| 2.484375 | 2 |
tpapi/client.py | ash30/tpapi | 0 | 12797275 | <reponame>ash30/tpapi<filename>tpapi/client.py
import os
import json
import itertools
import urllib
import requests
import entities
import collections
"""
Future Todo:
- Pass entity objects into edits
- TP client caching
"""
# Utils #
def is_sequence(elem):
"Returns true for iterables other than strings"
if isinstance(
elem,collections.Sequence
) and not isinstance(elem,basestring):
return True
else: return False
def encode_sequence(seq):
return ','.join([str(x) for x in seq])
# Response formats #
class TPJsonResponseFormat(object):
def parse(self,response_object):
return response_object.json()
def __str__(self):
return "json"
class TPEntityResponseFormat(TPJsonResponseFormat):
def parse(self,response_object):
d = super(TPEntityResponseFormat,self).parse(
response_object
)
return (d.get('Items',(d,)),d.get('Next'))
# HTTP layer #
class HTTPRequestDispatcher():
"""
A simple component wrapper over request.py functionality
takes care of sending the http requests and can be easily
mocked out for overall testing of the library
"""
def __init__(self,response_format=TPEntityResponseFormat):
self.auth = None
self._default_response_format = response_format()
self._requests = requests # for mocking
def encode_params(self,params):
""" Override default requests.py param data
serialisation to suit TP
"""
final_params = {
k:encode_sequence(v) if is_sequence(v) else str(v)
for k,v in params.iteritems() if v
}
param_string = "&".join(
["{}={}".format(k,v) for k,v in final_params.iteritems()]
)
return urllib.quote(param_string,safe='=&,')
def append_params(self,url,params):
"Combine params and url into single string"
final_url = "{}{}{}".format(
url,
"?" if "?" not in url else "&",
self.encode_params(params),
)
return final_url
def make_request(self,method,url,params,response_format,**kwargs):
params['format'] = response_format
final_url = self.append_params(url,params)
print final_url
r = self._requests.request(method,final_url,auth=self.auth,**kwargs)
try:
r.raise_for_status()
except:
print "ERROR",final_url
print r.content
raise
return response_format.parse(r)
def single_get_request(self,url,params,response_format=None):
if not response_format:
response_format = self._default_response_format
"Submit a get request to tp api endpoint"
return self.make_request('get',url,params,response_format)
def paginated_get_request(self,url,params):
""" Generator over a series of requests inorder
to capture paginated resources
"""
response,next_url = self.single_get_request(url,params)
assert isinstance(
response, collections.Sequence
), "Error: Paginated Requests assume iterable response"
yield response
while next_url:
response,next_url = self.single_get_request(next_url,params={})
yield response
def post_request(self,url,params,message_body,response_format=None):
if not response_format:
response_format = self._default_response_format
encoded_message = json.dumps(message_body)
headers = {
"content-type":"application/"+str(response_format),
"content-length":len(encoded_message)
}
return self.make_request(
'post',url,params,response_format=response_format,
headers=headers,data=encoded_message
)
# Clients #
class BasicClient(object):
"""
Submits reqests to TP and returns data
The two main use cases for this class:
api endpoints created from user queries
api endpoints required for entity data and construction
a deprecated third case used to be absolute url
endpoints for pagination but this functionality
has been moved to the requester level
"""
def __init__(self,url,requester):
self.requester = requester
self.tp_api_url = url
def authenticate(self,auth):
"Replace requester delegate with authenicated one"
self.requester.auth = auth
def raw_request(self,url,params={},response_format=TPJsonResponseFormat):
"Mainly used to return raw response"
final_url = '/'.join([self.tp_api_url,url])
return self.requester.single_get_request(
final_url,params,response_format())
# SHOULD WE LEAVE PARAMS AS {}?
def get_entities(self,entity_endpoint,params={},return_limit=50):
"""
@params entity_endpoint:
can any of the following <entity type> ,
<entity type>/<id>, <entity type>/<id>/<collection>
"""
assert isinstance(return_limit,int) and return_limit > 0,\
"return limit should be non negative integer"
final_url = '/'.join([self.tp_api_url,entity_endpoint])
return itertools.islice(
itertools.chain.from_iterable(
self.requester.paginated_get_request(final_url,params)
),
0, return_limit
)
def create_entity(self,entity_endpoint,data,params={}):
"Create Entity given a dict of attributes"
# The base client creation does no error checking on uploaded data
final_url = '/'.join([self.tp_api_url,entity_endpoint])
new_entity= self.requester.post_request(
final_url,params,data,
response_format = TPJsonResponseFormat(),
)
return new_entity
class ObjectMappingClient(BasicClient):
""" Extends the basic client to auto instanciate
entitiy classes from data
"""
def __init__(
self,url,requester,
entity_class_factory=entities.EntityClassFactory
):
super(ObjectMappingClient,self).__init__(url,requester)
self.entity_class_factory = entity_class_factory(self)
def get_entities(self,entity_endpoint,params={},return_limit=50):
"Extend method to return list of entity instances"
        entity_data = super(ObjectMappingClient,self).get_entities(
entity_endpoint,params,return_limit
)
if not entity_data: return [] # guard
# THIS DOESN'T WORK AS I SLICE WILL BE TRUE
resource_type_hint = entity_endpoint.split('/')[0]
entity_class = self.entity_class_factory.get(resource_type_hint,immutable=True)
return itertools.imap(
lambda n:entity_class(n), entity_data
)
def create_entity(self,entity_endpoint,data,params={}):
"Create Entity given a dict of attributes"
# Create a local mutable entity to check data
entity_class = self.entity_class_factory.get(
entity_endpoint,
immutable = True,
)
mutable_entity_class =self.entity_class_factory.get(
entity_endpoint,
immutable = False,
)
proposed_entity = mutable_entity_class.create_from_data(data)
msg_content = proposed_entity.toDict()
msg_content.pop('Id',None) # No ID for creation!
# Send request and return resultant entity
dct = super(ObjectMappingClient,self).create_entity(
entity_endpoint,msg_content,params
)
return entity_class(dct)
# Aliases for backwards compatibility
TPEntityClient = ObjectMappingClient
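# Hedged usage sketch (illustrative only): the URL, credentials and the "Bugs"
# endpoint are placeholders following TargetProcess API conventions; a real run
# needs network access and the entities module's class factory.
if __name__ == '__main__':
    client = ObjectMappingClient(
        'https://example.tpondemand.com/api/v1',
        HTTPRequestDispatcher(),
    )
    client.authenticate(('user', 'password'))
    for bug in client.get_entities('Bugs', params={'take': 10}, return_limit=10):
        print(bug)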
| 2.5 | 2 |
ex-mundo2/ex065.py | PedroPegado/ex-cursoemvideo | 0 | 12797276 | parar = False
soma = 0
cont = 0
while parar == False:
num = int(input('''\033[1;36mDigite um numero inteiro:\033[m '''))
soma += num
cont += 1
print('''Deseja continuar?
[ 1 ] SIM
[ 2 ] NÃO''')
opcao = int(input('Escolha sua opção: '))
if opcao == 2:
parar = True
elif opcao == 1:
parar = False
else:
print('OPÇÃO INVALIDA.')
media = soma/cont
print(f'A média é {media}')
| 3.734375 | 4 |
xavier/builder/model.py | fabriciotorquato/pyxavier | 5 | 12797277 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import numpy as np
import torch
import torch.optim as optim
from sklearn.model_selection import train_test_split
from torch import nn
import xavier.constants.config as config
from xavier.constants.type import Type
from xavier.core.dataLoader import DataLoader
from xavier.core.dataset import Dataset
from xavier.core.training import Training
from xavier.core.transformation import get_standard, init_standard
from xavier.net.cnn import Cnn
from xavier.net.mlp import Mlp
from xavier.net.rnn_lstm_2 import Rnn
torch.manual_seed(1234)
class Model(object):
def __init__(self, filenames, filenames_models, device, learning_rate, num_epoch, batch_size, input_layer=0, hidden_layer=0, output_layer=3, matriz_size=0, type=Type.mlp):
self.input_layer = input_layer
self.hidden_layer = hidden_layer
self.output_layer = output_layer
self.matriz_size = matriz_size
self.device = device
self.filenames = filenames
self.type = type
self.batch_size = batch_size
self.learning_rate = learning_rate
self.filenames_models = filenames_models
self.num_epoch = num_epoch
        self.file_accuracy = np.zeros(len(self.filenames))
self.version = config.VERSION
def __train_model(self, train_loader, valid_loader, test_loader, train_size, valid_size, show, filename):
self.model.first_time = time.time()
for epoch in range(1, self.num_epoch+1):
print "Epoch " + str(epoch) + "/" + str(self.num_epoch)
self.model.train(epoch, train_loader, train_size)
self.model.validation(valid_loader, valid_size)
print('Data train: ')
self.model.validation(train_loader, train_size, train=True)
print('Data Test: ')
return self.model.test(test_loader, show, filename)
def __build_model(self):
# build Model
if self.type == Type.mlp:
model = Mlp(self.device).to(self.device)
elif self.type == Type.rnn:
model = Rnn(self.device).to(self.device)
print(model)
elif self.type == Type.cnn:
model = Cnn(self.device).to(self.device)
# choose optimizer and loss function
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=self.learning_rate)
model = model.to(self.device)
model_training = Training(
model, criterion, optimizer, self.device)
return model_training
def get_dataset(self, file):
dataset = np.loadtxt(file + 'dataset.csv',
delimiter=',', dtype=np.float64)
dataset_x = np.asarray([l[1:] for l in dataset])
dataset_y = np.asarray([l[0] for l in dataset])
return dataset_x, dataset_y
def get_normalization(self, X_train, X_test):
if self.type == Type.mlp:
X_train, standard = init_standard(X_train)
X_test = get_standard(X_test, standard)
return X_train, X_test, standard
elif self.type == Type.rnn:
X_train, standard = init_standard(X_train)
X_test = get_standard(X_test, standard)
return X_train, X_test, standard
elif self.type == Type.cnn:
X_train, standard = init_standard(X_train)
X_test = get_standard(X_test, standard)
return X_train, X_test, standard
def get_loader(self, type, batch_size, X_train, y_train, X_test, y_test, matriz_size):
if type == Type.mlp:
train_data = Dataset(X_train, y_train, Type.mlp)
test_data = Dataset(X_test, y_test, Type.mlp)
elif type == Type.rnn:
train_data = Dataset(X_train, y_train, Type.rnn, matriz_size)
test_data = Dataset(X_test, y_test, Type.rnn, matriz_size)
elif type == Type.cnn:
train_data = Dataset(X_train, y_train, Type.cnn, matriz_size)
test_data = Dataset(X_test, y_test, Type.cnn, matriz_size)
dataLoader = DataLoader()
train_loader, valid_loader, train_size, valid_size = dataLoader.get_train(
train_data, batch_size)
test_loader = dataLoader.get_test(test_data, batch_size)
return train_loader, valid_loader, test_loader, train_size, valid_size
def save_model(self, type, path_model, model, acc, standard):
# Save the Trained Model
path = path_model+type.value
if not os.path.exists(path):
os.mkdir(path)
filename = path + '/'+self.version+' {:.2f}'.format(acc)+'.pkl'
torch.save({
'model': model.model.state_dict(),
'standard': standard
}, filename)
def create_model(self, times, show):
        self.file_accuracy = np.zeros(len(self.filenames))
for _ in range(times):
for idx, file in enumerate(self.filenames):
print "\nTraining dataset: " + str(file) + "\n"
dataset_x, dataset_y = self.get_dataset(file)
X_train, X_test, y_train, y_test = train_test_split(
dataset_x, dataset_y, test_size=0.2, random_state=21)
X_train, X_test, standard = self.get_normalization(
X_train, X_test)
train_loader, valid_loader, test_loader, train_size, valid_size = self.get_loader(
self.type, self.batch_size, X_train, y_train, X_test, y_test, self.matriz_size)
self.input_layer = X_train.shape[1]
self.model = self.__build_model()
path = self.filenames_models[idx] + \
self.type.value + '/'+self.version
accuracy = self.__train_model(train_loader, valid_loader,
test_loader, train_size, valid_size, show, filename=path)
                if accuracy > self.file_accuracy[idx]:
self.save_model(self.type,
self.filenames_models[idx], self.model, accuracy, standard)
                    self.file_accuracy[idx] = accuracy
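# Hedged usage sketch (illustrative only): paths, hyperparameters and the device
# are placeholders; each data directory is assumed to contain a `dataset.csv`
# whose first column is the label, as expected by get_dataset() above.
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Model(
        filenames=['data/subject_1/'],
        filenames_models=['models/subject_1/'],
        device=device,
        learning_rate=1e-3,
        num_epoch=10,
        batch_size=32,
        matriz_size=8,
        type=Type.mlp,
    )
    model.create_model(times=1, show=False)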
| 2.640625 | 3 |
test/schema/stream_gain_strategy_test.py | hq9000/py-headless-daw | 22 | 12797278 | import unittest
from typing import List
import numpy as np
from py_headless_daw.processing.stream.stream_gain import StreamGain
from py_headless_daw.schema.dto.time_interval import TimeInterval
from py_headless_daw.schema.events.event import Event
from py_headless_daw.schema.events.parameter_value_event import ParameterValueEvent
class StreamGainStrategyTest(unittest.TestCase):
def test_stream_gain_strategy(self):
strategy = StreamGain(np.float32(0.25))
interval = TimeInterval()
interval.start_in_bars = 0
interval.end_in_bars = 1
in_stream_buffer = np.ones(shape=(100,), dtype=np.float32)
out_stream_buffer = np.zeros(shape=(100,), dtype=np.float32)
setter_event: ParameterValueEvent = ParameterValueEvent(0, StreamGain.PARAMETER_GAIN, 0.55)
input_event_buffer: List[Event] = [setter_event]
output_event_buffer: List[Event] = []
strategy.render(interval, [in_stream_buffer], [out_stream_buffer], [input_event_buffer], [output_event_buffer])
# the first few samples are closer to the initial value
for x in range(0, 3):
self.assertTrue(0.24 < out_stream_buffer[x] < 0.26)
# while the last few are closer to the target one
for x in range(out_stream_buffer.shape[0] - 3, out_stream_buffer.shape[0]):
            self.assertTrue(out_stream_buffer[x] > 0.45)
strategy.render(interval, [in_stream_buffer], [out_stream_buffer], [[]], [[]])
# now we render without any events in the input, the logic in the
# strategy is slightly different in this case
for x in range(out_stream_buffer.shape[0] - 3, out_stream_buffer.shape[0]):
self.assertTrue(out_stream_buffer[x] > 0.45)
| 2.296875 | 2 |
home/views.py | sa-y-an/Qriosity2.0 | 0 | 12797279 | <reponame>sa-y-an/Qriosity2.0<gh_stars>0
from django.shortcuts import render
from django.contrib.auth.decorators import user_passes_test
# Create your views here.
def not_logged_in(user):
return not user.is_authenticated
def base(request):
return render(request, 'home/base.html')
def home(request):
return render(request, 'home/home.html')
def hello(request):
return render(request, 'home/hello.html')
@user_passes_test(not_logged_in, login_url='/user/dashboard', redirect_field_name=None)
def login(request):
return render(request, 'home/login.html')
def rules(request):
return render(request, 'home/rule.html')
| 1.992188 | 2 |
backend/src/models/Warehouse.py | ahmedsalahacc/Inventory-Management-System | 0 | 12797280 | <filename>backend/src/models/Warehouse.py
from models import BaseModel, gen_id
class WarehouseModel(BaseModel):
'''
ORM for Warehouse table with the following structure
warehouse(
id CHARACTER(10) NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
location TEXT NOT NULL
);
'''
def __init__(self, db_filename: str):
super(WarehouseModel, self).__init__(db_filename)
def insert(self, data_tuple: tuple):
'''
Inserts a new record in warehouse table
Parameters
----------
data_tuple: tuple
tuple of values (name, location)
'''
        # acquiring cursor
cursor = self.conn.cursor()
# sql script
sql_script = '''
INSERT INTO warehouse VALUES (?, ?, ?)
'''
# executing script
id = gen_id()
data_tuple = (id, *data_tuple)
cursor.execute(sql_script, data_tuple)
self.conn.commit()
        # closing cursor
cursor.close()
return id
def delete(self, id: str):
'''
Deletes a record from warehouse table
Parameters
----------
id: str
'''
        # acquiring cursor
cursor = self.conn.cursor()
# sql script
sql_script = '''
DELETE FROM warehouse WHERE id = ?
'''
# executing script
cursor.execute(sql_script, (id,))
self.conn.commit()
        # closing cursor
cursor.close()
def update(self, id: str, new_data: tuple):
'''
Updates a record of the warehouse table using id
Parameters
----------
id: str
id of the record in the db
data_tuple: tuple
tuple of new values (name, location)
'''
        # acquiring cursor
cursor = self.conn.cursor()
# sql script
sql_script = '''
UPDATE warehouse
SET name = ? ,
location = ?
WHERE id = ?
'''
# executing script
new_data = (*new_data, id)
cursor.execute(sql_script, new_data)
self.conn.commit()
        # closing cursor
cursor.close()
def getByID(self, id: str):
'''
gets a record from the warehouse table using id
Parameters
----------
id: str
id of the record in the db
Returns
-------
query: tuple
represents the result
'''
        # acquiring cursor
cursor = self.conn.cursor()
# sql script
sql_script = '''
SELECT * FROM warehouse WHERE id = ?
'''
# executing script
cursor.execute(sql_script, (id,))
query = cursor.fetchone()
        # closing cursor
cursor.close()
return query
def getAll(self, order: str = 'ASC'):
'''
        gets all records from the warehouse table
Parameters
----------
        order: str Default = 'ASC'
arrangement of the returned query
ASC: ascending order
DESC: descending order
Returns
-------
query: list
results list
'''
        # acquiring cursor
cursor = self.conn.cursor()
# sql script
sql_script = f'''
SELECT * FROM warehouse ORDER BY name {order}
'''
# executing script
cursor.execute(sql_script)
query = cursor.fetchall()
        # closing cursor
cursor.close()
return query
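# Hedged usage sketch (illustrative only): assumes BaseModel opens a SQLite
# connection as self.conn and that the warehouse table already exists in the
# given database file.
if __name__ == '__main__':
    model = WarehouseModel('inventory.db')
    new_id = model.insert(('Central warehouse', 'Cairo'))
    print(model.getByID(new_id))
    model.update(new_id, ('Central warehouse', 'Giza'))
    print(model.getAll(order='DESC'))
    model.delete(new_id)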
| 3.0625 | 3 |
ansibleroler/utils/__init__.py | xoxys/ansible-role | 3 | 12797281 | import six
import os
import yaml
import logging
import logging.config
from appdirs import AppDirs
from pkg_resources import resource_filename
def setup_logging(log_level):
log_config_file = os.path.join(resource_filename('ansibleroler', 'static'), 'config', 'logging.yml')
level = logging.getLevelName(log_level)
with open(log_config_file, 'rt') as f:
log_config = yaml.safe_load(f.read())
logging.config.dictConfig(log_config)
if level:
logging.getLogger("ansibleroler").setLevel(level)
return
def update_log_level(log_level):
level = logging.getLevelName(log_level)
if level:
logging.getLogger("ansibleroler").setLevel(level)
return
def normalize_path(path):
normalized = os.path.abspath(os.path.expanduser(path))
return normalized
def convert_bool(obj):
true_values = (True, 'True', 'true', 'yes', '1')
false_values = (False, 'False', 'false', 'no', '0')
if obj in true_values:
return True
elif obj in false_values:
return False
else:
if not isinstance(obj, six.text_type):
obj = six.text_type(obj, "utf-8")
return obj
class Settings(object):
def __init__(
self,
config_file=os.path.join(AppDirs("ansible-roler").user_config_dir, "config.ini"),
role_name=None,
base_path=os.getcwd(),
log_level='WARNING',
subdir_template=os.path.join(resource_filename('ansibleroler', 'static'), 'templates', 'main.yml.j2'),
root_template=os.path.join(resource_filename('ansibleroler', 'static'), 'templates', '.drone.yml.j2'),
exclude_subdirs=['templates', 'files', 'vars'],
enable_templating=False,
template_vars={}
):
self.config_file = config_file
self.role_name = role_name
self.base_path = base_path
self.log_level = log_level
self.subdir_template = subdir_template
self.root_template = root_template
self.exclude_subdirs = exclude_subdirs
self.enable_templating = enable_templating
self.template_vars = template_vars
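# Hedged usage sketch (illustrative only): assumes the ansibleroler package is
# installed so its packaged template paths resolve for the Settings defaults.
if __name__ == '__main__':
    print(convert_bool('yes'), convert_bool('0'))      # True False
    print(normalize_path('~/roles/my-role'))
    settings = Settings(role_name='my-role', log_level='INFO')
    print(settings.base_path, settings.exclude_subdirs)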
| 2.171875 | 2 |
kora/install/rust.py | wannaphong/kora | 91 | 12797282 | import os
os.system('apt install rustc')
os.environ['PATH'] += ':/root/.cargo/bin'
os.environ['USER'] = 'user'
| 1.367188 | 1 |
viusitemapparser/sitemap_file.py | VIU-one/VIU-Sitemap-Parser | 1 | 12797283 | <gh_stars>1-10
class SitemapFile:
sitemap_file_name = None
sitemap_contents = None
sitemap_headers = None
sitemap_source_type = None # remote or local
sitemap_file_error = None
sitemap_received = False
sitemap_lxml = None
sitemap_plain_text = None
sitemap_type = None # one of xml_sitemap, xml_sitemap_index, rss_feed, atom_feed, plain_text,
# invalid_file_format
def __init__(self, filename):
self.sitemap_file_name = filename
def set_remote_file_from_requests(self, result):
self.sitemap_source_type = 'remote'
self.sitemap_received = True
self.sitemap_contents = result.text
self.sitemap_headers = result.headers
def set_local_file(self, file_contents):
self.sitemap_source_type = 'local'
self.sitemap_received = True
self.sitemap_contents = file_contents
def error_receiving_file(self, message):
self.sitemap_file_error = message
def error_receiving_remote_file(self, message):
self.sitemap_source_type = 'remote'
self.sitemap_file_error = message
def error_receiving_local_file(self, message):
self.sitemap_source_type = 'local'
self.sitemap_file_error = message
def get_file_status_as_dict(self):
return {'file_name': self.sitemap_file_name,
'file_headers': self.sitemap_headers,
'source_type': self.sitemap_source_type,
'file_error': self.sitemap_file_error,
'file_received': self.sitemap_received,
'sitemap_type': self.sitemap_type}
def get_content(self):
if self.sitemap_contents:
return self.sitemap_contents.strip()
else:
return None
def has_lxml(self):
if self.sitemap_lxml:
return True
else:
return False
def set_lxml(self, lxml):
self.sitemap_lxml = lxml
def set_plain_text(self, content):
self.sitemap_plain_text = content
def set_sitemap_type(self, sitemap_type):
self.sitemap_type = sitemap_type
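# Hedged usage sketch (illustrative only): wrap a local plain-text sitemap and
# inspect the bookkeeping the class exposes.
if __name__ == '__main__':
    sitemap = SitemapFile('sitemap.txt')
    sitemap.set_local_file('https://example.com/\nhttps://example.com/about\n')
    sitemap.set_sitemap_type('plain_text')
    print(sitemap.get_file_status_as_dict())
    print(sitemap.get_content())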
| 2.3125 | 2 |
habiticaapi/habitica_object.py | nasfarley88/pyhabitica | 0 | 12797284 | # User provided config file
import config
import requests
import attrdict
import logging
def attrdict_or_list(thing):
if type(thing) == dict:
return attrdict.AttrMap(thing)
elif type(thing) == list:
return thing
else:
assert False, "DON'T PANIC. Something that wasn't a list or dict."
class NotInHabiticaObject(Exception):
pass
class HabiticaObject(object):
"""Abstract class for custom HTTP requests commands for Habitica. """
def __init__(self, uuid, apikey, json=None, endpoint=None):
# json must be created with __dict__ to avoid referencing itself in __setattr__
# self.__dict__["json"] = attrdict.AttrMap()
self.__dict__["_uuid"] = uuid
self.__dict__["_apikey"] = apikey
self.__dict__["_habitica_api"] = config.HABITICA_URL+"/api/v2"
if json:
self.__dict__["json"] = attrdict.AttrMap(json)
elif endpoint:
self.__dict__["json"] = self._get_or_except(endpoint)
else:
self.__dict__["json"] = attrdict.AttrMap()
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
# Use the ordinary, plain, boring, normal setattr so that
# pickle doesn't freak out.
super(HabiticaObject, self).__setattr__("__dict__", d)
def _put_or_except(self, endpoint, json=None):
"""Return json from PUT request or raise an exception."""
if json:
r = requests.put(
self._habitica_api+endpoint,
headers={
'x-api-user':self._uuid,
'x-api-key':self._apikey
},
json=dict(json)
)
else:
r = requests.put(
self._habitica_api+endpoint,
headers={
'x-api-user':self._uuid,
'x-api-key':self._apikey
},
)
try:
r.raise_for_status()
except Exception as e:
print(r)
            raise
return attrdict_or_list(r.json())
def _get_or_except(self, endpoint):
"""Return json from GET request or raise an exception."""
r = requests.get(
self._habitica_api+endpoint,
headers={
'x-api-user':self._uuid,
'x-api-key':self._apikey
}
)
r.raise_for_status()
return attrdict_or_list(r.json())
def _post_or_except(self, endpoint, json={}, query={}):
"""Return json from POST request or raise an exception."""
r = requests.post(
self._habitica_api+endpoint,
headers={
'x-api-user':self._uuid,
'x-api-key':self._apikey
},
json=dict(json),
params=query
)
r.raise_for_status()
return attrdict_or_list(r.json())
def _delete_or_except(self, endpoint):
"""Return json from POST request or raise an exception."""
r = requests.delete(
self._habitica_api+endpoint,
headers={
'x-api-user':self._uuid,
'x-api-key':self._apikey
}
)
r.raise_for_status()
return attrdict_or_list(r.json())
def __str__(self):
return "HabiticaObject: \n"+str(self.__dict__)
| 2.765625 | 3 |
udemy/python-video-workbook/my_progress/033.py | djrgit/coursework | 0 | 12797285 | # Exercise 33 - Local Variables
c = 1
def foo():
c = 2
return c
c = 3
print(foo()) | 2.859375 | 3 |
bom_tools/part_finder.py | snhobbs/BOMTools | 0 | 12797286 | """
part_finder.py
Look through two files and search for an internal part number that looks like `match_pattern`
"""
#!/usr/bin/env python3
import sys, regex, click
#first arg is a digikey csv cart
#second is a newline-delimited list of eoi part numbers
match_pattern = "\w{3}-\w{4}-\w{2}"
@click.argument("--first", "-f", type=str, required=True, help="Design BOM to compare to. Should have the part number somewhere in the line")
@click.argument("--second", "-s", type=str, required=True, help="Main BOM to search. Typically the distributer BOM or a text schematic")
@click.command
def main(first, second):
regx = regex.compile(match_pattern)
with open(first, 'r') as f:
first_parts = [part.strip() for part in f.read().strip().split('\n')]
with open(second, 'r') as f:
st = f.read().strip()
second_parts = regx.findall(st)
    # Collect parts that appear in one file but not the other.
    nfirst = []
    nsecond = []
    for part in second_parts:
        if part not in first_parts and part not in nfirst:
            nfirst.append(part)
    for part in first_parts:
        if part not in second_parts and part not in nsecond:
            nsecond.append(part)
    print("Not in first: ", nfirst)
    print("Not in second: ", nsecond)
if __name__ == "__main__":
main()
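# Hedged CLI sketch (file names are placeholders):
#   python part_finder.py --first design_parts.txt --second digikey_cart.csv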
| 3.3125 | 3 |
src/10fastfingers-auto-type.py | gordonnguyen/10fastfingers-auto-type | 0 | 12797287 | <reponame>gordonnguyen/10fastfingers-auto-type
import os,sys,inspect
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
# Init working folder
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
# URLS
SIGNIN_URL = "https://10fastfingers.com/login"
# ELEMENTS SELECTORS
USERNAME_ID = "UserEmail"
PASSWORD_ID = "<PASSWORD>"
LOGIN_BTN_ID = "login-form-submit"
TARGET_WORD_SLTCR = "#row1 > span.highlight"
INPUT_FIELD_ID = "inputfield"
# SPECIAL TYPE INPUT
SPACE_KEY = "\ue00d"
# OTHERS
LINE = '<>'*20
WORDS_PER_LINE = 12
RESULT_VIEWING_TIME = 15
FINAL_VIEWING_TIME = 60*60 # 1 hour
def main():
    # Initialize settings
setting = read_ini_settings()
typing_test_url = setting['typing_test_url']
use_account = setting['use_account']
word_per_sec = float(setting['word_per_sec'])
num_test_loop = int(setting['num_test_loop'])
extract_text_to_file = setting['extract_text_to_file']
extracted_text = ''
driver = webdriver.Chrome('bin/chromedriver.exe')
# Sign in
if use_account == True:
sign_in(driver, setting)
else:
print(LINE+"\nPlaying anonymously...")
# Get text and type
for i in range(num_test_loop):
word_count = 0
driver.get(typing_test_url)
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, TARGET_WORD_SLTCR)))
print("\nYAY! It's typing... "+LINE)
while True:
target_word = driver.find_element_by_css_selector(TARGET_WORD_SLTCR).text
if target_word != '':
type_word(target_word, driver)
if extract_text_to_file == True:
word_count += 1
extracted_text = accumulate_words(target_word, word_count, extracted_text)
time.sleep(word_per_sec)
else:
time.sleep(RESULT_VIEWING_TIME)
break
# extract words from test to file
if extract_text_to_file:
with open('data/competition_text_file.txt', 'w') as text_f:
text_f.write(extracted_text)
print('\n'+LINE+"\nALL DONE! Check your browser for results!")
# Retain browser for viewing
time.sleep(FINAL_VIEWING_TIME)
driver.close()
def read_ini_settings():
setting_temp = []
setting = {}
num_of_value = 1
    separate_symbol = '='
# Open and read setting.ini
with open('data/setting.ini') as f:
for item in f:
if not (item.startswith('#') or item.startswith('\n')):
                setting_temp.append(item.split(separate_symbol))  # results in a 2D list
# Convert setting list to dictionary
for x in range(len(setting_temp)):
for y in range(num_of_value):
key = setting_temp[x][y].strip()
value = setting_temp[x][y+1].strip()
            if value.lower() == 'true':
                value = True
            elif value.lower() == 'false':
                value = False
            setting[key] = value
return setting
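# Hedged example of data/setting.ini (format inferred from read_ini_settings();
# key names match how main() reads them above, values are placeholders):
#   typing_test_url = https://10fastfingers.com/typing-test/english
#   use_account = false
#   email = user@example.com
#   password = changeme
#   word_per_sec = 0.25
#   num_test_loop = 1
#   extract_text_to_file = true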
def sign_in(driver, setting):
email = setting['email']
password = setting['password']
# Fill text fields with account info
driver.get(SIGNIN_URL)
WebDriverWait(driver, 15).until(EC.element_to_be_clickable((By.ID,USERNAME_ID)))
driver.find_element_by_id(USERNAME_ID).send_keys(email)
driver.find_element_by_id(PASSWORD_ID).send_keys(password)
# Try logging in
driver.find_element_by_id(LOGIN_BTN_ID).click()
try:
WebDriverWait(driver, 15).until(EC.url_changes(SIGNIN_URL))
except:
print('Unable to sign in! Playing anonymously...')
else:
print('Sign in successfully!')
def type_word(target_word, driver):
driver.find_element_by_id(INPUT_FIELD_ID).send_keys(target_word+SPACE_KEY)
def accumulate_words(target_word, word_count, extracted_text):
if word_count % WORDS_PER_LINE == 0:
extracted_text += target_word + '\n'
else:
extracted_text += target_word+' '
return extracted_text
main() | 2.6875 | 3 |
xps_convert/read/xmp_parser.py | InternetUnexplorer/XPSConvert | 0 | 12797288 | import re
from typing import Iterator
from xps_convert.read.errors import ParseError
from xps_convert.read.xmp import Xmp
FIELD_RE = re.compile(r"([\w\s]+):\s(.*)")
def parse_xmp(filename: str, lines: Iterator[str]) -> Xmp:
xmp = Xmp()
# First line is always a comment, skip it
next(lines)
# Match each line and enumerate (line numbers are needed for errors)
for n, match in enumerate((FIELD_RE.match(line) for line in lines)):
if match is not None:
xmp.values[match.group(1)] = match.group(2)
else:
raise ParseError("unable to parse line", filename, n)
# Verify that required fields are present
for field in ("MHS File", "Device", "Package", "SpeedGrade"):
if field not in xmp.values:
raise ParseError(f"missing required field ‘{field}’", filename)
return xmp
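# Hedged usage sketch (illustrative only): "design.xmp" is a placeholder path to
# a file shaped as parse_xmp() expects (a leading comment line followed by
# "Name: value" lines including the required fields).
if __name__ == '__main__':
    with open('design.xmp') as f:
        xmp = parse_xmp('design.xmp', iter(f))
    print(xmp.values['Device'])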
| 2.9375 | 3 |
initializer.py | obs145628/py-neural-nets | 0 | 12797289 | <reponame>obs145628/py-neural-nets
import numpy as np
'''
Initializer
params(shape):
    Create initialized tensor
@param shape tuple of dimensions of the expected tensor
@return initialized tensor
'''
class GaussInitializer:
def params(self, shape):
return np.random.standard_normal(shape)
class GaussSqrtInitializer:
def params(self, shape):
mat = np.random.standard_normal(shape)
if len(shape) > 1:
mat /= np.sqrt(shape[1])
return mat
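# Hedged usage sketch: draw a 3x4 weight matrix with each initializer; the
# sqrt-scaled draw has roughly half the spread because fan-in is 4 here.
if __name__ == '__main__':
    w_plain = GaussInitializer().params((3, 4))
    w_scaled = GaussSqrtInitializer().params((3, 4))
    print(w_plain.std(), w_scaled.std())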
| 3.0625 | 3 |
text_extensions_for_pandas/array/tensor.py | lresende/text-extensions-for-pandas | 0 | 12797290 | <filename>text_extensions_for_pandas/array/tensor.py
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# tensor.py
#
# Part of text_extensions_for_pandas
#
# Pandas extensions to support columns of N-dimensional tensors of equal shape.
#
from typing import *
import numpy as np
import pandas as pd
from pandas.compat import set_function_name
from pandas.core import ops
# Internal imports
import text_extensions_for_pandas.util as util
@pd.api.extensions.register_extension_dtype
class TensorType(pd.api.extensions.ExtensionDtype):
"""
Pandas data type for a column of tensors with the same shape.
"""
@property
def type(self):
"""The type for a single row of a TensorArray column."""
return np.ndarray
@property
def name(self) -> str:
"""A string representation of the dtype."""
return "TensorType"
@classmethod
def construct_from_string(cls, string: str):
"""
See docstring in `ExtensionDType` class in `pandas/core/dtypes/base.py`
for information about this method.
"""
# Upstream code uses exceptions as part of its normal control flow and
# will pass this method bogus class names.
if string == cls.__name__:
return cls()
else:
raise TypeError(
f"Cannot construct a '{cls.__name__}' from '{string}'")
@classmethod
def construct_array_type(cls):
"""
See docstring in `ExtensionDType` class in `pandas/core/dtypes/base.py`
for information about this method.
"""
return TensorArray
def __from_arrow__(self, extension_array):
from text_extensions_for_pandas.array.arrow_conversion import ArrowTensorArray
values = ArrowTensorArray.to_numpy(extension_array)
return TensorArray(values)
class TensorOpsMixin(pd.api.extensions.ExtensionScalarOpsMixin):
"""
Mixin to provide operators on underlying ndarray.
TODO: would be better to derive from ExtensionOpsMixin, but not available
"""
@classmethod
def _create_method(cls, op, coerce_to_dtype=True):
# NOTE: this overrides, but coerce_to_dtype might not be needed
def _binop(self, other):
lvalues = self._tensor
rvalues = other._tensor if isinstance(other, TensorArray) else other
res = op(lvalues, rvalues)
return TensorArray(res)
op_name = ops._get_op_name(op, True)
return set_function_name(_binop, op_name, cls)
class TensorArray(pd.api.extensions.ExtensionArray, TensorOpsMixin):
"""
A Pandas `ExtensionArray` that represents a column of `numpy.ndarray`s,
or tensors, where the outer dimension is the count of tensors in the column.
Each tensor must have the same shape.
"""
def __init__(self, values: Union[np.ndarray, Sequence[np.ndarray]],
make_contiguous: bool = True):
"""
:param values: A `numpy.ndarray` or sequence of `numpy.ndarray`s of equal shape.
:param make_contiguous: force values to be contiguous in memory
"""
if isinstance(values, np.ndarray):
self._tensor = values
elif isinstance(values, Sequence):
self._tensor = np.stack(values, axis=0) if len(values) > 0 else np.array([])
else:
raise TypeError(f"Expected a numpy.ndarray or sequence of numpy.ndarray, "
f"but received {values} "
f"of type '{type(values)}' instead.")
if not self._tensor.flags.c_contiguous and make_contiguous:
self._tensor = np.ascontiguousarray(self._tensor)
@classmethod
def _concat_same_type(
cls, to_concat: Sequence["TensorArray"]
) -> "TensorArray":
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
return TensorArray(np.concatenate([a._tensor for a in to_concat]))
def isna(self) -> np.array:
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
# TODO any or all values in row nan?
return np.any(np.isnan(self._tensor), axis=1)
def copy(self) -> "TensorArray":
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
ret = TensorArray(
self._tensor.copy(),
)
# TODO: Copy cached properties too
return ret
def take(
self, indices: Sequence[int], allow_fill: bool = False,
fill_value: Any = None
) -> "TensorArray":
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
values = self._tensor.take(indices, axis=0)
if allow_fill:
# From API docs: "[If allow_fill == True, then] negative values in
# `indices` indicate missing values. These values are set to
# `fill_value`.
for i in range(len(indices)):
if indices[i] < 0:
# Note that Numpy will broadcast the fill value to the shape
# of each row.
values[i] = fill_value
return TensorArray(values)
@property
def dtype(self) -> pd.api.extensions.ExtensionDtype:
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
return TensorType()
def to_numpy(self, dtype=None, copy=False, na_value=pd.api.extensions.no_default):
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
# TODO options
return self._tensor
def __len__(self) -> int:
return len(self._tensor)
def __getitem__(self, item) -> "TensorArray":
"""
See docstring in `Extension Array` class in `pandas/core/arrays/base.py`
for information about this method.
"""
        # TODO pandas converts series with np.asarray, then applies a function e.g. map_infer(array, is_float) to format strings etc.
# Return an ndarray for scalar item, or TensorArray for slice
if isinstance(item, int):
return self._tensor[item]
else:
return TensorArray(self._tensor[item])
def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None:
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
if isinstance(key, (int, slice)):
self._tensor[key] = value
else:
raise NotImplementedError(f"__setitem__ with key type '{type(key)}' "
f"not implemented")
def __repr__(self):
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
return self._tensor.__repr__()
def __str__(self):
return self._tensor.__str__()
def _reduce(self, name, skipna=True, **kwargs):
"""
See docstring in `ExtensionArray` class in `pandas/core/arrays/base.py`
for information about this method.
"""
if name == "sum":
return TensorArray(np.sum(self._tensor, axis=0))
else:
raise NotImplementedError(f"'{name}' aggregate not implemented.")
def __arrow_array__(self, type=None):
from text_extensions_for_pandas.array.arrow_conversion import ArrowTensorArray
return ArrowTensorArray.from_numpy(self._tensor)
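# Hedged usage sketch (illustrative only, not part of the original module): build
# a TensorArray from a batch of 2x2 tensors and exercise a few of the
# ExtensionArray methods defined above.
def _tensor_array_demo():
    batch = np.stack([np.eye(2), 2 * np.eye(2), 3 * np.eye(2)])
    arr = TensorArray(batch)
    series = pd.Series(arr)         # a pandas column of tensors
    taken = arr.take([2, 0])        # reorder rows
    return series.dtype, taken.to_numpy().shape   # (TensorType, (2, 2, 2))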
# Add operators from the mixin to the class
TensorArray._add_arithmetic_ops()
TensorArray._add_comparison_ops() | 2.421875 | 2 |
subprojects/laser/data/setup.py | sirca/bdkd_datastore | 3 | 12797291 | # Copyright 2015 Nicta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
from setuptools import setup
import os
package_name = 'bdkd-laser-data'
webdir = 'wsgi'
datafiles = [(os.path.join(package_name, root), [os.path.join(root, f) for f in files])
for root, dirs, files in os.walk(webdir)]
setup(
name=package_name,
version='0.1.0',
description='Access dataset data',
author='Sirca Ltd',
author_email='<EMAIL>',
url='http://github.com/sirca/bdkd',
package_dir={'': 'lib'},
packages=['bdkd.laser', 'bdkd.laser.util'],
data_files = datafiles,
scripts=[
'bin/pack_maps.py',
'bin/pack_raw.py',
],
entry_points = {
'console_scripts': [
'datastore-add-laser = bdkd.laser.util.add:add_laser_util',
],
},
install_requires=['boto', 'PyYAML', 'bdkd-datastore', 'h5py']
)
| 1.515625 | 2 |
fact-bounty-flask/api/models/story.py | ganeshpatro321/fact-Bounty | 0 | 12797292 | from datetime import datetime
from ... import db
class Story(db.Model):
""" This model holds information about Story """
__tablename__ = 'story'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.Text, nullable=False)
content = db.Column(db.Text, nullable=False)
featured_img_url = db.Column(db.Text, nullable=False)
approved_count = db.Column(db.Integer, nullable=False)
fake_count = db.Column(db.Integer, nullable=False)
mixedvote_count = db.Column(db.Integer, nullable=False)
    date_added = db.Column(db.Text, default=datetime.now)  # callable, so each row gets its own timestamp
def __init__(self, title, content, featured_img_url, approved_count, fake_count, mixedvote_count):
"""
Initialize the instance
"""
self.title = title
self.content = content
self.featured_img_url = featured_img_url
self.approved_count = approved_count
self.fake_count = fake_count
self.mixedvote_count = mixedvote_count
def __repr__(self):
"""
        Returns the object representation
"""
        return '<Story %r>' % self.content
def to_json(self):
"""
Returns a JSON object
:return: story JSON object
"""
json_story = {
'title': self.title,
'content': self.content,
'featured_img_url': self.featured_img_url,
'approved_count': self.approved_count,
'fake_count': self.fake_count,
'mixedvote_count': self.mixedvote_count,
'date_added': self.date_added
}
return json_story
def save(self):
"""
Save a story to the database.
This includes creating a new story and editing one.
"""
db.session.add(self)
db.session.commit()
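# Hedged usage sketch (illustrative only): assumes a Flask app context in which
# the shared SQLAlchemy `db` instance has been initialised.
def _story_demo():
    story = Story(
        title='Example story',
        content='Body text',
        featured_img_url='https://example.com/img.png',
        approved_count=0,
        fake_count=0,
        mixedvote_count=0,
    )
    story.save()
    return story.to_json()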
| 3.359375 | 3 |
cpdb/officers/serializers/response_mobile_serializers.py | invinst/CPDBv2_backend | 25 | 12797293 | <filename>cpdb/officers/serializers/response_mobile_serializers.py
from rest_framework import serializers
from shared.serializer import NoNullSerializer, OfficerPercentileSerializer, OfficerYearlyPercentileSerializer
class PoliceUnitMobileSerializer(NoNullSerializer):
unit_id = serializers.IntegerField(source='id')
unit_name = serializers.CharField()
description = serializers.CharField()
class OfficerInfoMobileSerializer(NoNullSerializer):
officer_id = serializers.IntegerField(source='id')
full_name = serializers.CharField()
unit = PoliceUnitMobileSerializer(source='last_unit')
date_of_appt = serializers.DateField(source='appointed_date', format='%Y-%m-%d')
date_of_resignation = serializers.DateField(source='resignation_date', format='%Y-%m-%d')
active = serializers.SerializerMethodField()
rank = serializers.CharField()
race = serializers.CharField()
birth_year = serializers.IntegerField()
badge = serializers.SerializerMethodField()
historic_badges = serializers.ListField(child=serializers.CharField())
gender = serializers.CharField(source='gender_display')
percentiles = serializers.SerializerMethodField()
allegation_count = serializers.IntegerField()
percentile_allegation = serializers.DecimalField(
source='complaint_percentile', max_digits=6, decimal_places=4, allow_null=True
)
percentile_trr = serializers.DecimalField(source='trr_percentile', max_digits=6, decimal_places=4, allow_null=True)
honorable_mention_count = serializers.IntegerField()
sustained_count = serializers.IntegerField()
unsustained_count = serializers.IntegerField()
discipline_count = serializers.IntegerField()
civilian_compliment_count = serializers.IntegerField()
trr_count = serializers.IntegerField()
major_award_count = serializers.IntegerField()
honorable_mention_percentile = serializers.DecimalField(max_digits=6, decimal_places=4, allow_null=True)
def get_percentiles(self, obj):
yearly_percentiles = obj.officeryearlypercentile_set.order_by('year')
return OfficerYearlyPercentileSerializer(yearly_percentiles, many=True).data
def get_active(self, obj):
return obj.get_active_display()
def get_badge(self, obj):
return obj.current_badge or ''
class BaseTimelineMobileSerializer(NoNullSerializer):
unit_name = serializers.SerializerMethodField()
unit_description = serializers.SerializerMethodField()
rank = serializers.SerializerMethodField()
priority_sort = serializers.SerializerMethodField()
kind = serializers.SerializerMethodField()
def get_kind(self, obj):
raise NotImplementedError
def get_priority_sort(self, obj):
raise NotImplementedError
def get_unit_name(self, obj):
return obj.unit_name if obj.unit_name else ''
def get_unit_description(self, obj):
return obj.unit_description if obj.unit_description else ''
def get_rank(self, obj):
return obj.rank_name
class RankChangeNewTimelineMobileSerializer(BaseTimelineMobileSerializer):
date_sort = serializers.DateField(source='spp_date', format=None)
date = serializers.DateField(source='spp_date', format='%Y-%m-%d')
def get_kind(self, obj):
return 'RANK_CHANGE'
def get_priority_sort(self, obj):
return 25
def get_rank(self, obj):
return obj.rank
class JoinedNewTimelineMobileSerializer(BaseTimelineMobileSerializer):
date_sort = serializers.DateField(source='appointed_date', format=None)
date = serializers.DateField(source='appointed_date', format='%Y-%m-%d')
def get_kind(self, obj):
return 'JOINED'
def get_priority_sort(self, obj):
return 10
class UnitChangeNewTimelineMobileSerializer(BaseTimelineMobileSerializer):
date_sort = serializers.DateField(source='effective_date', format=None)
date = serializers.DateField(source='effective_date', format='%Y-%m-%d')
def get_kind(self, obj):
return 'UNIT_CHANGE'
def get_priority_sort(self, obj):
return 20
class VictimMobileSerializer(NoNullSerializer):
gender = serializers.CharField(source='gender_display')
race = serializers.CharField()
age = serializers.IntegerField()
class AttachmentFileMobileSerializer(NoNullSerializer):
title = serializers.CharField()
url = serializers.CharField()
preview_image_url = serializers.CharField()
file_type = serializers.CharField()
id = serializers.CharField()
class CRNewTimelineMobileSerializer(BaseTimelineMobileSerializer):
date_sort = serializers.SerializerMethodField()
date = serializers.SerializerMethodField()
crid = serializers.CharField()
category = serializers.SerializerMethodField()
subcategory = serializers.CharField()
finding = serializers.CharField(source='final_finding_display')
outcome = serializers.CharField(source='final_outcome')
coaccused = serializers.IntegerField(source='coaccused_count')
attachments = serializers.SerializerMethodField()
point = serializers.SerializerMethodField()
victims = VictimMobileSerializer(many=True)
def get_date_sort(self, obj):
return obj.allegation.incident_date.date()
def get_date(self, obj):
return obj.allegation.incident_date.date().strftime('%Y-%m-%d')
def get_category(self, obj):
return obj.category if obj.category else 'Unknown'
def get_kind(self, obj):
return 'CR'
def get_priority_sort(self, obj):
return 30
def get_point(self, obj):
try:
return {
'lon': obj.allegation.point.x,
'lat': obj.allegation.point.y
}
except AttributeError:
return None
def get_attachments(self, obj):
return AttachmentFileMobileSerializer(obj.allegation.prefetch_filtered_attachments, many=True).data
class AwardNewTimelineMobileSerializer(BaseTimelineMobileSerializer):
date_sort = serializers.DateField(source='start_date', format=None)
date = serializers.DateField(source='start_date', format='%Y-%m-%d')
award_type = serializers.CharField()
def get_kind(self, obj):
return 'AWARD'
def get_priority_sort(self, obj):
return 40
class TRRNewTimelineMobileSerializer(BaseTimelineMobileSerializer):
trr_id = serializers.IntegerField(source='id')
date_sort = serializers.SerializerMethodField()
date = serializers.SerializerMethodField()
taser = serializers.NullBooleanField()
firearm_used = serializers.NullBooleanField()
point = serializers.SerializerMethodField()
def get_kind(self, obj):
return 'FORCE'
def get_priority_sort(self, obj):
return 50
def get_date_sort(self, obj):
return obj.trr_datetime.date()
def get_date(self, obj):
return obj.trr_datetime.date().strftime('%Y-%m-%d')
def get_point(self, obj):
try:
return {
'lon': obj.point.x,
'lat': obj.point.y
}
except AttributeError:
return None
class OfficerPercentileMobileSerializer(NoNullSerializer):
percentile_trr = serializers.DecimalField(
source='trr_percentile', allow_null=True, read_only=True, max_digits=6, decimal_places=4)
percentile_allegation_civilian = serializers.DecimalField(
source='civilian_allegation_percentile', allow_null=True, read_only=True, max_digits=6, decimal_places=4)
percentile_allegation_internal = serializers.DecimalField(
source='internal_allegation_percentile', allow_null=True, read_only=True, max_digits=6, decimal_places=4)
class CoaccusalCardMobileSerializer(OfficerPercentileSerializer):
id = serializers.IntegerField()
full_name = serializers.CharField()
rank = serializers.CharField()
coaccusal_count = serializers.IntegerField()
class OfficerCardMobileSerializer(OfficerPercentileSerializer):
id = serializers.IntegerField()
full_name = serializers.CharField()
complaint_count = serializers.IntegerField(source='allegation_count')
| 2 | 2 |
osmcsclassify/Config.py | jremillard/osm-changeset-classification | 16 | 12797294 | <filename>osmcsclassify/Config.py
historyDbTempDirName = "/home/jason/nn/osm-changeset-classification/osm-planet"
historyDbFileName = "osm-planet/history.sqlite"
#historyPBF = "../osm-data/vermont.osh.pbf"
#historyPBF = 'osm-planet/history-180319.osm.pbf'
historyPBF = "/media/jason/E46AC1AC6AC17BB4/Remillard/maps/osm2017/history-180319.osm.pbf"
changeSetHistoryOSM = "/media/jason/E46AC1AC6AC17BB4/Remillard/maps/osm2017/changesets-180319.osm.bz2"
| 0.984375 | 1 |
hilearn/mcmc/mcmc_sampler.py | houruiyan/hilearn | 4 | 12797295 | <reponame>houruiyan/hilearn<gh_stars>1-10
import numpy as np
def Geweke_Z(X, first=0.1, last=0.5):
N = X.shape[0]
A = X[:int(first*N)]
B = X[int(last*N):]
if np.sqrt(np.var(A) + np.var(B)) == 0:
Z = None
else:
Z = abs(A.mean() - B.mean()) / np.sqrt(np.var(A) + np.var(B))
return Z
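# Hedged usage sketch: a white-noise chain should look converged, so the score
# computed above stays small (the sampler below treats <= 2 as converged).
if __name__ == '__main__':
    chain = np.random.randn(5000)
    print(Geweke_Z(chain))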
def mcmc_sampler(data, prior_pdf, likelihood, propose_pdf,
propose_func, xx, param, adaptive_func=None,
                 min_run=1000, max_run=3000, gap=100):
# initialization
X_now = propose_func(xx, param)
L_now = likelihood(X_now, data, param)
P_now = prior_pdf(X_now) + L_now
# MCMC running
accept_num = 0
    L_all = np.zeros(max_run)
    X_all = np.zeros((max_run, len(X_now)))
    for m in range(max_run):
P_try, L_try = 0.0, 0.0
Q_now, Q_try = 0.0, 0.0
# step 1: propose a value
X_try = propose_func(X_now, param)
Q_now = propose_pdf(X_now, X_try, param)
Q_try = propose_pdf(X_try, X_now, param)
L_try = likelihood(X_try, data, param)
P_try = prior_pdf(X_try) + L_try
# step 2: accept or reject the proposal
alpha = np.exp(min(P_try+Q_now-P_now-Q_try, 0))
if alpha is None:
print("Warning: accept ratio alpha is none!")
elif np.random.rand(1) < alpha:
accept_num += 1
X_now = X_try + 0.0
P_now = P_try + 0.0
L_now = L_try + 0.0
L_all[m] = L_now
X_all[m,:] = X_now
# step 3. convergence diagnostics
if m >= min_run and m % gap == 0:
z_scores = np.zeros(X_all.shape[1])
for k in range(X_all.shape[1]):
z_scores[k] = Geweke_Z(X_all[:m, k])
if sum(z_scores <= 2) == len(z_scores):
L_all = L_all[:m]
X_all = X_all[:m,:]
break
# step 4: adaptive MCMC
if (adaptive_func is not None and
accept_num >= 10 and m % gap == 0):
param = adaptive_func(X_all[:m,:], param)
print("MCMC summary: %d acceptance in %d run (%.1f%%)."
%(accept_num, m, accept_num*100.0/m))
return X_all, L_all, accept_num | 2.515625 | 3 |
model/user_in_group.py | Belyanova/Python_-training | 0 | 12797296 | class UserGroup:
def __init__(self, id=None, group_id=None,):
self.group_id = group_id
self.id = id | 2.078125 | 2 |
landing/migrations/0001_initial.py | XeryusTC/projman | 0 | 12797297 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
def setup_site(apps, schema_editor):
"""Populate the sites model"""
Site = apps.get_model('sites', 'Site')
Site.objects.all().delete()
# Register SITE_ID = 1
try:
domain = settings.DOMAIN
except:
domain = 'example.com'
Site.objects.create(domain=domain, name='ProjMan')
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(setup_site)
]
| 2.09375 | 2 |
gmaps/process/gmaps_process.py | cfespinoza/google_maps_exractor | 0 | 12797298 | import multiprocessing
import multiprocessing.pool  # needed so multiprocessing.pool.Pool below resolves
class GmapsProcess(multiprocessing.Process):
# make 'daemon' attribute always return False
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
daemon = property(_get_daemon, _set_daemon)
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class GmapsProcessPool(multiprocessing.pool.Pool):
Process = GmapsProcess
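# Hedged usage sketch (illustrative only): this classic non-daemonic pool recipe
# targets older Pool internals (pre Python 3.8, where Pool.Process was a plain
# class attribute); workers from such a pool may spawn children of their own.
def _square(x):
    return x * x
if __name__ == '__main__':
    with GmapsProcessPool(processes=2) as pool:
        print(pool.map(_square, range(5)))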
| 2.71875 | 3 |
listful/types.py | d1618033/listful | 2 | 12797299 | <gh_stars>1-10
import typing
ITEM = typing.TypeVar('ITEM')
FIELD = str
VALUE = typing.TypeVar('VALUE')
GETTER = typing.Callable[[ITEM, FIELD], VALUE]
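# Hedged usage sketch: a getattr-based callable matching the GETTER signature.
def attribute_getter(item: typing.Any, field: FIELD) -> typing.Any:
    return getattr(item, field)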
| 2.359375 | 2 |
amicropyserver.py | IhorNehrutsa/micropyserver | 0 | 12797300 | <gh_stars>0
"""
MicroPyServer is a simple HTTP server for MicroPython projects.
@see https://github.com/troublegum/micropyserver
The MIT License
Copyright (c) 2019 troublegum. https://github.com/troublegum/micropyserver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import re
import sys
import io
import utime
import gc
import uasyncio as asyncio
class aMicroPyServer(object):
def __init__(self, host="0.0.0.0", port=80, backlog=5, timeout=20):
""" Constructor """
self._host = host
self._port = port
self._routes = []
self._on_request_handler = None
self.backlog = backlog
self.timeout = timeout
self._counter = 0 # Remove it in the production release.
async def start(self):
""" Start server """
print('Awaiting client connection on {}:{}'.format(self._host, self._port))
self.server = await asyncio.start_server(self.run_client, self._host, self._port, self.backlog)
while True:
await asyncio.sleep(1)
async def run_client(self, sreader, swriter):
self.start_time = utime.ticks_ms()
#print('Got connection from client', self, sreader, sreader.s.fileno(), sreader.e['peername'])
try:
request = b''
res = b''
while True:
try:
'''
# 450-550 ms
res = await asyncio.wait_for(sreader.readline(), self.timeout)
request += res
if res == b'\r\n': # end of HTTP request
break
'''
# 150-250 ms
res = await asyncio.wait_for(sreader.read(1024), self.timeout)
print(res)
request += res
if request[-4:] == b'\r\n\r\n': # end of HTTP request
break
'''
# 150-250 ms
request = await asyncio.wait_for(sreader.readline(), self.timeout)
res = await asyncio.wait_for(sreader.read(1024), self.timeout)
if res[-4:] == b'\r\n\r\n': # end of HTTP request
break
'''
except asyncio.TimeoutError as e:
print(1, e, "asyncio.TimeoutError", self.timeout)
res = b''
if res == b'': # socket connection broken
print('raise OSError')
raise OSError
if request:
request = str(request, "utf8")
#print('request >>>{}<<<'.format(request))
try:
route = self.find_route(request)
if route:
await route["handler"](swriter, request)
else:
await self.not_found(swriter)
#1/0 # test internal_error
except Exception as e:
print(2, e)
                    await self.internal_error(swriter, e)  # internal_error is a coroutine and must be awaited
raise
except OSError as e:
print(3, e)
pass
swriter_s_fileno = swriter.s.fileno()
await swriter.wait_closed()
#print('Client socket closed.', self, swriter, swriter_s_fileno, swriter.s.fileno(), swriter.e['peername'])
print('Render time: %i ms' % utime.ticks_diff(utime.ticks_ms(), self.start_time))
gc.collect()
#print('---------------------------------------------------------------')
async def close(self):
print('Closing server...')
self.server.close()
await self.server.wait_closed()
print('Server is closed.')
def add_route(self, path, handler, method="GET"):
""" Add new route """
self._routes.append({"path": path, "handler": handler, "method": method})
async def send(self, swriter, response, status=200, content_type="Content-Type: text/plain", extra_headers=[]):
""" Send response to client """
if swriter is None:
raise Exception("Can't send response, no connection instance")
status_message = {200: "OK", 400: "Bad Request", 403: "Forbidden", 404: "Not Found",
500: "Internal Server Error"}
swriter.write("HTTP/1.0 " + str(status) + " " + status_message[status] + "\r\n" + \
content_type + "\r\n")
await swriter.drain()
for header in extra_headers:
swriter.write(header + "\r\n")
### await swriter.write("X-Powered-By: MicroPyServer\r\n") # not required, vainglory
swriter.write("Cache-Control: no-store\r\n") # The response may not be stored in any cache.
# This is necessary to execute the code on the server:
# switch PIN ON and switch PIN OFF.
# This prevents showing the cashed text
# when a user presses the "Backward/Forward" button in a browser.
swriter.write("\r\n") # end of HTTP header
await swriter.drain()
self._counter += 1
swriter.write(str(self._counter) + '\r\n')
swriter.write(response)
#print("swriter.out_buf >>>{}<<<".format(swriter.out_buf))
await swriter.drain()
#print("Finished processing request.")
def find_route(self, request):
""" Find route """
method = re.search("^([A-Z]+)", request).group(1)
for route in self._routes:
if method != route["method"]:
continue
path = re.search("^[A-Z]+\\s+(/[-a-zA-Z0-9_.]*)", request).group(1)
if path == route["path"]:
return route
else:
match = re.search("^" + route["path"] + "$", path)
if match:
return route
return None
'''
def find_route(self, request):
""" Find route """
lines = request.split("\r\n")
#print('lines', lines)
if len(lines[0]) > 0:
method = re.search("^([A-Z]+)", lines[0]).group(1)
for route in self._routes:
if method != route["method"]:
continue
path = re.search("^[A-Z]+\\s+(/[-a-zA-Z0-9_.]*)", lines[0]).group(1)
if path == route["path"]:
return route
else:
match = re.search("^" + route["path"] + "$", path)
if match:
return route
return None
'''
async def not_found(self, swriter):
""" Not found action """
await self.send(swriter, "404 Not found", status=404)
async def internal_error(self, swriter, error):
""" Catch error action """
output = io.StringIO()
sys.print_exception(error, output)
str_error = output.getvalue()
output.close()
if swriter.s.fileno() != -1:
await self.send(swriter, "Error: " + str_error, status=500)
def on_request(self, handler):
""" Set request handler """
self._on_request_handler = handler
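# Minimal usage sketch (illustrative only; the route path, handler and run call are
# placeholders, not part of this module):
#
#     server = aMicroPyServer()
#
#     async def index(swriter, request):
#         await server.send(swriter, "Hello from MicroPyServer")
#
#     server.add_route("/", index)
#     asyncio.run(server.start())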
| 2.25 | 2 |
ole.py | nursultanramazanov/123 | 2 | 12797301 | <reponame>nursultanramazanov/123
# -*- coding:utf-8 -*-
# Author: <NAME>(<EMAIL>)
import os
import sys
import struct
import types
import kernel
import kavutil
# -------------------------------------------------------------------------
# Message output function
# -------------------------------------------------------------------------
__version__ = '1.0'
# -------------------------------------------------------------------------
# Define the engine error message
# -------------------------------------------------------------------------
class Error(Exception):
pass
# ---------------------------------------------------------------------
# MsiBase64 encoder / decoder
# ---------------------------------------------------------------------
def MsiBase64Encode(x):
ct = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz._'
if x > 63:
return None
return ord(ct[x])
def DecodeStreamName(name):
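    # MSI stream names are stored compressed: UTF-16 code units in 0x3800-0x383F encode
    # two base64-style characters each, and 0x4800-0x483F encode a single character.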
wch = []
och = []
for i in range(len(name) / 2):
wch.append(kavutil.get_uint16(name, i * 2))
for ch in wch:
if 0x3800 <= ch <= 0x4840:
if ch >= 0x4800: # 0x4800 - 0x483F
# only one charecter can be decoded
ch = MsiBase64Encode(ch - 0x4800)
if not ch:
continue
else: # 0x3800 - 0x383F
# the value contains two characters
ch -= 0x3800
och.append(MsiBase64Encode(ch & 0x3f))
ch = MsiBase64Encode(((ch >> 6) & 0x3f))
och.append(ch)
ret_str = ''
for ch in och:
ret_str += struct.pack('<H', ch)
# print ret_str.decode('UTF-16LE', 'replace')
return ret_str
# ---------------------------------------------------------------------
# Get OLE internal block links
# ---------------------------------------------------------------------
def get_block_link(no, bbd_or_sbd_fat):
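    # Follow a FAT chain starting at block `no`, collecting block numbers until the
    # end-of-chain marker 0xFFFFFFFE (or a broken/looping link) is reached.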
ret = []
fat = bbd_or_sbd_fat
next_b = no
if next_b != 0xfffffffe:
ret.append(next_b)
while True:
try:
next_b = fat[next_b]
if next_b == 0xfffffffe:
break
if len(ret) % 10000 == 0:
if next_b in ret: # 이미 링크가 존재하면 종료
break
ret.append(next_b)
except KeyError:
break
return ret
# ---------------------------------------------------------------------
# Read an OLE big block
# ---------------------------------------------------------------------
def get_bblock(buf, no, bsize):
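    # Big block `no` starts at offset (no + 1) * bsize because the 512-byte OLE header
    # occupies the slot in front of block 0.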
off = (no+1) * bsize
return buf[off:off+bsize]
# ---------------------------------------------------------------------
# Get the BBD list of the OLE file.
# ---------------------------------------------------------------------
def get_bbd_list_array(buf, verbose=False):
bbd_list_array = buf[0x4c:0x200] # 전체 bbd_list
num_of_bbd_blocks = kavutil.get_uint32(buf, 0x2c)
xbbd_start_block = kavutil.get_uint32(buf, 0x44)
num_of_xbbd_blocks = kavutil.get_uint32(buf, 0x48)
bsize = 1 << kavutil.get_uint16(buf, 0x1e)
if verbose:
kavutil.vprint(None, 'Num of BBD Blocks', '%d' % num_of_bbd_blocks)
kavutil.vprint(None, 'XBBD Start', '%08X' % xbbd_start_block)
kavutil.vprint(None, 'Num of XBBD Blocks', '%d' % num_of_xbbd_blocks)
if num_of_bbd_blocks > 109: # bbd list 개수가 109보다 크면 xbbd를 가져와야 함
next_b = xbbd_start_block
for i in range(num_of_xbbd_blocks):
t_data = get_bblock(buf, next_b, bsize)
bbd_list_array += t_data[:-4]
next_b = kavutil.get_uint32(t_data, bsize-4)
return bbd_list_array[:num_of_bbd_blocks*4], num_of_bbd_blocks, num_of_xbbd_blocks, xbbd_start_block
# ---------------------------------------------------------------------
# Convert a BBD list index into a file offset.
# ---------------------------------------------------------------------
def get_bbd_list_index_to_offset(buf, idx):
num_of_bbd_blocks = kavutil.get_uint32(buf, 0x2c)
xbbd_start_block = kavutil.get_uint32(buf, 0x44)
# num_of_xbbd_blocks = kavutil.get_uint32(buf, 0x48)
bsize = 1 << kavutil.get_uint16(buf, 0x1e)
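    # The first 109 BBD list entries live in the header at offset 0x4C; later entries
    # are stored in XBBD blocks, each holding (bsize / 4) - 1 entries plus a link to
    # the next XBBD block.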
if idx >= num_of_bbd_blocks: # 범위를 벗어나면 에러
return -1
if idx <= 109:
return 0x4c + (idx * 4)
else:
t_idx = idx - 109
seg = (t_idx / ((bsize / 4) - 1)) + (1 if (t_idx % ((bsize / 4) - 1)) else 0)
off = (t_idx % ((bsize / 4) - 1))
next_b = xbbd_start_block
for i in range(seg):
if next_b == 0xfffffffe:
return -1
t_buf = get_bblock(buf, next_b, bsize)
next_b = kavutil.get_uint32(t_buf, bsize-4)
return (next_b + 1) * bsize + (off * 4)
# ---------------------------------------------------------------------
# Check whether the file is an OLE file.
# ---------------------------------------------------------------------
def is_olefile(filename):
try:
buf = open(filename, 'rb').read(8)
if buf == 'D0CF11E0A1B11AE1'.decode('hex'):
return True
except IOError:
pass
return False
# ---------------------------------------------------------------------
# OleFile class
# ---------------------------------------------------------------------
class OleFile:
def __init__(self, input_data, write_mode=False, verbose=False):
self.verbose = verbose # 디버깅용
self.isfile = False # 파일로 접근 중인가?
if isinstance(input_data, types.StringType):
if os.path.exists(input_data):
self.isfile = True
self.fname = input_data
self.fp = open(input_data, 'rb')
buf = self.fp.read()
else:
buf = input_data
else:
raise Error('Input data is invalid.')
# 수정 모드
self.write_mode = write_mode
# OLE 주요 데이터
self.mm = None
self.bsize = None
self.ssize = None
self.bbd_list_array = None
self.bbd = None
self.bbd_fat = {}
self.sbd = None
self.root = None
self.pps = None
self.small_block = None
self.root_list_array = None
self.exploit = [] # 취약점 존재 여부
# 임시 변수
self.__deep = None
self.__full_list = None
self.init(buf)
def init(self, buf):
# OLE 주요 데이터
self.mm = buf
self.bsize = 0
self.ssize = 0
# 임시 변수
self.__deep = 0
self.__full_list = []
self.parse() # OLE 파일을 분석
def close(self):
if self.isfile:
self.fp.close()
if self.write_mode:
open(self.fname, 'wb').write(self.mm)
# ---------------------------------------------------------------------
    # Parse the OLE file
# ---------------------------------------------------------------------
def parse(self):
buf = self.mm[:8]
if buf != 'D0CF11E0A1B11AE1'.decode('hex'):
raise Error('Not Ole signature')
        # Get the big block and small block sizes
self.bsize = 1 << kavutil.get_uint16(self.mm, 0x1e)
self.ssize = 1 << kavutil.get_uint16(self.mm, 0x20)
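        # The header stores the block sizes as exponents of two (typically 2^9 = 512
        # byte big blocks and 2^6 = 64 byte small blocks).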
if self.verbose:
kavutil.vprint('Header')
kavutil.vprint(None, 'Big Block Size', '%d' % self.bsize)
kavutil.vprint(None, 'Small Block Size', '%d' % self.ssize)
print
kavutil.HexDump().Buffer(self.mm, 0, 0x60)
print
if self.bsize % 0x200 != 0 or self.ssize != 0x40: # 이상 파일 정보 처리
return False
        # Read the BBD
self.bbd_list_array, num_of_bbd_blocks, num_of_xbbd_blocks, xbbd_start_block = \
get_bbd_list_array(self.mm, self.verbose)
'''
# 상당히 많은 데이터가 출력되어 주석 처리
if self.verbose:
print
if num_of_bbd_blocks < 109:
kavutil.HexDump().Buffer(self.mm, 0x4c, num_of_bbd_blocks * 4)
else:
kavutil.HexDump().Buffer(self.mm, 0x4c, num_of_bbd_blocks * 109)
next_b = xbbd_start_block
for i in range(num_of_xbbd_blocks):
t_data = get_bblock(self.mm, next_b, self.bsize)
print
kavutil.HexDump().Buffer(self.mm, (next_b+1) * self.bsize)
next_b = kavutil.get_uint32(t_data, self.bsize-4)
'''
if len(self.bbd_list_array)/4 < num_of_bbd_blocks:
return False
self.bbd = ''
for i in range(num_of_bbd_blocks):
no = kavutil.get_uint32(self.bbd_list_array, i*4)
self.bbd += get_bblock(self.mm, no, self.bsize)
self.bbd_fat = {}
for i in range(len(self.bbd) / 4):
n = kavutil.get_uint32(self.bbd, i*4)
self.bbd_fat[i] = n
if self.verbose:
open('bbd.dmp', 'wb').write(self.bbd)
print
kavutil.vprint('BBD')
print
kavutil.HexDump().Buffer(self.bbd, 0, 0x80)
        # Read the root entry chain
root_startblock = kavutil.get_uint32(self.mm, 0x30)
root_list_array = get_block_link(root_startblock, self.bbd_fat)
self.root_list_array = root_list_array
self.root = ''
for no in root_list_array:
self.root += get_bblock(self.mm, no, self.bsize)
if self.verbose:
open('root.dmp', 'wb').write(self.root)
print
kavutil.vprint('ROOT')
kavutil.vprint(None, 'Start Blocks', '%d' % root_startblock)
print
kavutil.HexDump().Buffer(self.root, 0, 0x80)
        # Read the SBD
sbd_startblock = kavutil.get_uint32(self.mm, 0x3c)
num_of_sbd_blocks = kavutil.get_uint32(self.mm, 0x40)
sbd_list_array = get_block_link(sbd_startblock, self.bbd_fat)
self.sbd = ''
for no in sbd_list_array:
self.sbd += get_bblock(self.mm, no, self.bsize)
self.sbd_fat = {}
for i in range(len(self.sbd) / 4):
n = kavutil.get_uint32(self.sbd, i*4)
self.sbd_fat[i] = n
if self.verbose:
open('sbd.dmp', 'wb').write(self.sbd)
print
kavutil.vprint('SBD')
kavutil.vprint(None, 'Start Blocks', '%d' % sbd_startblock)
kavutil.vprint(None, 'Num of SBD Blocks', '%d' % num_of_sbd_blocks)
print
kavutil.HexDump().Buffer(self.sbd, 0, 0x80)
        # Read the PPS entries
self.pps = []
for i in range(len(self.root) / 0x80):
p = {}
pps = self.root[i*0x80:(i+1)*0x80]
t_size = min(kavutil.get_uint16(pps, 0x40), 0x40)
if t_size != 0:
# 출력시 이름이 깨질 가능성이 큼
if ord(pps[0]) & 0xF0 == 0x00 and ord(pps[1]) == 0x00:
name = '_\x00' + pps[2:t_size-2]
else:
name = pps[0:t_size-2]
p['Name'] = DecodeStreamName(name).decode('UTF-16LE', 'replace')
else:
p['Name'] = ''
p['Type'] = ord(pps[0x42])
p['Prev'] = kavutil.get_uint32(pps, 0x44)
p['Next'] = kavutil.get_uint32(pps, 0x48)
p['Dir'] = kavutil.get_uint32(pps, 0x4c)
p['Start'] = kavutil.get_uint32(pps, 0x74)
p['Size'] = kavutil.get_uint32(pps, 0x78)
p['Valid'] = False
            # Check for CVE-2012-0158
            # The PPS contains the CLSID of the ListView.2 control
            # Reference : https://securelist.com/the-curious-case-of-a-cve-2012-0158-exploit/37158/
            # Reference : https://www.symantec.com/security_response/attacksignatures/detail.jsp?asid=25657
cve_clsids = ['\x4B\xF0\xD1\xBD\x8B\x85\xD1\x11\xB1\x6A\x00\xC0\xF0\x28\x36\x28',
'\xE0\xF5\x6B\x99\x44\x80\x50\x46\xAD\xEB\x0B\x01\x39\x14\xE9\x9C',
'\xE6\x3F\x83\x66\x83\x85\xD1\x11\xB1\x6A\x00\xC0\xF0\x28\x36\x28',
'\x5F\xDC\x81\x91\x7D\xE0\x8A\x41\xAC\xA6\x8E\xEA\x1E\xCB\x8E\x9E',
'\xB6\x90\x41\xC7\x89\x85\xD1\x11\xB1\x6A\x00\xC0\xF0\x28\x36\x28'
]
if pps[0x50:0x60] in cve_clsids:
self.exploit.append('Exploit.OLE.CVE-2012-0158')
return False
self.pps.append(p)
        # Validate the PPS tree
if self.__valid_pps_tree() is False:
return False
if self.verbose:
print
kavutil.vprint('Property Storage')
'''
print ' %-2s %-20s %4s %-8s %-8s %-8s %-8s %-8s' % ('No', 'Name', 'Type', 'Prev', 'Next', 'Dir', 'SB',
'Size')
print ' ' + ('-' * 74)
for p in self.pps:
print ' ' + '%2d %-23s %d %8X %8X %8X %8X %8d' % (self.pps.index(p), p['Name'], p['Type'], p['Prev'],
p['Next'], p['Dir'], p['Start'], p['Size'])
'''
print ' %-2s %-32s %4s %-4s %-4s %-4s %8s %8s' % ('No', 'Name', 'Type', 'Prev', 'Next', ' Dir', 'SB',
'Size')
print ' ' + ('-' * 74)
for p in self.pps:
if p['Valid'] is False: # 유효한 Tree가 아니면 다음
continue
t = ''
t += ' - ' if p['Prev'] == 0xffffffff else '%4d ' % p['Prev']
t += ' - ' if p['Next'] == 0xffffffff else '%4d ' % p['Next']
t += ' - ' if p['Dir'] == 0xffffffff else '%4d ' % p['Dir']
t += ' - ' if p['Start'] == 0xffffffff else '%8X ' % p['Start']
tname = p['Name'].encode(sys.stdout.encoding, 'replace')
print ' ' + '%2d %-35s %d %22s %8d' % (self.pps.index(p), tname, p['Type'], t, p['Size'])
        # Build the full PPS paths
self.__deep = 0
self.__full_list = []
try:
self.__get_pps_path()
except IndexError:
pass
        # Get the small block link chain
self.small_block = get_block_link(self.pps[0]['Start'], self.bbd_fat)
if self.verbose:
print
kavutil.vprint('Small Blocks')
print self.small_block
return True
# ---------------------------------------------------------------------
    # Check the validity of the PPS tree. (internal)
# ---------------------------------------------------------------------
def __valid_pps_tree(self):
scaned_pps_node = [0] # 이미 분석한 노드의 경우 더이상 분석하지 않기 위해 처리
f = []
if len(self.pps) == 0: # 분석된 PPS가 없으면 종료
return False
if self.pps[0]['Dir'] != 0xffffffff and self.pps[0]['Type'] == 5:
f.append(self.pps[0]['Dir'])
scaned_pps_node.append(self.pps[0]['Dir'])
self.pps[0]['Valid'] = True
if len(f) == 0: # 정상적인 PPS가 없음
return False
while len(f):
x = f.pop(0)
try:
if self.pps[x]['Type'] != 1 and self.pps[x]['Type'] != 2 and len(self.pps[x]['Name']) == 0:
continue
except IndexError:
if (x & 0x90900000) == 0x90900000: # CVE-2003-0820 취약점
self.exploit.append('Exploit.OLE.CVE-2003-0820')
return False
else: # CVE-2003-0347 취약점
self.exploit.append('Exploit.OLE.CVE-2003-0347')
return False
self.pps[x]['Valid'] = True
if self.pps[x]['Prev'] != 0xffffffff:
if self.pps[x]['Prev'] in scaned_pps_node:
self.pps[x]['Prev'] = 0xffffffff
else:
f.append(self.pps[x]['Prev'])
scaned_pps_node.append(self.pps[x]['Prev'])
if self.pps[x]['Next'] != 0xffffffff:
if self.pps[x]['Next'] in scaned_pps_node:
self.pps[x]['Next'] = 0xffffffff
else:
f.append(self.pps[x]['Next'])
scaned_pps_node.append(self.pps[x]['Next'])
if self.pps[x]['Dir'] != 0xffffffff:
if self.pps[x]['Dir'] in scaned_pps_node:
self.pps[x]['Dir'] = 0xffffffff
else:
f.append(self.pps[x]['Dir'])
scaned_pps_node.append(self.pps[x]['Dir'])
return True
# ---------------------------------------------------------------------
    # Build the full PPS paths (internal)
# ---------------------------------------------------------------------
def __get_pps_path(self, node=0, prefix=''):
if node == 0:
pps_name = ''
name = prefix + pps_name
else:
if self.pps[node]['Valid'] is False: # 유효한 PPS만 처리함
return 0
pps_name = self.pps[node]['Name'].encode('cp949', 'ignore')
name = prefix + '/' + pps_name
# print ("%02d : %d %s") % (node, self.deep, name)
# if self.pps[node]['Type'] != 5: # Stream만 저장
p = {'Node': node, 'Name': name[1:], 'Type': self.pps[node]['Type']}
self.__full_list.append(p)
if self.pps[node]['Dir'] != 0xFFFFFFFFL:
self.__deep += 1
self.__get_pps_path(self.pps[node]['Dir'], name)
self.__deep -= 1
if self.pps[node]['Prev'] != 0xFFFFFFFFL:
self.__get_pps_path(self.pps[node]['Prev'], prefix)
if self.pps[node]['Next'] != 0xFFFFFFFFL:
self.__get_pps_path(self.pps[node]['Next'], prefix)
return 0
# ---------------------------------------------------------------------
    # Get the full PPS paths (streams only by default)
# ---------------------------------------------------------------------
def listdir(self, streams=True, storages=False):
ret = []
for p in self.__full_list:
if p['Type'] == 2 and streams:
ret.append(p['Name'])
elif p['Type'] == 1 and storages:
ret.append(p['Name'])
else:
pass
return ret
# ---------------------------------------------------------------------
    # Does the stream exist?
# ---------------------------------------------------------------------
def exists(self, name):
for p in self.__full_list:
if p['Name'] == name:
return True
else:
return False
# ---------------------------------------------------------------------
    # Open a stream
# ---------------------------------------------------------------------
def openstream(self, name):
# -----------------------------------------------------------------
        # Dedicated stream class
# -----------------------------------------------------------------
class Stream:
def __init__(self, parent, node):
self.parent = parent
self.node = node
self.read_size = 0
self.fat = None
# print self.parent.verbose
            # Return a run of consecutive values as (start, end).
            # TODO : written quickly as a temporary version, needs optimization
def get_liner_value(self, num_list):
start = None
end = None
if not start:
start = num_list.pop(0)
e = start
loop = False
for x in num_list:
if e + 1 == x:
e = x
loop = True
continue
else:
while loop:
if e == num_list.pop(0):
break
end = e
break
else:
for i in range(len(num_list)):
num_list.pop(0)
end = e
return start, end
def read(self):
pps = self.parent.pps[self.node]
sb = pps['Start']
size = pps['Size']
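                # Streams of 0x1000 (4096) bytes or more are stored in big blocks via
                # the BBD; smaller streams live in the mini stream addressed by the SBD.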
if size >= 0x1000:
self.read_size = self.parent.bsize
self.fat = self.parent.bbd_fat
else:
self.read_size = self.parent.ssize
self.fat = self.parent.sbd_fat
list_array = get_block_link(sb, self.fat)
data = ''
if size >= 0x1000:
t_list = list(list_array)
while len(t_list):
s, e = self.get_liner_value(t_list) # 연속된 링크를 모두 수집해서 한꺼번에 파일로 읽기
off = (s + 1) * self.read_size
data += self.parent.mm[off:off + self.read_size * (e - s + 1)]
else:
for n in list_array:
div_n = self.parent.bsize / self.parent.ssize
off = (self.parent.small_block[n / div_n] + 1) * self.parent.bsize
off += (n % div_n) * self.parent.ssize
data += self.parent.mm[off:off + self.read_size]
if self.parent.verbose:
print
kavutil.vprint(pps['Name'])
kavutil.HexDump().Buffer(data, 0, 80)
return data[:size]
def close(self):
pass
# -----------------------------------------------------------------
for p in self.__full_list:
if p['Name'] == name:
no = p['Node']
break
else:
no = -1
if no == -1:
raise Error('PPS name is invalid.')
return Stream(self, no)
# ---------------------------------------------------------------------
    # Overwrite the data of a stream.
# ---------------------------------------------------------------------
def write_stream(self, name, data):
for p in self.__full_list:
if p['Name'] == name:
no = p['Node']
break
else:
no = -1
if no == -1:
raise Error('PPS name(%s) is invalid.' % name)
# self.init(self.mm)
# return
ow = OleWriteStream(self.mm, self.pps, self.bsize, self.ssize,
self.bbd, self.bbd_fat,
self.sbd, self.sbd_fat,
self.root_list_array, self.small_block, self.verbose)
t = ow.write(no, data)
if t:
self.init(t) # 새롭게 OLE 재로딩
# ---------------------------------------------------------------------
    # Delete a stream or a storage.
# ---------------------------------------------------------------------
def delete(self, name, delete_storage=False, reset_stream=False):
for p in self.__full_list:
if p['Name'] == name:
no = p['Node']
break
else:
no = -1
if no == -1:
raise Error('PPS name is invalid.')
# print no
ow = OleWriteStream(self.mm, self.pps, self.bsize, self.ssize,
self.bbd, self.bbd_fat,
self.sbd, self.sbd_fat,
self.root_list_array, self.small_block, self.verbose)
target_pps = self.pps[no]
if target_pps['Valid'] and target_pps['Type'] == 2: # 유효한 PPS에 대한 삭제인지 확인
if reset_stream:
size = target_pps['Size']
t = ow.write(no, '\x00' * size) # 모든 데이터를 0으로 Wipe
t = ow.delete(no)
if t:
self.init(t) # 새롭게 OLE 재로딩
elif target_pps['Valid'] and target_pps['Type'] == 1 and delete_storage: # 유효한 스토리지?
t = ow.delete(no) # 링크 삭제
if t:
self.init(t) # 새롭게 OLE 재로딩
# ---------------------------------------------------------------------
# OleWriteStream class
# ---------------------------------------------------------------------
class OleWriteStream:
def __init__(self, mm, pps, bsize, ssize, bbd, bbd_fat, sbd, sbd_fat, root_list_array, small_block, verbose):
self.verbose = verbose
self.mm = mm
self.pps = pps
self.bsize = bsize
self.ssize = ssize
self.bbd = bbd
self.bbd_fat = bbd_fat
self.sbd = sbd
self.sbd_fat = sbd_fat
self.root_list_array = root_list_array
self.small_block = small_block
def __get_root_node(self, node): # 해당 정보를 가진 root를 찾기
for i, pps in enumerate(self.pps):
if pps['Prev'] == node or pps['Next'] == node or pps['Dir'] == node:
return i
def __get_max_node(self, node): # 특정 노드의 Max 값을 가진 node를 찾기
no = node
while True:
pps = self.pps[no]
if pps['Next'] == 0xffffffff: # 더이상 오른쪽이 없으면 탐색 종료
break
else: # 항상 오른쪽 노드가 큰 값임
no = pps['Next']
return no
def delete(self, del_no):
del_pps = self.pps[del_no]
prev_no = del_pps['Prev']
next_no = del_pps['Next']
dir_no = del_pps['Dir']
# root를 찾기
root_no = self.__get_root_node(del_no)
# 양쪽 노드가 존재하는가?
if prev_no != 0xffffffff and next_no != 0xffffffff: # 양쪽 모두 노트가 존재함
# 1. prev 노드 값을 root로 보낸다.
t_no = prev_no
# 2. prev 노드 하위에 next가 없는 node를 찾아서 del_pps의 next_no를 등록한다.
blank_next_no = self.__get_max_node(prev_no)
self.__set_pps_header(blank_next_no, pps_next=next_no)
elif prev_no != 0xffffffff and next_no == 0xffffffff: # Prev만 존재
# 1. prev 노드 값을 root로 보낸다.
t_no = prev_no
elif prev_no == 0xffffffff and next_no != 0xffffffff: # Next만 존재
# 1. next 노드 값을 root로 보낸다.
t_no = next_no
else: # prev_no == 0xffffffff and next_no == 0xffffffff: # 단일 노드
# 1. 0xffffffff 노드 값을 root로 보낸다.
t_no = 0xffffffff
# root 노드를 수정한다.
pps = self.pps[root_no]
if pps['Prev'] == del_no:
self.__set_pps_header(root_no, pps_prev=t_no)
elif pps['Next'] == del_no:
self.__set_pps_header(root_no, pps_next=t_no)
else: # Dir
self.__set_pps_header(root_no, pps_dir=t_no)
# 삭제 노드 값은 모두 지우기
self.__set_pps_header(del_no, size=0, start=0xffffffff, pps_prev=0xffffffff, pps_next=0xffffffff,
pps_dir=0xffffffff, del_info=True)
return self.mm
def write(self, no, data):
        # Get the existing PPS info
org_sb = self.pps[no]['Start']
org_size = self.pps[no]['Size']
'''
if org_size >= 0x1000:
# read_size = self.bsize
fat = self.bbd
else:
# read_size = self.ssize
fat = self.sbd
# org_list_array = get_block_link(org_sb, fat)
'''
        # Prepare to write the modified data
if len(data) >= 0x1000: # BBD를 사용한다.
if org_size >= 0x1000: # 기존에는 BBD 사용
if org_size >= len(data):
# raise error('Not Support : BBD -> BBD (Dec)') # 개발 완료
n = (len(data) / self.bsize) + (1 if (len(data) % self.bsize) else 0)
t_data = data + ('\x00' * ((n * self.bsize) - len(data))) # 여분의 크기를 data 뒤쪽에 추가하기
t_link = get_block_link(org_sb, self.bbd_fat) # 이전 링크 수집하기
t_link = self.__decrease_bbd_link(t_link, n) # 필요한 개수로 링크 줄이기
# Big block 영역에 bsize 만큼씩 Overwrite
self.__write_data_to_big_block(t_data, t_link)
# PPS 크기 수정
self.__set_pps_header(no, size=len(data))
else:
# raise error('Not Support : BBD -> BBD (Inc)')
n = (len(data) / self.bsize) + (1 if (len(data) % self.bsize) else 0)
t_data = data + ('\x00' * ((n * self.bsize) - len(data))) # 여분의 크기를 data 뒤쪽에 추가하기
t_link = get_block_link(org_sb, self.bbd_fat) # 이전 링크 수집하기
t_num = 0
if (len(t_link) * self.bsize) < len(t_data): # 블록 추가해야 하나?
t_size = len(t_data) - (len(t_link) * self.bsize)
t_num = (t_size / self.bsize) + (1 if (t_size % self.bsize) else 0)
self.__add_big_block_num(t_num) # 필요한 블록 수 추가하기
# 수집된 마지막 링크 이후에 존재하는 사용하지 않는 블록을 수집한다.
t_link = self.__modify_big_block_link(t_link, t_num)
# Big block 영역에 bsize 만큼씩 Overwrite
self.__write_data_to_big_block(t_data, t_link)
# PPS 크기 수정
self.__set_pps_header(no, size=len(data))
else: # 기존에는 SBD 사용
# raise error('Not Support : SBD -> BBD') # 섹터가 변화는 것은 Dec, Inc가 의미 없음
n = (len(data) / self.bsize) + (1 if (len(data) % self.bsize) else 0)
t_data = data + ('\x00' * ((n * self.bsize) - len(data))) # 여분의 크기를 data 뒤쪽에 추가하기
t_num = len(t_data) / self.bsize # 몇개의 블록이 필요한가?
self.__add_big_block_num(t_num) # 필요한 블록 수 추가하기
# BBD 링크를 처음 생성하므로 이전 링크가 없다.
t_link = self.__modify_big_block_link(None, t_num)
# Big block 영역에 bsize 만큼씩 Overwrite
self.__write_data_to_big_block(t_data, t_link)
# PPS 크기 수정, start 블록 수정
self.__set_pps_header(no, size=len(data), start=t_link[0])
# 이전 SBD의 링크는 모두 삭제한다.
# t_link = get_block_link(org_sb, self.sbd) # 이전 링크 수집하기
t_link = get_block_link(org_sb, self.sbd_fat) # 이전 링크 수집하기
sbd = self.sbd
for no in t_link:
sbd = sbd[:no*4] + '\xff\xff\xff\xff' + sbd[(no+1)*4:]
self.__modify_sbd(sbd)
else: # SBD를 사용한다.
if org_size >= 0x1000: # 기존에는 BBD 사용
# raise error('Not Support : BBD -> SBD') # 섹터가 변화는 것은 Dec, Inc가 의미 없음
n = (len(data) / self.ssize) + (1 if (len(data) % self.ssize) else 0)
t_data = data + ('\x00' * ((n * self.ssize) - len(data))) # 여분의 크기를 data 뒤쪽에 추가하기
t_num = len(t_data) / self.ssize # 몇개의 블록이 필요한가?
self.__add_small_block_num(t_num) # 필요한 블록 수 추가하기
# SBD 링크를 처음 생성하므로 이전 링크가 없다.
t_link = self.__modify_small_block_link(None, t_num)
bbd_list_array, _, _, _ = get_bbd_list_array(self.mm)
self.bbd = ''
for i in range(len(bbd_list_array)/4):
n = kavutil.get_uint32(bbd_list_array, i*4)
self.bbd += get_bblock(self.mm, n, self.bsize)
# 새로운 Small Block 링크가 필요하다
self.small_block = get_block_link(self.pps[0]['Start'], self.bbd_fat)
# Small block 영역에 ssize 만큼씩 Overwrite
                self.__write_data_to_small_block(t_data, t_link)
# PPS 크기 수정, start 블록 수정
self.__set_pps_header(no, size=len(data), start=t_link[0])
# 이전 BBD의 링크는 모두 삭제한다.
# t_link = get_block_link(org_sb, self.bbd) # 이전 링크 수집하기
t_link = get_block_link(org_sb, self.bbd_fat) # 이전 링크 수집하기
bbd = self.bbd
for no in t_link:
bbd = bbd[:no*4] + '\xff\xff\xff\xff' + bbd[(no+1)*4:]
self.__modify_bbd(bbd)
else: # 기존에는 SBD 사용
if org_size >= len(data):
# raise error('Not Support : SBD -> SBD (Dec)') # 지원 완료
n = (len(data) / self.ssize) + (1 if (len(data) % self.ssize) else 0)
t_data = data + ('\x00' * ((n*self.ssize) - len(data))) # 여분의 크기를 data 뒤쪽에 추가하기
t_link = get_block_link(org_sb, self.sbd_fat) # 이전 링크 수집하기
t_link = self.__decrease_sbd_link(t_link, n) # 필요한 개수로 링크 줄이기
# Small block 영역에 ssize 만큼씩 Overwrite
                    self.__write_data_to_small_block(t_data, t_link)
# PPS 크기 수정
self.__set_pps_header(no, size=len(data))
else:
# raise error('Not Support : SBD -> SBD (Inc)') # 작업 완료
n = (len(data) / self.ssize) + (1 if (len(data) % self.ssize) else 0)
t_data = data + ('\x00' * ((n*self.ssize) - len(data))) # 여분의 크기를 data 뒤쪽에 추가하기
# t_link = get_block_link(org_sb, self.sbd) # 이전 링크 수집하기
t_link = get_block_link(org_sb, self.sbd_fat) # 이전 링크 수집하기
t_num = 0
if (len(t_link) * self.ssize) < len(t_data): # 블록 추가해야 하나?
t_size = len(t_data) - (len(t_link) * self.ssize)
t_num = (t_size / self.ssize) + (1 if (t_size % self.ssize) else 0)
self.__add_small_block_num(t_num) # 필요한 블록 수 추가하기
# 수집된 마지막 링크 이후에 존재하는 사용하지 않는 블록을 수집한다.
t_link = self.__modify_small_block_link(t_link, t_num)
# Small block 갱신
self.bbd_fat = {}
for i in range(len(self.bbd) / 4):
n = kavutil.get_uint32(self.bbd, i * 4)
self.bbd_fat[i] = n
self.small_block = get_block_link(self.pps[0]['Start'], self.bbd_fat)
# Small block 영역에 ssize 만큼씩 Overwrite
                    self.__write_data_to_small_block(t_data, t_link)
# PPS 크기 수정
self.__set_pps_header(no, size=len(data))
return self.mm
# ---------------------------------------------------------------------
    # Write data along a big block link chain (internal)
# ---------------------------------------------------------------------
def __write_data_to_big_block(self, t_data, t_link):
for i, n in enumerate(t_link):
off = (n + 1) * self.bsize
self.mm = self.mm[:off] + t_data[i * self.bsize:(i + 1) * self.bsize] + self.mm[off + self.bsize:]
# ---------------------------------------------------------------------
    # Write data along a small block link chain (internal)
# ---------------------------------------------------------------------
    def __write_data_to_small_block(self, t_data, t_link):
for i, n in enumerate(t_link):
off = (self.small_block[n / 8] + 1) * self.bsize
off += (n % 8) * self.ssize
self.mm = self.mm[:off] + t_data[i * self.ssize:(i + 1) * self.ssize] + self.mm[off + self.ssize:]
# ---------------------------------------------------------------------
    # Overwrite one big block at a specific position in the OLE image (internal)
# ---------------------------------------------------------------------
def __set_bblock(self, no, data):
off = (no + 1) * self.bsize
if len(data) == self.bsize:
self.mm = self.mm[:off] + data + self.mm[off+self.bsize:]
return True
return False
# ---------------------------------------------------------------------
    # Adjust the header fields of a specific stream's PPS entry. (internal)
    # node : PPS index
    # size : size to set
    # start : start block link
# ---------------------------------------------------------------------
def __set_pps_header(self, node, size=None, start=None, pps_prev=None, pps_next=None, pps_dir=None, del_info=False):
n = self.root_list_array[node / 4]
buf = get_bblock(self.mm, n, self.bsize)
off = ((node % 4) * 0x80)
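        # Each PPS entry is 0x80 bytes, so four entries fit in a 512-byte block; field
        # offsets inside an entry: 0x44 prev, 0x48 next, 0x4C dir, 0x74 start, 0x78 size.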
if del_info and off == 0x180:
buf = buf[:off] + '\x00' * 0x80
elif del_info:
buf = buf[:off] + '\x00' * 0x80 + buf[off+0x80:]
if size is not None:
t_off = off + 0x78
buf = buf[:t_off] + struct.pack('<L', size) + buf[t_off + 4:]
if start is not None:
t_off = off + 0x74
buf = buf[:t_off] + struct.pack('<L', start) + buf[t_off + 4:]
if pps_prev is not None:
t_off = off + 0x44
buf = buf[:t_off] + struct.pack('<L', pps_prev) + buf[t_off + 4:]
if pps_next is not None:
t_off = off + 0x48
buf = buf[:t_off] + struct.pack('<L', pps_next) + buf[t_off + 4:]
if pps_dir is not None:
t_off = off + 0x4C
buf = buf[:t_off] + struct.pack('<L', pps_dir) + buf[t_off + 4:]
self.__set_bblock(n, buf)
if self.verbose:
print
buf = get_bblock(self.mm, n, self.bsize)
kavutil.HexDump().Buffer(buf, 0, 0x200)
# ---------------------------------------------------------------------
    # Shrink an SBD link chain
    # org_link_list : existing small block link list
    # num_link : total number of links required
# ---------------------------------------------------------------------
def __decrease_sbd_link(self, org_link_list, num_link):
if len(org_link_list) > num_link:
# SBD를 배열로 바꾸기
t_link = []
for i in range(len(self.sbd) / 4):
t_link.append(kavutil.get_uint32(self.sbd, i * 4))
t = org_link_list[num_link:]
org_link_list = org_link_list[:num_link]
t_link[t[0]] = 0xfffffffe # 링크 끝 설정하기
# 남은 링크는 모두 0xffffffff로 설정하기
for i in t[1:]:
t_link[i] = 0xffffffff
# SBD 배열을 SBD 버퍼로 바꾸기
self.sbd = ''
for i in t_link:
self.sbd += struct.pack('<L', i)
# self.mm에 SBD 적용하기
sbd_startblock = kavutil.get_uint32(self.mm, 0x3c)
sbd_list_array = get_block_link(sbd_startblock, self.bbd_fat)
for i, n in enumerate(sbd_list_array):
self.__set_bblock(n, self.sbd[i*self.bsize:(i+1)*self.bsize])
return org_link_list
elif len(org_link_list) == num_link:
return org_link_list
else:
raise Error('Invalid call')
# ---------------------------------------------------------------------
    # Shrink a BBD link chain
    # org_link_list : existing big block link list
    # num_link : total number of links required
# ---------------------------------------------------------------------
def __decrease_bbd_link(self, org_link_list, num_link):
if len(org_link_list) > num_link:
# BBD를 배열로 바꾸기
t_link = []
for i in range(len(self.bbd) / 4):
t_link.append(kavutil.get_uint32(self.bbd, i * 4))
t = org_link_list[num_link:]
org_link_list = org_link_list[:num_link]
t_link[t[0]] = 0xfffffffe # 링크 끝 설정하기
# 남은 링크는 모두 0xffffffff로 설정하기
for i in t[1:]:
t_link[i] = 0xffffffff
# BBD 배열을 BBD 버퍼로 바꾸기
self.bbd = ''
for i in t_link:
self.bbd += struct.pack('<L', i)
# self.mm에 BBD 적용하기
t, num_of_bbd_blocks, num_of_xbbd_blocks, xbbd_start_block = \
get_bbd_list_array(self.mm, self.verbose)
bbd_list_array = []
for i in range(len(t) / 4):
bbd_list_array.append(kavutil.get_uint32(t, i * 4))
for i, n in enumerate(bbd_list_array):
self.__set_bblock(n, self.bbd[i*self.bsize:(i+1)*self.bsize])
return org_link_list
elif len(org_link_list) == num_link:
return org_link_list
else:
raise Error('Invalid call')
# ---------------------------------------------------------------------
    # Add the given number of big blocks.
    # num : number of big blocks to add
# ---------------------------------------------------------------------
def __add_big_block_num(self, num):
size = (len(self.mm) / self.bsize) * self.bsize # 파일 크기
self.mm = self.mm[:size] # 뒤쪽 쓸모 없는 부분은 제거
attach_data = self.mm[size:] # 파일 뒤에 붙어 있는 잔여 데이터
# 전체 BBD 링크를 구한다
bbd_list_array, num_of_bbd_blocks, _, _ = get_bbd_list_array(self.mm)
# BBD를 모은다
bbd = ''
for i in range(num_of_bbd_blocks):
no = kavutil.get_uint32(bbd_list_array, i*4)
bbd += get_bblock(self.mm, no, self.bsize)
bbd_link = []
for i in range(len(bbd) / 4):
bbd_link.append(kavutil.get_uint32(bbd, i*4))
# 사용하지 않는 BBD 링크를 찾는다.
free_link = [i for i, no in enumerate(bbd_link) if (no == 0xffffffff and i < size / self.bsize)]
if len(free_link) >= num: # 여유분이 충분히 존재함...
return # 추가할 필요 없음
# 잔여 개수 체크하기
last_no = (size / self.bsize) - 2 # 실제 마지막 Big Block 번호
n = (len(self.bbd) / 4 - 1) - last_no
if n >= num:
# 잔여 개수가 추가하려는 개수보다 많거나 같으면 추가 블록 개수만 파일 뒤에 추가하기
self.mm += '\x00' * self.bsize * num # 실제 필요한 데이터 블록
self.mm += attach_data
else:
special_no = [] # 특수 목적의 Big Block 번호. 해당 블록은 0xfffffffd로 처리해야 함
x_data = ''
# b_data = ''
# add_data = ''
add_num = num - n # 추가해야 할 블록 수
add_data = ('\x00' * self.bsize * add_num)
# 추가해야 할 BBD list 개수는 한개의 BBD에는 bsize / 4 개수만큼 Big Block을 담을 수 있음
b_num = (add_num / (self.bsize/4)) + (1 if (add_num % (self.bsize/4)) else 0)
old_num_bbd = kavutil.get_uint32(self.mm, 0x2c)
xbbd_start_block = kavutil.get_uint32(self.mm, 0x44)
num_of_xbbd_blocks = kavutil.get_uint32(self.mm, 0x48)
# 추가적인 Big Block을 계산한다. BBD List와 XBBD 블록도 추가될 수 있기 때문에...
old_b_num = b_num
while True:
if old_num_bbd + b_num > 109:
t_num = (old_num_bbd + b_num - 109)
total_xbbd_num = (t_num / ((self.bsize - 4) / 4)) + (1 if (t_num % ((self.bsize - 4) / 4)) else 0)
x_num = total_xbbd_num - num_of_xbbd_blocks # 추가해야 할 XBBD 개수
add_num += x_num
b_num = (add_num / (self.bsize / 4)) + (1 if (add_num % (self.bsize / 4)) else 0)
if old_b_num == b_num:
break
else:
old_b_num = b_num
total_bbd_num = old_num_bbd + b_num # 전체 BBD list 개수
self.mm = self.mm[:0x2c] + struct.pack('<L', total_bbd_num) + self.mm[0x30:]
last_no += 1
# XBBD 처리하기
if total_bbd_num > 109:
t_num = (total_bbd_num - 109)
total_xbbd_num = (t_num / ((self.bsize - 4) / 4)) + (1 if (t_num % ((self.bsize - 4) / 4)) else 0)
x_num = total_xbbd_num - num_of_xbbd_blocks # 추가해야 할 XBBD 개수
# XBBD를 위한 헤더 수정
if num_of_xbbd_blocks == 0:
data = struct.pack('<LL', last_no, total_xbbd_num)
self.mm = self.mm[:0x44] + data + self.mm[0x4C:]
else:
data = struct.pack('<L', total_xbbd_num)
self.mm = self.mm[:0x48] + data + self.mm[0x4C:]
# XBBD 블록 연결
next_b = xbbd_start_block
if num_of_xbbd_blocks == 1:
t_data = get_bblock(self.mm, next_b, self.bsize)
else:
t_data = ''
for i in range(num_of_xbbd_blocks-1):
t_data = get_bblock(self.mm, next_b, self.bsize)
next_b = kavutil.get_uint32(t_data, self.bsize-4)
# 기존 XBBD 마지막에 새로운 XBBD 링크 추가
t_data = t_data[:-4] + struct.pack('<L', last_no)
off = (next_b + 1) * self.bsize # t_data의 위치
self.mm = self.mm[:off] + t_data + self.mm[off + self.bsize:]
# XBBD 생성하기
for i in range(x_num):
x_data += '\xff\xff\xff\xff' * ((self.bsize/4) - 1)
if i != (x_num-1):
x_data += struct.pack('<L', last_no+1) # 다음 블록을 가리켜야 함으로 1를 더함
else:
x_data += '\xfe\xff\xff\xff' # 마지막 블록의 링크는 끝을 처리함
special_no.append(last_no) # 특수 블록 등록
last_no += 1
# END of XBBD
# BBD 추가하기
bbd_no = []
b_data = '\xff' * self.bsize * b_num
for i in range(b_num):
bbd_no.append(last_no)
last_no += 1
# 최종 조합
self.mm += x_data + b_data + add_data + attach_data
# 특수 블록에 BBD list도 추가
special_no += bbd_no
# 특수 블록 처리 (bbd_list_array, num_of_bbd_blocks, num_of_xbbd_blocks, xbbd_start_block)
bbd_list_array, num_of_bbd_blocks, _, _ = get_bbd_list_array(self.mm)
bb_num = (self.bsize/4) # 한개의 BBD list 블록에 들어갈 수 있는 Big Block 개수
for no in special_no:
seg = no / bb_num
off = no % bb_num
# print hex(no), hex(seg), hex(off), hex(kavutil.get_uint32(bbd_list_array, seg*4))
t_no = kavutil.get_uint32(bbd_list_array, seg*4)
t_off = ((t_no + 1) * self.bsize) + (off * 4)
self.mm = self.mm[:t_off] + '\xfd\xff\xff\xff' + self.mm[t_off+4:]
# print repr(self.mm[t_off:t_off+4])
# t = get_bblock(self.mm, t_no, self.bsize)
# print repr(t)
# t = kavutil.get_uint32(t, off*4)
# print hex(t)
# BBD List에 BBD 등록하기
for i, no in enumerate(bbd_no):
off = get_bbd_list_index_to_offset(self.mm, old_num_bbd + i)
# print hex(off)
self.mm = (self.mm[:off] + struct.pack('<L', no) + self.mm[off+4:])
# ---------------------------------------------------------------------
    # Add the given number of small blocks.
    # num : number of small blocks to add
# ---------------------------------------------------------------------
def __add_small_block_num(self, num):
root = self.pps[0]
r_size = root['Size']
r_no = root['Start']
# SBD 링크를 생성한다.
sbd_link = []
for i in range(len(self.sbd) / 4):
sbd_link.append(kavutil.get_uint32(self.sbd, i*4))
# 사용하지 않는 SBD 링크를 찾는다.
free_link = [i for i, no in enumerate(sbd_link) if (no == 0xffffffff and i < r_size / self.ssize)]
if len(free_link) >= num: # 여유분이 충분히 존재함...
return # 추가할 필요 없음
else: # 여유분이 부족함. 따라서 Root를 늘려야 함
size = num * self.ssize # 추가해야 할 용량
add_big_num = (size / self.bsize) + (1 if (size % self.bsize) else 0) # 추가해야 할 Big Block 개수
self.__add_big_block_num(add_big_num) # Big Block 추가 요청
# t_link = get_block_link(r_no, self.bbd) # 이전 Small Block의 링크를 구함
t_link = get_block_link(r_no, self.bbd_fat) # 이전 Small Block의 링크를 구함
self.__modify_big_block_link(t_link, add_big_num) # 이전 링크에 필요한 블록 수 추가하여 링크를 새롭게 생성
# Root 크기 수정
self.__set_pps_header(0, size=r_size + add_big_num * self.bsize)
# ---------------------------------------------------------------------
    # Request additional BBD links. (the BBD links in the original image are modified)
    # old_link : existing BBD links
    # add_num : number of BBD links to add
# ---------------------------------------------------------------------
def __modify_big_block_link(self, old_link, add_num):
if add_num < 0:
return []
# 전체 BBD 링크를 구한다
bbd_list_array, num_of_bbd_blocks, _, _ = get_bbd_list_array(self.mm)
# BBD를 모은다
bbd = ''
for i in range(num_of_bbd_blocks):
no = kavutil.get_uint32(bbd_list_array, i*4)
bbd += get_bblock(self.mm, no, self.bsize)
if self.verbose:
open('bbd.dm2', 'wb').write(bbd)
bbd_link = []
for i in range(len(bbd) / 4):
bbd_link.append(kavutil.get_uint32(bbd, i*4))
# 사용하지 않는 BBD 링크를 찾는다.
free_link = [i for i, no in enumerate(bbd_link) if (no == 0xffffffff)]
if old_link:
ret_link = old_link + free_link[:add_num] # 최종 결과의 BBD 링크
t_link = old_link[-1:] + free_link[:add_num] # BBD에 링크 연결하기
else:
# 이전 링크가 없다면...
ret_link = free_link[:add_num] # 최종 결과의 BBD 링크
t_link = free_link[:add_num] # BBD에 링크 연결하기
for i in range(len(t_link)-1):
no = t_link[i+1]
data = struct.pack('<L', no)
no = t_link[i]
bbd = bbd[:no*4] + data + bbd[(no+1)*4:]
no = t_link[-1]
bbd = bbd[:no * 4] + '\xfe\xff\xff\xff' + bbd[(no + 1) * 4:]
if self.verbose:
open('bbd.dm3', 'wb').write(bbd)
# 원래 이미지에 BBD 덮어쓰기
self.__modify_bbd(bbd)
return ret_link # 연결된 링크
# ---------------------------------------------------------------------
    # Request additional SBD links. (the SBD links in the original image are modified)
    # old_link : existing SBD links
    # add_num : number of SBD links to add
# ---------------------------------------------------------------------
def __modify_small_block_link(self, old_link, add_num):
if add_num < 0:
return []
sbd = self.sbd
if self.verbose:
open('sbd.dm2', 'wb').write(sbd)
# SBD 링크를 생성한다.
sbd_link = []
for i in range(len(sbd) / 4):
sbd_link.append(kavutil.get_uint32(sbd, i*4))
# 사용하지 않는 SBD 링크를 찾는다.
free_link = [i for i, no in enumerate(sbd_link) if (no == 0xffffffff)]
if old_link:
ret_link = old_link + free_link[:add_num] # 최종 결과의 SBD 링크
t_link = old_link[-1:] + free_link[:add_num] # SBD에 링크 연결하기
else:
# 이전 링크가 없다면...
ret_link = free_link[:add_num] # 최종 결과의 BBD 링크
t_link = free_link[:add_num] # BBD에 링크 연결하기
for i in range(len(t_link)-1):
no = t_link[i+1]
data = struct.pack('<L', no)
no = t_link[i]
sbd = sbd[:no*4] + data + sbd[(no+1)*4:]
no = t_link[-1]
sbd = sbd[:no * 4] + '\xfe\xff\xff\xff' + sbd[(no + 1) * 4:]
# SBD가 나누어 bsize 단위가 아니면 맞춘다.
n = len(sbd) % self.bsize
if n:
t = self.bsize - n
sbd += '\xff' * t
if self.verbose:
open('sbd.dm3', 'wb').write(sbd)
self.__modify_sbd(sbd) # 수정된 SDB 적용하기
return ret_link # 연결된 링크
# ---------------------------------------------------------------------
    # Apply a modified SBD.
    # sbd : modified SBD image
# ---------------------------------------------------------------------
def __modify_sbd(self, sbd):
# 원래 이미지에 SBD 덮어쓰기
sbd_no = kavutil.get_uint32(self.mm, 0x3c)
# sbd_list_array = get_block_link(sbd_no, self.bbd)
sbd_list_array = get_block_link(sbd_no, self.bbd_fat)
# print sbd_list_array
for i, no in enumerate(sbd_list_array):
data = sbd[i*self.bsize:(i+1)*self.bsize]
off = (no + 1) * self.bsize
self.mm = self.mm[:off] + data + self.mm[off+self.bsize:]
# ---------------------------------------------------------------------
    # Apply a modified BBD.
    # bbd : modified BBD image
# ---------------------------------------------------------------------
def __modify_bbd(self, bbd):
self.bbd = bbd # 체크 !!!
bbd_list_array, _, _, _ = get_bbd_list_array(self.mm)
for i in range(len(bbd_list_array) / 4):
no = kavutil.get_uint32(bbd_list_array, i * 4)
data = bbd[i * self.bsize:(i + 1) * self.bsize]
off = (no + 1) * self.bsize
self.mm = self.mm[:off] + data + self.mm[off + self.bsize:]
if __name__ == '__main__':
# import zlib
# o = OleFile('normal.hwp', write_mode=True, verbose=True)
o = OleFile('a82d381c20cfdf47d603b4b2b840136ed32f71d2757c64c898dc209868bb57d6', write_mode=True, verbose=True)
print o.listdir()
o.delete('_VBA_PROJECT_CUR/VBA') # Root 수정, Next 수정
o.close()
'''
o = OleFile('normal.hwp', verbose=True)
pics = o.openstream('PrvImage')
print get_block_link(o.pps[6]['Start'], o.sbd)
# d2 = pics.read()
o.close()
'''
# XBBD 늘어나는 경우
# o = OleFile('xbbd2.ppt', write_mode=True, verbose=True)
# o.test()
'''
# 늘어나는건 경우의 수가 너무 많음
o = OleFile('normal.hwp', write_mode=True, verbose=True)
pics = o.openstream('FileHeader')
d = pics.read()
d = d + d
o.write_stream('FileHeader', d)
o.close()
'''
'''
# case1
o = OleFile('normal.hwp', write_mode=True, verbose=True)
pics = o.openstream('Scripts/DefaultJScript')
d = pics.read()
d = zlib.decompress(d, -15)
d = d.replace(b'v\x00a\x00r', b'f\x00o\x00o') # var -> foo
d = zlib.compress(d)[2:]
o.write_stream('Scripts/DefaultJScript', d)
o.close()
'''
# -------------------------------------------------------------------------
# KavMain class
# -------------------------------------------------------------------------
class KavMain:
# ---------------------------------------------------------------------
# init(self, plugins_path)
    # Initialize the plugin engine.
    # Input  : plugins_path - location of the plugin engine
    #          verbose - debug mode (True or False)
    # Return : 0 - success, non-zero - failure
# ---------------------------------------------------------------------
def init(self, plugins_path, verbose=False): # 플러그인 엔진 초기화
self.handle = {}
self.verbose = verbose
return 0 # 플러그인 엔진 초기화 성공
# ---------------------------------------------------------------------
# uninit(self)
    # Shut down the plugin engine.
    # Return : 0 - success, non-zero - failure
# ---------------------------------------------------------------------
def uninit(self): # 플러그인 엔진 종료
return 0 # 플러그인 엔진 종료 성공
# ---------------------------------------------------------------------
# getinfo(self)
    # Report key information about the plugin engine. (author, version, ...)
    # Return : plugin engine information
# ---------------------------------------------------------------------
def getinfo(self): # 플러그인 엔진의 주요 정보
info = dict() # 사전형 변수 선언
info['author'] = '<NAME>' # 제작자
info['version'] = '1.1' # 버전
info['title'] = 'OLE Library' # 엔진 설명
info['kmd_name'] = 'ole' # 엔진 파일 이름
info['make_arc_type'] = kernel.MASTER_PACK # 악성코드 치료 후 재압축 유무
info['sig_num'] = len(self.listvirus()) # 진단/치료 가능한 악성코드 수
return info
# ---------------------------------------------------------------------
# listvirus(self)
    # Report the list of malware that can be detected/disinfected.
    # Return : list of malware names
# ---------------------------------------------------------------------
def listvirus(self): # 진단 가능한 악성코드 리스트
vlist = list() # 리스트형 변수 선언
vlist.append('Exploit.OLE.CVE-2012-0158') # 진단/치료하는 악성코드 이름 등록
vlist.append('Exploit.OLE.CVE-2003-0820')
vlist.append('Exploit.OLE.CVE-2003-0347')
vlist.sort()
return vlist
# ---------------------------------------------------------------------
# format(self, filehandle, filename, filename_ex)
    # Analyze the file format.
    # Input  : filehandle - file handle
    #          filename - file name
    #          filename_ex - name of the file inside an archive
    # Return : {file format analysis info} or None
# ---------------------------------------------------------------------
def format(self, filehandle, filename, filename_ex):
ret = {}
mm = filehandle
# OLE 헤더와 동일
if mm[:8] == '\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1':
ret['ff_ole'] = 'OLE'
# OLE 뒤에 첨부된 파일이 있는지를 조사한다.
fsize = len(mm)
bsize = 1 << kavutil.get_uint16(mm, 0x1e)
rsize = (fsize / bsize) * bsize
if fsize > rsize:
fileformat = { # 포맷 정보를 담을 공간
'Attached_Pos': rsize,
'Attached_Size': fsize - rsize
}
ret['ff_attach'] = fileformat
# HWP 인가?
o = OleFile(filename)
try:
pics = o.openstream('FileHeader')
d = pics.read()
if d[:0x11] == 'HWP Document File':
val = ord(d[0x24])
ret['ff_hwp'] = {'compress': (val & 0x1 == 0x1),
'encrypt': (val & 0x2 == 0x2),
'viewtext': (val & 0x4 == 0x4)}
except Error:
pass
o.close()
return ret
# ---------------------------------------------------------------------
# __get_handle(self, filename)
    # Get a handle to the archive file.
    # Input  : filename - file name
    # Return : archive file handle
# ---------------------------------------------------------------------
def __get_handle(self, filename):
if filename in self.handle: # 이전에 열린 핸들이 존재하는가?
zfile = self.handle.get(filename, None)
else:
zfile = OleFile(filename, verbose=self.verbose) # ole 파일 열기
self.handle[filename] = zfile
return zfile
# ---------------------------------------------------------------------
# arclist(self, filename, fileformat)
    # Get the list of files inside the archive.
    # Input  : filename - file name
    #          fileformat - file format analysis info
    # Return : [[archive engine ID, name of file in archive]]
# ---------------------------------------------------------------------
def arclist(self, filename, fileformat):
file_scan_list = [] # 검사 대상 정보를 모두 가짐
# 미리 분석된 파일 포맷중에 OLE 파일 포맷이 있는가?
if 'ff_ole' in fileformat:
try:
# OLE Stream 목록 추출하기
o = self.__get_handle(filename)
for name in o.listdir():
file_scan_list.append(['arc_ole', name])
return file_scan_list
except:
pass
return []
# ---------------------------------------------------------------------
# unarc(self, arc_engine_id, arc_name, fname_in_arc)
    # Input  : arc_engine_id - archive engine ID
    #          arc_name - archive file name
    #          fname_in_arc - name of the file to extract
    # Return : extracted content or None
# ---------------------------------------------------------------------
def unarc(self, arc_engine_id, arc_name, fname_in_arc):
data = None
if arc_engine_id == 'arc_ole':
o = self.__get_handle(arc_name)
fp = o.openstream(fname_in_arc)
try:
data = fp.read()
except:
data = None
return data
# ---------------------------------------------------------------------
# arcclose(self)
    # Close the archive file handles.
# ---------------------------------------------------------------------
def arcclose(self):
for fname in self.handle.keys():
zfile = self.handle[fname]
zfile.close()
self.handle.pop(fname)
# ---------------------------------------------------------------------
# mkarc(self, arc_engine_id, arc_name, file_infos)
    # Input  : arc_engine_id - ID of the engine able to archive
    #          arc_name - name of the archive file to produce
    #          file_infos - structures describing the files to archive
    # Return : whether archiving succeeded (True or False)
# ---------------------------------------------------------------------
def mkarc(self, arc_engine_id, arc_name, file_infos):
if arc_engine_id == 'arc_ole':
o = OleFile(arc_name, write_mode=True) # , verbose=True)
# zfile = zipfile.ZipFile(arc_name, 'w')
for file_info in file_infos:
rname = file_info.get_filename()
a_name = file_info.get_filename_in_archive()
try:
if os.path.exists(rname):
with open(rname, 'rb') as fp:
buf = fp.read()
# print '[-] filename :', rname, len(buf)
# print '[-] rname :',
o.write_stream(a_name, buf)
# zfile.writestr(a_name, buf)
else:
# 삭제 처리
o.delete(a_name)
except IOError:
# print file_info.get_filename_in_archive()
pass
o.close()
# zfile.close()
return True
return False
| 1.984375 | 2 |
segmentation.py | chngchinboon/necklysis | 0 | 12797302 | import matplotlib.pyplot as plt
# import pydicom
import os
from pydicom.filereader import dcmread, read_dicomdir
from glob import glob
import cv2
import numpy as np
cv2.destroyAllWindows()
# window prop
screensize = ((-1440,0),(0,900))
screenwidth = screensize[0][1]-screensize[0][0]
screenheight = screensize[1][1]-screensize[1][0]
headertop = 30
headerbottom = 8
headerside = 8
n = 3
m = 2
windowwidth = int((screenwidth - n * headerside*2)/ n)
windowheight = int((screenheight - m * (headertop + headerbottom)) /m)
# input directory
dicom_dir = r"E:\BTSynchSGH\datasets\necklysis\input\dicom"
fps = glob(os.path.join(dicom_dir,"*.dcm"))
ds_list = [dcmread(filename) for filename in fps]
# select image
image = ds_list[10].pixel_array
# image details
image_height, image_width = image.shape
# image pre-processing
image_norm = cv2.normalize(image, dst=None, alpha=0, beta=65536, norm_type=cv2.NORM_MINMAX)  # stretch the intensity range so the image is easier to see
image_norm_uint8 = cv2.convertScaleAbs(image_norm)
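# cv2.convertScaleAbs produces an 8-bit copy; cv2.Canny below only accepts 8-bit input.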
min_head_thresh = 10000
max_head_thresh = 65535
# get outline of head
ret, image_thresh = cv2.threshold(image_norm,min_head_thresh, max_head_thresh, cv2.THRESH_TOZERO)
image_thresh_uint8 = cv2.convertScaleAbs(image_thresh)
image_canny = cv2.Canny(image_thresh_uint8,100,150)
# get contour
im2, contours, hierarchy = cv2.findContours(image_canny,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
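# Note: the three-value unpacking matches OpenCV 3.x; OpenCV 4.x returns only
# (contours, hierarchy) from cv2.findContours.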
image_norm_3chan = np.stack([image_norm]*3,axis=-1)
# get largest contour
perimeter = [cv2.arcLength(cnt,True) for cnt in contours]
idx_max = np.argmax(np.array(perimeter))
image_contours = cv2.drawContours(image_norm_3chan.copy(), [contours[idx_max]], 0, (0,65535,0), 3)
# display process images
# original image
cv2.namedWindow("image_norm",cv2.WINDOW_NORMAL)
cv2.moveWindow("image_norm",screensize[0][0],0)
cv2.resizeWindow("image_norm",(windowwidth,windowheight))
cv2.imshow("image_norm", image_norm)
# canny
cv2.namedWindow("image_canny",cv2.WINDOW_NORMAL)
cv2.imshow("image_canny", image_canny)
cv2.resizeWindow("image_canny",(windowwidth,windowheight))
cv2.moveWindow("image_canny",screensize[0][0]+(windowwidth+headerside*2),0)
# contours
cv2.namedWindow("contours",cv2.WINDOW_NORMAL)
cv2.imshow("contours", image_contours)
cv2.resizeWindow("contours",(windowwidth,windowheight))
cv2.moveWindow("contours",screensize[0][0]+(windowwidth+headerside)*2,0)
# cv2.waitKey(1)
# cv2.destroyAllWindows()
| 2.53125 | 3 |
week_10/extra_problems/server.py | vlasenckov/MIPT_py_3_term | 5 | 12797303 | import socket
import threading
import multiprocessing
import os
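# Pre-fork + thread-per-connection model: several worker processes share one listening
# socket; each accepted connection is then handled in its own thread.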
def worker(sock):
while True:
conn, addr = sock.accept()
print("PID:", os.getpid())
thread = threading.Thread(target=process_request, args=(conn, addr))
thread.start()
def process_request(conn, addr):
print("addr: ", addr)
with conn:
while True:
data = conn.recv(1024)
if not data:
break
print(data.decode("utf-8"))
if __name__ == "__main__":
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.bind(("127.0.0.1", 10001))
sock.listen(socket.SOMAXCONN)
PROCESS_COUNT = 6
process_list = [multiprocessing.Process(target=worker,
args=(sock,)) for _ in range(PROCESS_COUNT)]
for process in process_list:
process.start()
for process in process_list:
process.join()
| 3.046875 | 3 |
plot_bathymetry.py | sustain-lab/multibeam-em712 | 0 | 12797304 | import glob
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import xarray as xr
from mpl_toolkits.basemap import Basemap
import gc
import matplotlib
matplotlib.rc('font', size=12)
data_path = 'processed_netcdf'
multibeam_files = glob.glob(data_path + '/*.nc')
multibeam_files.sort()
lon0, lon1 = -122.2, -121.7
lat0, lat1 = 36.6, 37.
parallels = np.arange(lat0, lat1 + 0.1, 0.1)
meridians = np.arange(lon0, lon1 + 0.1, 0.1)
fig = plt.figure(figsize=(8, 6))
map = Basemap(llcrnrlon=lon0, llcrnrlat=lat0, urcrnrlon=lon1, urcrnrlat=lat1, \
resolution='f')
map.drawcoastlines()
map.drawparallels(parallels, labels=~np.isnan(parallels))
map.drawmeridians(meridians, labels=~np.isnan(meridians))
skip = 4
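# Subsample every 4th point in both grid dimensions to keep the pcolor plots light.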
for f in multibeam_files:
print('Plotting ', f)
ds = xr.open_dataset(f)
lon = np.array(ds.longitude[::skip,::skip])
lat = np.array(ds.latitude[::skip,::skip])
depth = np.array(ds.depth[::skip,::skip])
plt.pcolor(lon, lat, depth, vmin=0, vmax=100, cmap=cm.viridis_r)
del lon, lat, depth, ds
gc.collect()
plt.colorbar()
fig.suptitle('Monterey Bay bathymetry from shipboard Multibeam EM-712')
plt.savefig('monterey_bay_multibeam_bathymetry.png', dpi=300)
plt.close(fig)
| 2.21875 | 2 |
ego/decomposition/relabel.py | fabriziocosta/EGO | 0 | 12797305 | <gh_stars>0
#!/usr/bin/env python
"""Provides scikit interface."""
from ego.component import GraphComponent
from collections import Counter
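# Each decompose_relabel_* function below leaves the subgraphs untouched and only appends
# a computed attribute (node count, label frequencies, degree statistics, an estimator's
# prediction, ...) to every signature, returning a fresh GraphComponent.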
def decompose_relabel_node_size(graph_component):
new_subgraphs_list = []
new_signatures_list = []
for subgraph, signature in zip(graph_component.subgraphs, graph_component.signatures):
node_size_label = '%d' % subgraph.number_of_nodes()
new_signature = signature + '_node_size_' + node_size_label
new_subgraphs_list.append(subgraph)
new_signatures_list.append(new_signature)
gc = GraphComponent(
graph=graph_component.graph,
subgraphs=new_subgraphs_list,
signatures=new_signatures_list)
return gc
def decompose_relabel_node_label_frequency(graph_component):
new_subgraphs_list = []
new_signatures_list = []
for subgraph, signature in zip(graph_component.subgraphs, graph_component.signatures):
labels = [subgraph.nodes[u]['label'] for u in subgraph.nodes()]
new_signature = signature + '_node_label_frequency_' + '_'.join(['%s:%s' % (k, v) for k, v in Counter(labels).most_common()])
new_subgraphs_list.append(subgraph)
new_signatures_list.append(new_signature)
gc = GraphComponent(
graph=graph_component.graph,
subgraphs=new_subgraphs_list,
signatures=new_signatures_list)
return gc
def decompose_relabel_distinct_node_labels(graph_component):
new_subgraphs_list = []
new_signatures_list = []
for subgraph, signature in zip(graph_component.subgraphs, graph_component.signatures):
labels = [subgraph.nodes[u]['label'] for u in subgraph.nodes()]
new_signature = signature + '_distinct_node_labels_%d' % len(set(labels))
new_subgraphs_list.append(subgraph)
new_signatures_list.append(new_signature)
gc = GraphComponent(
graph=graph_component.graph,
subgraphs=new_subgraphs_list,
signatures=new_signatures_list)
return gc
def decompose_relabel_node_degree_frequency(graph_component):
new_subgraphs_list = []
new_signatures_list = []
for subgraph, signature in zip(graph_component.subgraphs, graph_component.signatures):
labels = [v for v in dict(subgraph.degree()).values()]
new_signature = signature + '_node_degree_frequency_' + '_'.join(['%s:%s' % (k, v) for k, v in Counter(labels).most_common()])
new_subgraphs_list.append(subgraph)
new_signatures_list.append(new_signature)
gc = GraphComponent(
graph=graph_component.graph,
subgraphs=new_subgraphs_list,
signatures=new_signatures_list)
return gc
def decompose_relabel_max_node_degree(graph_component):
new_subgraphs_list = []
new_signatures_list = []
for subgraph, signature in zip(graph_component.subgraphs, graph_component.signatures):
labels = [v for v in dict(subgraph.degree()).values()]
new_signature = signature + '_max_node_degree_%d' % max(labels)
new_subgraphs_list.append(subgraph)
new_signatures_list.append(new_signature)
gc = GraphComponent(
graph=graph_component.graph,
subgraphs=new_subgraphs_list,
signatures=new_signatures_list)
return gc
def decompose_relabel_estimator(graph_component, graph_estimator=None):
new_subgraphs_list = []
new_signatures_list = []
preds = graph_estimator.predict(graph_component.subgraphs)
for subgraph, signature, pred in zip(graph_component.subgraphs, graph_component.signatures, preds):
new_signature = signature + '_estimator_%s' % pred
new_subgraphs_list.append(subgraph)
new_signatures_list.append(new_signature)
gc = GraphComponent(
graph=graph_component.graph,
subgraphs=new_subgraphs_list,
signatures=new_signatures_list)
return gc
def rlbest(*args, **kargs):
return decompose_relabel_estimator(*args, **kargs)
def rlbmdgr(*args, **kargs):
return decompose_relabel_max_node_degree(*args, **kargs)
def rlbdfrq(*args, **kargs):
return decompose_relabel_node_degree_frequency(*args, **kargs)
def rlbnod(*args, **kargs):
return decompose_relabel_distinct_node_labels(*args, **kargs)
def rlblfrq(*args, **kargs):
return decompose_relabel_node_label_frequency(*args, **kargs)
def rlbsiz(*args, **kargs):
return decompose_relabel_node_size(*args, **kargs)
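# Illustrative usage sketch (assumes a networkx graph `g` whose nodes carry a
# 'label' attribute; `GraphComponent` may take further arguments in practice):
#
#     gc = GraphComponent(graph=g, subgraphs=[g], signatures=['graph'])
#     gc = decompose_relabel_node_size(gc)
#     gc.signatures   # e.g. ['graph_node_size_5'] for a 5-node graph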
| 2.203125 | 2 |
source/Mlos.Python/mlos/OptimizerEvaluationTools/SyntheticFunctions/sample_functions.py | GindaChen/MLOS | 81 | 12797306 | #
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import math
import numpy as np
def quadratic(**kwargs) -> float:
return sum(x_i ** 2 for _, x_i in kwargs.items())
def ackley(x_1=None, x_2=None, a=20, b=0.2, c=2*math.pi):
d = 2
return -a * np.exp(-b * np.sqrt((x_1**2 + x_2**2) / d)) - np.exp(np.cos(c * x_1) + np.cos(c * x_2)) + a + np.exp(1)
def flower(**kwargs):
a = 1
b = 2
c = 4
x_1 = kwargs['x_1']
x_2 = kwargs['x_2']
x_norm = np.sqrt(x_1**2 + x_2**2)
return a * x_norm + b * np.sin(c * np.arctan2(x_1, x_2))
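# Quick illustrative evaluations (values follow directly from the formulas above):
#
#     quadratic(x_1=3, x_2=4)   # 9 + 16 = 25
#     flower(x_1=1.0, x_2=0.0)  # 1*1 + 2*sin(4*atan2(1, 0)) = 1 + 2*sin(2*pi) ≈ 1.0
#     ackley(x_1=0, x_2=0)      # ≈ -4.67 for this particular implementation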
| 2.96875 | 3 |
main.py | adezoguns/flask_server_ml_display | 0 | 12797307 | from flask import Flask, url_for, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from werkzeug.utils import secure_filename
from werkzeug.serving import run_simple
from id_class_locator import id_class_detector
import os
import time
from cv2 import cv2
app=Flask(__name__)
#app.config['SQLALCHEMY_DATABASE_URI']='sqlite:///my.db'
#db=SQLAlchemy(app)
path2File= os.path.dirname(os.path.realpath(__file__))
pathToModel=path2File+'/WorkArea/FRCNN'
PATH = path2File+'/static/input'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app.config['PATH']=PATH
#app.config["TEMPLATES_AUTO_RELOAD"] = True
model = cv2.dnn.readNetFromTensorflow(pathToModel+'/frozen_inference_graph.pb', pathToModel+'/frcnn.pbtxt')
@app.route('/hello', methods=['POST', 'GET'])
def hello():
return('Hello')
@app.route('/', methods=['POST', 'GET'])
def index():
return render_template('home.html')
@app.route('/upload', methods=['POST', 'GET'])
def upload():
if request.method == 'POST':
# check if the post request has the file part
file = request.files['imageUploadForm']
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['PATH'], filename))
print(filename)
img=cv2.imread(os.path.join(app.config['PATH'], filename))
id_class_detector(img, model, filename, debug=False)
#time.sleep(2)
return render_template('home.html', value=filename)
if __name__=="__main__":
run_simple('127.0.0.1', 9100, app, use_reloader=False)
| 2.28125 | 2 |
zapr/utils/reader.py | zapr-oss/zapr-athena-client | 1 | 12797308 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from configparser import ConfigParser
from s3transfer import RetriesExceededError
from s3transfer.exceptions import TransferNotDoneError
import os
import sys
S3 = "s3://"
S3A = "s3a://"
class ConfigReader:
def __init__(self, logger, config_location):
self.config_location = config_location
self.logger = logger
def read_config(self):
config = ConfigParser()
config.read(self.config_location)
enable_insert_overwrite = 'True'
enable_external_table_drop = 'True'
if 'aws' in config and 'region' in config['aws']:
aws_region = config['aws']['region']
else:
self.logger.error("Not able to read the region from the config ")
sys.exit(os.EX_CONFIG)
if 'athena' in config:
if 'ATHENA_OUTPUT_LOCATION' in config['athena']:
athena_output_location = config['athena']['ATHENA_OUTPUT_LOCATION']
else:
self.logger.error("Not able to read the ATHENA_OUTPUT_LOCATION from the config ")
sys.exit(os.EX_CONFIG)
if 'STAGING_DB' in config['athena']:
staging_db = config['athena']['STAGING_DB']
else:
self.logger.error("Not able to read the STAGING_DB from the config ")
sys.exit(os.EX_CONFIG)
if 'ENABLE_INSERT_OVERWRITE' in config['athena']:
enable_insert_overwrite = config['athena']['ENABLE_INSERT_OVERWRITE']
if 'ENABLE_EXTERNAL_TABLE_DROP' in config['athena']:
enable_external_table_drop = config['athena']['ENABLE_INSERT_OVERWRITE']
else:
self.logger.error("Not able to read the athena config")
sys.exit(os.EX_CONFIG)
return aws_region, athena_output_location, staging_db, enable_insert_overwrite, enable_external_table_drop
class FileReader:
def __init__(self, logger, s3_resource):
self.logger = logger
self.s3_resource = s3_resource
def split_s3_path(self, s3_location):
path_parts = s3_location.replace(S3, "").replace(S3A, "").split("/")
s3_bucket = path_parts.pop(0)
prefix = "/".join(path_parts)
return s3_bucket, prefix
def download_input_from_s3(self, s3_bucket, prefix, destination_location):
try:
self.s3_resource.meta.client.download_file(s3_bucket, prefix, destination_location)
except RetriesExceededError as e:
self.logger.fatal("Unable to download the file {0}".format(e))
self.logger.fatal("Unable to download the file from s3 to local : {0}/{1}".format(s3_bucket, prefix))
sys.exit(os.EX_DATAERR)
except TransferNotDoneError as e:
self.logger.fatal("Unable to download the file {0}".format(e))
sys.exit(os.EX_OSERR)
return destination_location
def get_file(self,file_type, source_location, destination_location):
if source_location.startswith(S3) or source_location.startswith(S3A):
self.logger.info("Downloading the {0} from {1} to {2}".format(file_type,
source_location,
destination_location))
s3_bucket, prefix = self.split_s3_path(source_location)
return self.download_input_from_s3(s3_bucket, prefix, destination_location)
else:
return source_location
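# Illustrative usage sketch (assumes a configured `logger` and a boto3 S3
# resource; the bucket, key and local path below are placeholders):
#
#     reader = FileReader(logger, boto3.resource('s3'))
#     local_copy = reader.get_file('query', 's3://some-bucket/queries/q1.sql', '/tmp/q1.sql')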
| 1.828125 | 2 |
xcount/events/query.py | Havenir/xcount | 0 | 12797309 | import frappe
import datetime
@frappe.whitelist()
def get_batch_nos(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select batch_id, expiry_date
from `tabBatch`
where
item = {item_code} and disabled = 0 and (expiry_date is null or expiry_date > '{cur_date}')"""
.format(item_code = frappe.db.escape(filters.get("item")), cur_date = datetime.datetime.today()
))
| 2.1875 | 2 |
djangobmf/contrib/accounting/bmf_module.py | caputomarcos/django-bmf | 0 | 12797310 | #!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from djangobmf.dashboards import Accounting
from djangobmf.sites import Module
from djangobmf.sites import ViewMixin
from djangobmf.sites import register
from djangobmf.sites import site
from .categories import TransactionCategory
from .models import ACCOUNTING_INCOME
from .models import ACCOUNTING_EXPENSE
from .models import ACCOUNTING_ASSET
from .models import ACCOUNTING_LIABILITY
from .models import Account
from .models import Transaction
from .models import TransactionItem
from .views import TransactionCreateView
from .views import TransactionUpdateView
@register(dashboard=Accounting)
class AccountModule(Module):
model = Account
default = True
@register(dashboard=Accounting)
class TransactionModule(Module):
model = Transaction
default = True
create = TransactionCreateView
update = TransactionUpdateView
@register(dashboard=Accounting)
class TransactionItemModule(Module):
model = TransactionItem
default = True
site.register_settings('bmfcontrib_accounting', {
'income': forms.ModelChoiceField(queryset=Account.objects.filter(type=ACCOUNTING_INCOME)),
'expense': forms.ModelChoiceField(queryset=Account.objects.filter(type=ACCOUNTING_EXPENSE)),
'customer': forms.ModelChoiceField(queryset=Account.objects.filter(type=ACCOUNTING_ASSET)),
'supplier': forms.ModelChoiceField(queryset=Account.objects.filter(type=ACCOUNTING_LIABILITY)),
})
@register(category=TransactionCategory)
class AllAccounts(ViewMixin):
model = Account
name = _("All Accounts")
slug = "accounts"
@register(category=TransactionCategory)
class OpenTransactions(ViewMixin):
model = Transaction
name = _("Open transactions")
slug = "open"
def filter_queryset(self, request, queryset, view):
return queryset.filter(draft=True).order_by('-modified')
@register(category=TransactionCategory)
class ClosedTrancations(ViewMixin):
model = Transaction
name = _("Closed transactions")
slug = "closed"
date_resolution = "month"
def filter_queryset(self, request, queryset, view):
return queryset.filter(draft=False).order_by('modified')
@register(category=TransactionCategory)
class Archive(ViewMixin):
model = TransactionItem
name = _("Transaction archive")
slug = "archive"
date_resolution = "week"
| 1.984375 | 2 |
api/log.py | StonebreakerDesigns/project-boilerplate | 0 | 12797311 | # coding: utf-8
'''Centralized logger factory.'''
import logging
from .config import config
# The level options supported in configuration.
LEVEL_OPTIONS = list((
'notset', 'debug', 'info', 'warning', 'error', 'critical'
))
def _setup_logger_supply():
'''Create and return a logger generator.'''
configured_level = config.development.log_level
# Perform basic configuration.
logging.basicConfig(
level=20, # Configure 3rd party loggers to the INFO level.
format='%(asctime)-10s %(name)-30s %(levelname)-8s %(message)s'
)
def create_log(name):
'''Create a log and elevate it to the configured level.'''
log = logging.getLogger(name)
log.setLevel(LEVEL_OPTIONS.index(configured_level)*10)
return log
return create_log
# Define the callable that can be used to create properly configured loggers.
logger = _setup_logger_supply() # pylint: disable=invalid-name
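# Illustrative usage (the module-level pattern this factory is designed for):
#
#     log = logger(__name__)
#     log.info('service started')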
| 2.515625 | 3 |
tests/test_utilities.py | mhostetter/quaternionic | 40 | 12797312 | import sys
import numpy as np
import quaternionic
import pytest
def test_self_return():
def f1(a, b, c):
d = np.asarray(a).copy()
assert isinstance(a, np.ndarray) and isinstance(a, quaternionic.array)
assert isinstance(b, np.ndarray) and isinstance(b, quaternionic.array)
assert isinstance(c, np.ndarray) and isinstance(c, quaternionic.array)
assert isinstance(d, np.ndarray) and not isinstance(d, quaternionic.array)
return d
a = quaternionic.array.random((17, 3, 4))
b = quaternionic.array.random((13, 3, 4))
c = quaternionic.array.random((11, 3, 4))
d1 = f1(a, b, c)
assert isinstance(d1, np.ndarray) and not isinstance(d1, quaternionic.array)
f2 = quaternionic.utilities.type_self_return(f1)
d2 = f2(a, b, c)
assert isinstance(d2, np.ndarray) and isinstance(d2, quaternionic.array)
f1.nin = 3
f3 = quaternionic.utilities.type_self_return(f1)
d3 = f3(a, b, c)
assert isinstance(d3, np.ndarray) and isinstance(d3, quaternionic.array)
def test_ndarray_args():
def f1(a, b, c):
d = np.asarray(a).copy()
assert isinstance(a, np.ndarray) and not isinstance(a, quaternionic.array)
assert isinstance(b, np.ndarray) and not isinstance(b, quaternionic.array)
assert isinstance(c, np.ndarray) and not isinstance(c, quaternionic.array)
assert isinstance(d, np.ndarray) and not isinstance(d, quaternionic.array)
return d
a = quaternionic.array.random((17, 3, 4))
b = quaternionic.array.random((13, 3, 4))
c = quaternionic.array.random((11, 3, 4))
f2 = quaternionic.utilities.ndarray_args(f1)
d2 = f2(a, b, c)
assert isinstance(d2, np.ndarray) and not isinstance(d2, quaternionic.array)
f1.nin = 3
f3 = quaternionic.utilities.ndarray_args(f1)
d3 = f3(a, b, c)
assert isinstance(d3, np.ndarray) and not isinstance(d3, quaternionic.array)
def test_ndarray_args_and_return():
def f1(a, b, c):
d = np.asarray(a).copy()
assert isinstance(a, np.ndarray) and not isinstance(a, quaternionic.array)
assert isinstance(b, np.ndarray) and not isinstance(b, quaternionic.array)
assert isinstance(c, np.ndarray) and not isinstance(c, quaternionic.array)
assert isinstance(d, np.ndarray) and not isinstance(d, quaternionic.array)
return d
a = quaternionic.array.random((17, 3, 4))
b = quaternionic.array.random((13, 3, 4))
c = quaternionic.array.random((11, 3, 4))
f2 = quaternionic.utilities.ndarray_args_and_return(f1)
d2 = f2(a, b, c)
assert isinstance(d2, np.ndarray) and isinstance(d2, quaternionic.array)
f1.nin = 3
f3 = quaternionic.utilities.ndarray_args_and_return(f1)
d3 = f3(a, b, c)
assert isinstance(d3, np.ndarray) and isinstance(d3, quaternionic.array)
@pytest.mark.skipif(sys.implementation.name.lower() == 'pypy', reason="No numba on pypy")
def test_types_to_ftylist():
import numba
types_to_ftylist = quaternionic.utilities.convert_numpy_ufunc_type_to_numba_ftylist
types = '?bhilqpBHILQPfdgF->D'
ftylist = numba.complex128(
numba.boolean,
numba.byte,
numba.short,
numba.intc,
numba.int_,
numba.longlong,
numba.intp,
numba.char,
numba.ushort,
numba.uintc,
numba.uint,
numba.ulonglong,
numba.uintp,
numba.float32,
numba.float_,
numba.double,
numba.complex64,
)
assert types_to_ftylist([types]) == [ftylist]
def test_pyguvectorize():
_quaternion_resolution = 10 * np.finfo(float).resolution
np.random.seed(1234)
one = quaternionic.array(1, 0, 0, 0)
x = quaternionic.array.random((7, 13, 4))
y = quaternionic.array.random((13, 4))
z = np.random.rand(13)
arg0s = [one, -(1+2*_quaternion_resolution)*one, -one, x]
for k in dir(quaternionic.algebra_ufuncs):
if not k.startswith('__'):
f1 = getattr(quaternionic.algebra_ufuncs, k)
f2 = getattr(quaternionic.algebra, k)
sig = f2.signature
inputs = sig.split('->')[0].split(',')
for arg0 in arg0s:
args = [arg0.ndarray] if inputs[0] == '(n)' else [z,]
if len(inputs) > 1:
args.append(y.ndarray if inputs[1] == '(n)' else z)
assert np.allclose(
f1(*args),
quaternionic.utilities.pyguvectorize(f2.types, f2.signature)(f2)(*args),
atol=0.0,
rtol=_quaternion_resolution
)
| 2.484375 | 2 |
docs/02.AI_ML/code-1805/Day05all/map.py | mheanng/PythonNote | 2 | 12797313 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
def f1(x):
return x + 3
a = 1
b = f1(a)
print(b)
A = [1, 2, 3]
'''
B = []
for x in A:
B.append(f1(x))
'''
B = list(map(f1, A))
print(B)
C = list(map(lambda x: x + 3, A))
print(C)
| 3.796875 | 4 |
001_TwoSum/two_sum.py | fangqian/LeetCodes | 0 | 12797314 | class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
d = {}
for i in range(len(nums)):
find = target - nums[i]
if d.get(find, None) is None:
d[nums[i]] = i
else:
return [d[find], i]
class Solution1:
# @return a tuple, (index1, index2)
def twoSum(self, num, target):
tmp = {}
for i in range(len(num)):
if target - num[i] in tmp:
return([tmp[target - num[i]], i])
else:
tmp[num[i]] = i
nums = [2, 5, 2, 11, 15]
target = 4
a = Solution()
print(a.twoSum(nums, target))
b = Solution1()
print(b.twoSum(nums, target))
| 3.3125 | 3 |
day02/day02.py | andreaskaempf/adventofcode2021 | 0 | 12797315 | <gh_stars>0
# Advent of Code, Day 2
# State variables for problems 1 & 2
horiz1 = depth1 = 0
horiz2 = depth2 = aim2 = 0
# Process each line of text
for l in open('input.txt'):
# Get instruction and value
instruction, n = l.split()
n = int(n)
# Process instructions for both problems
if instruction == 'forward':
horiz1 += n
horiz2 += n
depth2 += aim2 * n
elif instruction == 'down':
depth1 += n
aim2 += n
elif instruction == 'up':
depth1 -= n
aim2 -= n
else:
print('Bad instruction:', instruction)
# Show results for both problems
print(f'Problem 1: Ending pos = {horiz1}, depth = {depth1}, product = {horiz1 * depth1}')
print(f'Problem 2: Ending pos = {horiz2}, depth = {depth2}, aim = {aim2}, product = {horiz2 * depth2}')
| 3.78125 | 4 |
runner.py | inXS212/Saltie | 0 | 12797316 | <gh_stars>0
import configparser
import ctypes
import io
import mmap
import msvcrt
import multiprocessing as mp
import os
import sys
import random
import time
import bot_input_struct as bi
import bot_manager
import game_data_struct as gd
import rlbot_exception
from bot_code.conversions.server_converter import ServerConverter
PARTICPANT_CONFIGURATION_HEADER = 'Participant Configuration'
PARTICPANT_BOT_KEY_PREFIX = 'participant_is_bot_'
PARTICPANT_RLBOT_KEY_PREFIX = 'participant_is_rlbot_controlled_'
PARTICPANT_CONFIG_KEY_PREFIX = 'participant_config_'
PARTICPANT_BOT_SKILL_KEY_PREFIX = 'participant_bot_skill_'
PARTICPANT_TEAM_PREFIX = 'participant_team_'
RLBOT_CONFIG_FILE = 'rlbot.cfg'
RLBOT_CONFIGURATION_HEADER = 'RLBot Configuration'
INPUT_SHARED_MEMORY_TAG = 'Local\\RLBotInput'
BOT_CONFIG_LOADOUT_HEADER = 'Participant Loadout'
BOT_CONFIG_LOADOUT_ORANGE_HEADER = 'Participant Loadout Orange'
BOT_CONFIG_MODULE_HEADER = 'Bot Location'
USER_CONFIGURATION_HEADER = 'User Info'
BOT_CONFIG_AGENT_HEADER = 'Bot Parameters'
try:
server_manager = ServerConverter('http://saltie.tk:5000', True, True, True, username='unknown')
except ImportError:
server_manager = ServerConverter('', False, False, False)
print('config.py not present, cannot upload replays to collective server')
print('Check Discord server for information')
if server_manager.error:
server_manager.warn_server('unable to connect to server')
def get_bot_config_file_list(botCount, config):
config_file_list = []
for i in range(botCount):
config_file_list.append(config.get(PARTICPANT_CONFIGURATION_HEADER, PARTICPANT_CONFIG_KEY_PREFIX + str(i)))
return config_file_list
# Cut off at 31 characters and handle duplicates
def get_sanitized_bot_name(dict, name):
if name not in dict:
new_name = name[:31] # Make sure name does not exceed 31 characters
dict[name] = 1
else:
count = dict[name]
new_name = name[:27] + "(" + str(count + 1) + ")" # Truncate at 27 because we can have up to '(10)' appended
dict[name] = count + 1
return new_name
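# Illustrative behaviour of the sanitiser above: duplicate names get a numeric
# suffix and every name is capped at 31 characters, e.g.
#
#     seen = dict()
#     get_sanitized_bot_name(seen, 'Saltie')   # -> 'Saltie'
#     get_sanitized_bot_name(seen, 'Saltie')   # -> 'Saltie(2)'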
def run_agent(terminate_event, callback_event, config_file, name, team, index, module_name, game_name, save_data, server_uploader):
bm = bot_manager.BotManager(terminate_event, callback_event, config_file, name, team,
index, module_name, game_name, save_data, server_uploader)
bm.run()
def main():
# Set up RLBot.cfg
framework_config = configparser.RawConfigParser()
framework_config.read(RLBOT_CONFIG_FILE)
# Open anonymous shared memory for entire GameInputPacket and map buffer
buff = mmap.mmap(-1, ctypes.sizeof(bi.GameInputPacket), INPUT_SHARED_MEMORY_TAG)
gameInputPacket = bi.GameInputPacket.from_buffer(buff)
# Determine number of participants
num_participants = framework_config.getint(RLBOT_CONFIGURATION_HEADER, 'num_participants')
try:
server_manager.set_player_username(framework_config.get(USER_CONFIGURATION_HEADER, 'username'))
except Exception as e:
print('username not set in config', e)
print('using default username')
# Retrieve bot config files
participant_configs = get_bot_config_file_list(num_participants, framework_config)
# Create empty lists
bot_names = []
bot_teams = []
bot_modules = []
processes = []
callbacks = []
bot_parameter_list = []
name_dict = dict()
save_data = True
save_path = os.path.join(os.getcwd(), 'bot_code', 'training', 'replays')
game_name = str(int(round(time.time() * 1000))) + '-' + str(random.randint(0, 1000))
if save_data:
print(save_path)
if not os.path.exists(save_path):
print(os.path.dirname(save_path) + ' does not exist creating')
os.makedirs(save_path)
joined_path = os.path.join(save_path, game_name)
if not os.path.exists(joined_path):
os.makedirs(joined_path)
print('gameName: ' + game_name + 'in ' + save_path)
gameInputPacket.iNumPlayers = num_participants
server_manager.load_config()
num_team_0 = 0
# Set configuration values for bots and store name and team
for i in range(num_participants):
bot_config_path = participant_configs[i]
sys.path.append(os.path.dirname(bot_config_path))
bot_config = configparser.RawConfigParser()
if server_manager.download_config:
if 'saltie' in os.path.basename(bot_config_path):
bot_config._read(io.StringIO(server_manager.config_response.json()['content']), 'saltie.cfg')
else:
bot_config.read(bot_config_path)
else:
bot_config.read(bot_config_path)
team_num = framework_config.getint(PARTICPANT_CONFIGURATION_HEADER,
PARTICPANT_TEAM_PREFIX + str(i))
loadout_header = BOT_CONFIG_LOADOUT_HEADER
if (team_num == 1 and bot_config.has_section(BOT_CONFIG_LOADOUT_ORANGE_HEADER)):
loadout_header = BOT_CONFIG_LOADOUT_ORANGE_HEADER
if gameInputPacket.sPlayerConfiguration[i].ucTeam == 0:
num_team_0 += 1
gameInputPacket.sPlayerConfiguration[i].bBot = framework_config.getboolean(PARTICPANT_CONFIGURATION_HEADER,
PARTICPANT_BOT_KEY_PREFIX + str(i))
gameInputPacket.sPlayerConfiguration[i].bRLBotControlled = framework_config.getboolean(
PARTICPANT_CONFIGURATION_HEADER,
PARTICPANT_RLBOT_KEY_PREFIX + str(i))
gameInputPacket.sPlayerConfiguration[i].fBotSkill = framework_config.getfloat(PARTICPANT_CONFIGURATION_HEADER,
PARTICPANT_BOT_SKILL_KEY_PREFIX
+ str(i))
gameInputPacket.sPlayerConfiguration[i].iPlayerIndex = i
gameInputPacket.sPlayerConfiguration[i].wName = get_sanitized_bot_name(name_dict,
bot_config.get(loadout_header, 'name'))
gameInputPacket.sPlayerConfiguration[i].ucTeam = team_num
gameInputPacket.sPlayerConfiguration[i].ucTeamColorID = bot_config.getint(loadout_header,
'team_color_id')
gameInputPacket.sPlayerConfiguration[i].ucCustomColorID = bot_config.getint(loadout_header,
'custom_color_id')
gameInputPacket.sPlayerConfiguration[i].iCarID = bot_config.getint(loadout_header, 'car_id')
gameInputPacket.sPlayerConfiguration[i].iDecalID = bot_config.getint(loadout_header, 'decal_id')
gameInputPacket.sPlayerConfiguration[i].iWheelsID = bot_config.getint(loadout_header, 'wheels_id')
gameInputPacket.sPlayerConfiguration[i].iBoostID = bot_config.getint(loadout_header, 'boost_id')
gameInputPacket.sPlayerConfiguration[i].iAntennaID = bot_config.getint(loadout_header, 'antenna_id')
gameInputPacket.sPlayerConfiguration[i].iHatID = bot_config.getint(loadout_header, 'hat_id')
gameInputPacket.sPlayerConfiguration[i].iPaintFinish1ID = bot_config.getint(loadout_header,
'paint_finish_1_id')
gameInputPacket.sPlayerConfiguration[i].iPaintFinish2ID = bot_config.getint(loadout_header,
'paint_finish_2_id')
gameInputPacket.sPlayerConfiguration[i].iEngineAudioID = bot_config.getint(loadout_header,
'engine_audio_id')
gameInputPacket.sPlayerConfiguration[i].iTrailsID = bot_config.getint(loadout_header, 'trails_id')
gameInputPacket.sPlayerConfiguration[i].iGoalExplosionID = bot_config.getint(loadout_header,
'goal_explosion_id')
if bot_config.has_section(BOT_CONFIG_AGENT_HEADER):
try:
bot_parameter_list.append(bot_config[BOT_CONFIG_AGENT_HEADER])
except Exception as e:
bot_parameter_list.append(None)
print('failed to load bot parameters')
else:
bot_parameter_list.append(None)
bot_names.append(bot_config.get(loadout_header, 'name'))
bot_teams.append(framework_config.getint(PARTICPANT_CONFIGURATION_HEADER, PARTICPANT_TEAM_PREFIX + str(i)))
if gameInputPacket.sPlayerConfiguration[i].bRLBotControlled:
bot_modules.append(bot_config.get(BOT_CONFIG_MODULE_HEADER, 'agent_module'))
else:
bot_modules.append('NO_MODULE_FOR_PARTICIPANT')
# downloads the model based on the hash in the config
try:
server_manager.load_model(bot_config[BOT_CONFIG_AGENT_HEADER]['model_hash'])
except Exception as e:
print ("Couldn't get model hash,", e)
server_manager.set_player_amount(num_participants, num_team_0)
# Create Quit event
quit_event = mp.Event()
# Launch processes
for i in range(num_participants):
if gameInputPacket.sPlayerConfiguration[i].bRLBotControlled:
callback = mp.Event()
callbacks.append(callback)
process = mp.Process(target=run_agent,
args=(quit_event, callback, bot_parameter_list[i],
str(gameInputPacket.sPlayerConfiguration[i].wName),
bot_teams[i], i, bot_modules[i], save_path + '\\' + game_name,
save_data, server_manager))
process.start()
print("Successfully configured bots. Setting flag for injected dll.")
gameInputPacket.bStartMatch = True
# Wait 100 milliseconds then check for an error code
time.sleep(0.1)
game_data_shared_memory = mmap.mmap(-1, ctypes.sizeof(gd.GameTickPacketWithLock),
bot_manager.OUTPUT_SHARED_MEMORY_TAG)
bot_output = gd.GameTickPacketWithLock.from_buffer(game_data_shared_memory)
if not bot_output.iLastError == 0:
# Terminate all process and then raise an exception
quit_event.set()
terminated = False
while not terminated:
terminated = True
for callback in callbacks:
if not callback.is_set():
terminated = False
raise rlbot_exception.RLBotException().raise_exception_from_error_code(bot_output.iLastError)
print("Press any character to exit")
msvcrt.getch()
print("Shutting Down")
quit_event.set()
# Wait for all processes to terminate before terminating main process
terminated = False
while not terminated:
terminated = True
for callback in callbacks:
if not callback.is_set():
terminated = False
if __name__ == '__main__':
main()
| 2.0625 | 2 |
src/features/FER.py | pandov/myitacademy | 1 | 12797317 | import torchvision
from ..paths import PATH_DATA_PROCESSED
class FER(torchvision.datasets.ImageFolder):
def __init__(self, num_classes: int, **kwargs):
kwargs['root'] = PATH_DATA_PROCESSED.joinpath(f'FER{num_classes}').as_posix()
super().__init__(**kwargs)
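# Illustrative usage sketch (assumes the processed images live under
# PATH_DATA_PROCESSED/FER7 in torchvision ImageFolder layout; the transform is optional):
#
#     dataset = FER(num_classes=7, transform=torchvision.transforms.ToTensor())
#     image, label = dataset[0]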
| 2.203125 | 2 |
Difficulty/Medium/16.3-sum-closest.py | ryderfang/LeetCode | 1 | 12797318 | #
# @lc app=leetcode id=16 lang=python3
#
# [16] 3Sum Closest
#
# @lc code=start
from typing import List
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
ans = 0x7fffffff
sz = len(nums)
nums.sort()
for i in range(sz-2):
if i > 0 and nums[i] == nums[i-1]:
continue
p = i + 1
q = sz - 1
b = True
while p < q:
tmp = nums[i] + nums[p] +nums[q]
if abs(tmp - target) < abs(ans - target):
ans = tmp
if tmp == target:
break
elif tmp > target:
q -= 1
else:
p += 1
if ans == target:
break
return ans
# @lc code=end
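# Illustrative local check (not part of the LeetCode submission): for the
# classic example nums = [-1, 2, 1, -4] with target 1, the closest sum is 2.
if __name__ == '__main__':
    assert Solution().threeSumClosest([-1, 2, 1, -4], 1) == 2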
| 3.109375 | 3 |
yetl/metaconf/__init__.py | semanticinsight/yetl-framework | 0 | 12797319 | from ._dataset import DataSet
from ._datastore import DataStore
from ._environment import Environment
from ._project import Project
from ._secret_store import SecretStore
from ._spark import _Spark, Spark
from ._type_mapping import TypeMapping
from ._exceptions import *
__all__ = [
"DataSet",
"DataStore",
"Environment",
"Project",
"SecretStore",
"_Spark",
"Spark",
"TypeMapping",
"ProjectVersionInvalid",
"ProjectDirectoryNotSet",
"ProjectDirectoryNotExists",
]
| 1.296875 | 1 |
cogs/onload.py | paulranshaw/Discord-Bot | 0 | 12797320 | <filename>cogs/onload.py<gh_stars>0
import discord
import os
from discord.errors import ClientException
import dotenv
import logging
import asyncio
from discord.ext import commands, tasks
from dotenv import load_dotenv
from itertools import cycle
client = discord.Client()
class Onload(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print('Bot is online.')
def setup(client):
client.add_cog(Onload(client)) | 2.484375 | 2 |
tests/impls/gen_detector_prop_sal/test_drise_scoring.py | vbhavank/xaitk-saliency | 0 | 12797321 | from unittest import TestCase
import numpy as np
import os
from xaitk_saliency.impls.gen_detector_prop_sal.drise_scoring import DetectorRISE
from xaitk_saliency import GenerateDetectorProposalSaliency
from smqtk_core.configuration import configuration_test_helper
from tests import DATA_DIR, EXPECTED_MASKS_4x6
class TestSimilarityScoring (TestCase):
def test_init_(self) -> None:
"""
Test if implementation is usable.
"""
impl = DetectorRISE()
assert impl.is_usable() and isinstance(impl, GenerateDetectorProposalSaliency)
def test_default_param(self) -> None:
"""
Test default construction.
"""
impl = DetectorRISE()
assert impl.proximity_metric == 'cosine'
def test_get_config(self) -> None:
"""
        Test expected configuration behavior.
"""
impl = DetectorRISE('euclidean')
for i in configuration_test_helper(impl):
assert i.proximity_metric == 'euclidean'
def test_metric_args(self) -> None:
"""
Test non-default metric type.
"""
impl = DetectorRISE('hamming')
assert impl.proximity_metric == 'hamming'
def test_shape_sanity(self) -> None:
"""
Test basic scoring with a single feature for broadcasting sanity check.
"""
impl = DetectorRISE()
np.random.seed(2)
image1_dets = np.random.rand(2, (7))
pertb_dets = np.random.rand(10, 2, (7))
pertb_mask = np.random.randint(low=0, high=2, size=(10, 15, 25), dtype='int')
sal = impl.generate(image1_dets, pertb_dets, pertb_mask)
assert sal.shape == (2, 15, 25)
def test_standard_detection(self) -> None:
"""
Test basic scoring on known values and non-square masks.
"""
impl = DetectorRISE()
image1_dets = np.array([[1, 1, 4, 3, 0, 1, 0.89]])
pertb_dets = np.array([[[1, 2, 6, 6, 0.3, 1, 0.995]],
[[0, 1, 2, 2, 0.2, 2, 0.03]],
[[1, 0, 2, 2, 0.45, 1, 0.81]],
[[1, 1, 6, 6, 0.5, 1, 0.625]],
[[0, 2, 3, 5, 0.03, 1, 0.56]],
[[1, 2, 6, 3, 0.01, 1, 0.07]],])
sal = impl.generate(image1_dets, pertb_dets, EXPECTED_MASKS_4x6)
standard_sal = np.load(os.path.join(DATA_DIR, 'drisesal.npy'))
assert sal.shape == (1, 4, 6)
assert np.allclose(standard_sal, sal)
| 2.28125 | 2 |
AA_FOOTBALL/GuessLeverage.py | fattail/- | 0 | 12797322 | <filename>AA_FOOTBALL/GuessLeverage.py
import pandas as pd
import numpy as np
import os as os
def GetLeverage(v_win, v_tie, v_los, bet_amt, keep_side):
AllocateList = pd.read_csv(r'./data/cmb_list.csv',index_col=None)
AllocateList['胜'] = (AllocateList['胜']*bet_amt)/100
AllocateList['平'] = (AllocateList['平']*bet_amt)/100
AllocateList['负'] = (AllocateList['负']*bet_amt)/100
AllocateList['胜入'] = AllocateList['胜']*(v_win+1)
AllocateList['平入'] = AllocateList['平']*(v_tie+1)
AllocateList['负入'] = AllocateList['负']*(v_los+1)
AllocateList['总出'] = AllocateList['胜'] + AllocateList['平'] + AllocateList['负']
AllocateList['胜利'] = AllocateList['胜入'] - AllocateList['总出']
AllocateList['平利'] = AllocateList['平入'] - AllocateList['总出']
AllocateList['负利'] = AllocateList['负入'] - AllocateList['总出']
if keep_side == '留胜':
OPTAllocateList = AllocateList.loc[(AllocateList['平利']>0) & (AllocateList['负利']>0),]
OPTAllocateList['最小胜'] = (OPTAllocateList['总出']/OPTAllocateList['胜']) - 1
OPTAllocateList['杠杆差'] = OPTAllocateList['最小胜'] - v_win
return OPTAllocateList.loc[OPTAllocateList['最小胜'] == OPTAllocateList['最小胜'].min(),]
elif keep_side == '留平':
OPTAllocateList = AllocateList.loc[(AllocateList['胜利']>0) & (AllocateList['负利']>0),]
OPTAllocateList['最小平'] = (OPTAllocateList['总出']/OPTAllocateList['平']) - 1
OPTAllocateList['杠杆差'] = OPTAllocateList['最小平'] - v_tie
return OPTAllocateList.loc[OPTAllocateList['最小平'] == OPTAllocateList['最小平'].min(),]
elif keep_side == '留负':
OPTAllocateList = AllocateList.loc[(AllocateList['平利']>0) & (AllocateList['胜利']>0),]
OPTAllocateList['最小负'] = (OPTAllocateList['总出']/OPTAllocateList['负']) - 1
OPTAllocateList['杠杆差'] = OPTAllocateList['最小负'] - v_los
return OPTAllocateList.loc[OPTAllocateList['最小负'] == OPTAllocateList['最小负'].min(),]
# Bosnia and Herzegovina vs Poland
GetLeverage(1.55, 2.125, 1.875, 100, '留胜') # 2.125
GetLeverage(1.55, 2.125, 1.875, 100, '留平') # 3.0
GetLeverage(1.55, 2.125, 1.875, 100, '留负') # 2.7
| 2.59375 | 3 |
高频120_Lint/Regular Expression Matching.py | lixiaoruiusa/Rui7272 | 0 | 12797323 | class Solution:
"""
@param s: A string
@param p: A string includes "." and "*"
@return: A boolean
    @ '.' matches any single character; '*' matches zero or more of the preceding element.
    @ corner case ??
@ Time | Space
"""
def isMatch(self, s, p):
        # 2-D DP table: (len(p) + 1) rows by (len(s) + 1) columns
dp = [[False] * (len(s) + 1) for _ in range(len(p) + 1)]
dp[0][0] = True
for i in range(1, len(p)):
dp[i + 1][0] = dp[i - 1][0] and p[i] == '*'
for i in range(len(p)):
for j in range(len(s)):
if p[i] == '*':
dp[i + 1][j + 1] = dp[i - 1][j + 1] or dp[i][j + 1]
if p[i - 1] == s[j] or p[i - 1] == '.':
dp[i + 1][j + 1] |= dp[i + 1][j]
else:
dp[i + 1][j + 1] = dp[i][j] and (p[i] == s[j] or p[i] == '.')
return dp[-1][-1]
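# Illustrative local checks (standard examples for this problem): "aa" does not
# match "a", while "aa" matches "a*" and "ab" matches ".*".
if __name__ == '__main__':
    matcher = Solution()
    assert not matcher.isMatch("aa", "a")
    assert matcher.isMatch("aa", "a*")
    assert matcher.isMatch("ab", ".*")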
| 3.703125 | 4 |
src/vis.py | atreyasha/lfw-faces-rgan | 0 | 12797324 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import re
import os
import glob
import imageio
import argparse
import subprocess
import numpy as np
from tqdm import tqdm
from PIL import Image
from pygifsicle import optimize
from obj.arg_formatter import arg_metav_formatter
def sorted_alphanumeric(data):
"""
Function to sort number-containing strings
Args:
data (list): list of strings to sort
Returns:
(list): sorted list
"""
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(data, key=alphanum_key)
def make_plot(direct, number_ticks):
"""
Function to plot values from log csv file
Args:
direct (str): base directory of logged model
number_ticks (int): number of ticks to have on graph
"""
direct = re.sub(r"(\/)?$", "", direct)
direct = re.sub(r"(\.\/)?pickles\/", "", direct)
directLong = "./pickles/" + direct
if not os.path.isdir(directLong):
sys.exit(directLong + " does not exist")
# make vis directory within log directory
os.makedirs(directLong + "/vis", exist_ok=True)
subprocess.call(
["Rscript", "gg.R", "-d", directLong, "-t",
str(number_ticks)])
def make_gif(direct,
shrink_factor=4,
skip_rate=2,
interval=0.1,
until=None,
progress_bar=False):
"""
Function to create gif from images
Args:
direct (str): base directory of logged model
shrink_factor (int): factor by which to downsample images
skip_rate (int): interval to images to use for gif
interval (float): temporal interval for gif construction or speed
until (int): upper limit for epoch to be used in gif construction
progress_bar (bool): True if progress bar should be added to gif
"""
print("creating training evolution gif")
# clean up directory input
direct = re.sub(r"(\/)?$", "", direct)
direct = re.sub(r"(\.\/)?pickles\/", "", direct)
directLong = "./pickles/" + direct
if not os.path.isdir(directLong):
sys.exit(directLong + " does not exist")
# get sorted image list
sorted_list = sorted_alphanumeric(glob.glob(directLong + "/img/*png"))
# assume all images are of same size
size = Image.open(sorted_list[0]).size
new_size = tuple([int(el / shrink_factor) for el in size])
if isinstance(until, int):
sorted_list = sorted_list[:until]
sorted_list = [
Image.open(img).resize(new_size, Image.ANTIALIAS)
for i, img in enumerate(tqdm(sorted_list))
if ((i + 1) % skip_rate == 0 or i == 0)
]
kargs = {'duration': interval}
imageio.mimsave(directLong + "/vis/vis.gif", sorted_list, **kargs)
optimize(directLong + "/vis/vis.gif", directLong + "/vis/vis.gif")
if progress_bar:
print("adding progress bar to gif")
output = subprocess.call("cat " + directLong + "/vis/vis.gif" +
" | gif-progress --bar-color '#000'" + " > " +
directLong + "/vis/out.gif",
shell=True)
if output != 0:
sys.exit("error occurred with gif progress bar, do manual check")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=arg_metav_formatter)
required = parser.add_argument_group("required name arguments")
required.add_argument("--log-dir",
type=str,
required=True,
help="base directory within pickles from which to" +
" visualize")
parser.add_argument("--number-ticks",
type=int,
default=10,
help="number of x-axis ticks to use in main plots")
parser.add_argument("--create-gif",
default=False,
action="store_true",
help="option to activate gif creation")
parser.add_argument("--shrink-factor",
type=int,
default=4,
help="shrinking factor for images, applies only" +
" when --create-gif is supplied")
parser.add_argument("--skip-rate",
type=int,
default=2,
help="skip interval when using images to construct" +
" gif applies only when --create-gif is supplied")
parser.add_argument("--interval",
type=float,
default=0.1,
help="time interval when constructing gifs from" +
" images, applies only when --create-gif is supplied")
parser.add_argument("--until",
type=int,
default=None,
help="set upper epoch limit for gif creation," +
" applies only when --create-gif is supplied")
parser.add_argument(
"--progress-bar",
default=False,
action="store_true",
help="option to add progress bar to gifs, applies" +
"only when --create-gif is supplied; check readme for" +
" additional go package installation instructions")
args = parser.parse_args()
# make plot
make_plot(args.log_dir, args.number_ticks)
# if necessary, make gif
if args.create_gif:
make_gif(args.log_dir, args.shrink_factor, args.skip_rate,
args.interval, args.until, args.progress_bar)
| 2.46875 | 2 |
djangoerp/registration/forms.py | xarala221/django-erp | 345 | 12797325 | <filename>djangoerp/registration/forms.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.5'
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from djangoerp.core.forms.auth import UserForm
class UserRegistrationForm(UserForm):
"""Form for user registration.
"""
def __init__(self, *args, **kwargs):
super(UserRegistrationForm, self).__init__(*args, **kwargs)
# Improved security.
        self.fields.pop('is_admin', None)
        self.fields.pop('is_staff', None)
        self.fields.pop('is_active', None)
        self.fields.pop('is_superuser', None)
        self.fields.pop('groups', None)
        self.fields.pop('user_permissions', None)
| 1.921875 | 2 |
DailyCodingProblem/147_Others_Sort_List_Using_Given_Reverse_Method.py | RafayAK/CodingPrep | 5 | 12797326 | """
Given a list, sort it using this method: reverse(lst, i, j), which reverses lst from i to j.
"""
def reverse(lst, i, j):
lst[i: j] = lst[i: j][::-1]
def sort_with_reverse(lst:list):
iterator = 0
while iterator < len(lst)-1:
smallest_value = min(lst[iterator:])
index_smallest = lst.index(smallest_value, iterator, len(lst))
reverse(lst, iterator, index_smallest+1)
iterator += 1
return lst
if __name__ == '__main__':
l = [3, 2, 4, 1]
assert sort_with_reverse(l) == [1, 2, 3, 4]
l = [5, 2, 3, 4, 5, 6, 2]
assert sort_with_reverse(l) == [2, 2, 3, 4, 5, 5, 6] | 4.1875 | 4 |
pyflsk.py | swathiprabhu3/SwakshaGadgetShop | 0 | 12797327 | <reponame>swathiprabhu3/SwakshaGadgetShop
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import pymysql
pymysql.install_as_MySQLdb()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://root:@localhost/shoppingwebapp"
db = SQLAlchemy(app)
class Contacts(db.Model):
''' sno name email mes date '''
sno = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(25), nullable=False)
email = db.Column(db.String(20), nullable=False)
mes = db.Column(db.String(120), nullable=False)
date = db.Column(db.String(12), nullable=True)
class Checkout(db.Model):
sno = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(25), nullable=False)
phone = db.Column(db.String(20), nullable=False)
email = db.Column(db.String(20), nullable=False)
country = db.Column(db.String(12), nullable=False)
address= db.Column(db.String(120), nullable=False)
postcode= db.Column(db.String(20), nullable=False)
city= db.Column(db.String(20), nullable=False)
date = db.Column(db.String(12), nullable=True)
class Register(db.Model):
sno = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(25), nullable=False)
phone = db.Column(db.Integer, nullable=True)
email = db.Column(db.String(20), nullable=False)
password=db.Column(db.String(20),nullable=False)
class Payment(db.Model):
sno = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(25), nullable=False)
card = db.Column(db.Integer, nullable=True)
cvv = db.Column(db.Integer, nullable=False)
class Specification(db.Model):
sno = db.Column(db.Integer, primary_key=True)
size = db.Column(db.Integer, nullable=False)
color = db.Column(db.String(25), nullable=True)
delivery = db.Column(db.String(25), nullable=False)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/cart')
def cart():
return render_template('cart.html')
@app.route('/checkout', methods= ['GET', 'POST'])
def check():
if(request.method=='POST'):
name=request.form.get('name')
phone=request.form.get('phone')
email=request.form.get('email')
country=request.form.get('country')
address=request.form.get('address')
postcode=request.form.get('postcode')
city=request.form.get('city')
entry=Checkout(name=name,phone=phone,email=email, country=country,address=address,postcode=postcode,city=city,date=datetime.now())
db.session.add(entry)
db.session.commit()
return render_template('checkout.html')
@app.route("/register", methods=["GET", "POST"])
def register():
if(request.method=='POST'):
name=request.form.get('name')
email=request.form.get('email')
password=request.form.get('password')
phone=request.form.get('phone')
entry=Register(name=name,email=email,password=password,phone=phone)
db.session.add(entry)
db.session.commit()
return render_template("register.html")
@app.route('/specification',methods=['GET','POST'])
def specification():
if(request.method=='POST'):
size=request.form.get('size')
color=request.form.get('color')
delivery=request.form.get('delivery')
entry=Specification(size=size,color=color,delivery=delivery)
db.session.add(entry)
db.session.commit()
return render_template('specification.html')
@app.route('/payment',methods=['GET','POST'])
def payment():
if(request.method=='POST'):
name=request.form.get('name')
card=request.form.get('card')
cvv=request.form.get('cvv')
entry=Payment(name=name,card=card,cvv=cvv)
db.session.add(entry)
db.session.commit()
return render_template('payment.html')
@app.route('/category',methods=['GET','POST'])
def category():
return render_template('category.html')
@app.route('/product_detail')
def productdetail():
return render_template('product_detail.html')
@app.route('/search')
def search():
return render_template('search.html')
@app.route('/contact', methods= ['GET', 'POST'])
def contact():
if(request.method=='POST'):
'''add entry to database'''
name=request.form.get('name')
email=request.form.get('email')
message=request.form.get('msg')
entry=Contacts(name=name,email=email, mes=message,date=datetime.now())
db.session.add(entry)
db.session.commit()
return render_template('contact.html')
@app.route('/product')
def product():
return render_template('product.html')
app.run(debug=True)
| 2.28125 | 2 |
service/models.py | YuraPogorelov/auto-blog-pyhon | 0 | 12797328 | <gh_stars>0
from django.db import models
from django.urls import reverse
# Create your models here.
class Category(models.Model):
"""Кактегория услуг"""
name = models.CharField('Название категории', max_length=120)
slug = models.SlugField('URL', max_length=120)
text = models.TextField('Текст категории')
banner = models.ImageField('Баннер', upload_to='images/', blank=True, null=True)
title = models.CharField('Title', max_length=120)
description = models.CharField('Description', max_length=120)
keywords = models.CharField('Keywords', max_length=250)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('service_category', kwargs={'slug': self.slug})
class Meta:
verbose_name = 'Категория'
verbose_name_plural = 'Категории'
class Model(models.Model):
"""Модель из категории"""
category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True, verbose_name='Выбор категории')
name = models.CharField('Название услуги', max_length=120)
slug = models.SlugField('URL', max_length=120, default='', unique=True)
text = models.TextField('Текст модели', default='')
header = models.CharField('Заголовок', max_length=240, blank=True, null=True)
sub_header = models.CharField('Подзаголовок', max_length=240, blank=True, null=True)
images = models.ImageField('Картинка техники', upload_to='images/', blank=True, null=True)
active = models.BooleanField('Опубликовать', default=True)
title = models.CharField('Title', max_length=120)
description = models.CharField('Description', max_length=120)
keywords = models.CharField('Keywords', max_length=250)
sort = models.PositiveIntegerField('Порядок', default=0, unique=True)
banner = models.ImageField('Баннер', upload_to='images/', blank=True, null=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('service_model_view', kwargs={'category': self.category.slug, 'slug': self.slug})
class Meta:
verbose_name = 'Модель'
verbose_name_plural = 'Модели' | 2.296875 | 2 |
winning_ticket.py | GYosifov88/Python-Fundamentals | 0 | 12797329 | <reponame>GYosifov88/Python-Fundamentals<filename>winning_ticket.py
def additional_func(partition):
current_max_num = 0
special_char = ''
for ch in partition:
if ch != special_char:
if current_max_num >= 6:
break
current_max_num = 1
special_char = ch
else:
current_max_num += 1
return [current_max_num, special_char]
def ticket_validator(ticket):
ticket_condition = ''
if len(ticket) != 20:
ticket_condition = "invalid ticket"
elif ticket[0] * 20 == ticket and ticket[0] in '@#$^':
ticket_condition = f'ticket "{ticket}" - 10{ticket[0]} Jackpot!'
else:
data_source = ''
if additional_func(ticket[0:10]) > additional_func(ticket[10:]):
data_source = additional_func(ticket[10:])
else:
data_source = additional_func(ticket[0:10])
number_of_special_signs = data_source[0]
special_sign = data_source[1]
if number_of_special_signs < 6 or special_sign not in '@#$^':
ticket_condition = f'ticket "{ticket}" - no match'
else:
ticket_condition = f'ticket "{ticket}" - {number_of_special_signs}{special_sign}'
return ticket_condition
def winning_ticket(data):
for ticket in data:
print(ticket_validator(ticket))
tickets_info = input()
data = [x.strip() for x in tickets_info.split(',')]
winning_ticket(data)
| 3.546875 | 4 |
migrations/versions/2021_102612_bbedc353f90c_.py | fareszr/app | 0 | 12797330 | <gh_stars>0
"""empty message
Revision ID: bbedc353f90c
Revises: d67eab226ecd
Create Date: 2021-10-26 12:05:38.840492
"""
import sqlalchemy_utils
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bbedc353f90c'
down_revision = 'd67eab226ecd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('client_referral_id_fkey', 'client', type_='foreignkey')
op.create_foreign_key(None, 'client', 'referral', ['referral_id'], ['id'], ondelete='SET NULL')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'client', type_='foreignkey')
op.create_foreign_key('client_referral_id_fkey', 'client', 'referral', ['referral_id'], ['id'])
# ### end Alembic commands ###
| 1.359375 | 1 |
neuralprocesses/util.py | tom-andersson/neuralprocesses | 0 | 12797331 | <reponame>tom-andersson/neuralprocesses
from functools import wraps
import lab as B
import numpy as np
from lab.util import resolve_axis
from . import _dispatch
__all__ = [
"is_framework_module",
"modules",
"register_module",
"models",
"register_model",
"composite_coders",
"register_composite_coder",
"is_composite_coder",
"wrapped_partial",
"is_nonempty",
"batch",
"compress_batch_dimensions",
"split",
"split_dimension",
"merge_dimensions",
"select",
"with_first_last",
]
@_dispatch
def is_framework_module(x):
"""Check if something is a framework module.
Args:
x (object): Object to check.
Returns:
bool: `True` if `x` is a framework module, else `False`.
"""
return False
modules = [] #: Registered modules
def register_module(module):
"""Decorator to register a new module."""
modules.append(module)
return module
models = [] #: Registered models
def register_model(model):
"""Decorator to register a new model."""
models.append(model)
return model
composite_coders = [] #: Composite coders
def register_composite_coder(coder):
"""Decorator to register a composite coder."""
composite_coders.append(coder)
return coder
def is_composite_coder(coder):
"""Check if a coder is composite.
Args:
coder (coder): Coder.
Returns:
bool: Coder is composite.
"""
return any([isinstance(coder, c) for c in composite_coders])
def wrapped_partial(f, *partial_args, **partial_kw_args):
"""Like :func:`functools.partial`, but preserves the docstring.
Args:
f (function): Function to wrap.
*partial_args: Partial arguments.
**partial_kw_args: Partial keyword arguments.
Returns:
function: Version of `f` with some arguments and keyword arguments already set.
"""
@wraps(f)
def wrapped_f(*args, **kw_args):
return f(*partial_args, *args, **partial_kw_args, **kw_args)
return wrapped_f
def is_nonempty(x):
"""Check if a tensor is not empty.
Args:
x (tensor): Tensor.
Returns:
bool: `True` if `x` is not empty, otherwise `False`.
"""
return all([i > 0 for i in B.shape(x)])
def batch(x, other_dims):
"""Get the shape of the batch of a tensor.
Args:
x (tensor): Tensor.
other_dims (int): Number of non-batch dimensions.
Returns:
tuple[int]: Shape of batch dimensions.
"""
return B.shape(x)[:-other_dims]
def compress_batch_dimensions(x, other_dims):
"""Compress multiple batch dimensions of a tensor into a single batch dimension.
Args:
x (tensor): Tensor to compress.
other_dims (int): Number of non-batch dimensions.
Returns:
tensor: `x` with batch dimensions compressed.
function: Function to undo the compression of the batch dimensions.
"""
b = batch(x, other_dims)
if len(b) == 1:
return x, lambda x: x
else:
def uncompress(x_after):
return B.reshape(x_after, *b, *B.shape(x_after)[1:])
return B.reshape(x, int(np.prod(b)), *B.shape(x)[len(b) :]), uncompress
def split(z, sizes, axis):
"""Split a tensor into multiple tensors.
Args:
z (tensor): Tensor to split.
sizes (iterable[int]): Sizes of the components.
axis (int): Axis.
Returns:
list[tensor]: Components of the split.
"""
axis = resolve_axis(z, axis)
index = [slice(None, None, None)] * B.rank(z)
components = []
i = 0
for size in sizes:
index[axis] = slice(i, i + size, None)
components.append(z[tuple(index)])
i += size
return components
def split_dimension(z, axis, sizes):
"""Split a dimension of a tensor into multiple dimensions.
Args:
z (tensor): Tensor to split.
axis (int): Axis to split
sizes (iterable[int]): Sizes of new dimensions.
Returns:
tensor: Reshaped version of `z`.
"""
shape = B.shape(z)
# The indexing below will only be correct for positive `axis`, so resolve the index.
axis = resolve_axis(z, axis)
return B.reshape(z, *shape[:axis], *sizes, *shape[axis + 1 :])
def merge_dimensions(z, axis, sizes):
"""Merge dimensions of a tensor into one dimension. This operation is the opposite
of :func:`split_dimension`.
Args:
z (tensor): Tensor to merge.
axis (int): Axis to merge into.
sizes (iterable[int]): Sizes of dimensions to merge.
Returns:
tensor: Reshaped version of `z`.
"""
shape = B.shape(z)
# The indexing below will only be correct for positive `axis`, so resolve the index.
axis = resolve_axis(z, axis)
return B.reshape(
z,
*shape[: axis - len(sizes) + 1],
np.prod(sizes),
*shape[axis + 1 :],
)
def select(z, i, axis):
"""Select a particular index `i` at axis `axis` without squeezing the tensor.
Args:
z (tensor): Tensor to select from.
i (int): Index to select.
axis (int): Axis to select from.
Returns:
tensor: Selection from `z`.
"""
axis = resolve_axis(z, axis)
index = [slice(None, None, None) for _ in range(B.rank(z))]
index[axis] = slice(i, i + 1, None)
return z[index]
def with_first_last(xs):
"""Return a generator which indicates whether the returned element is the first or
last.
Args:
xs: Generator to wrap.
Yields:
bool: Element is first.
bool: Element is last.
object: Element.
"""
state = {"first": True}
def first():
if state["first"]:
state["first"] = False
return True
else:
return False
prev = None
have_prev = False
cur = None
have_cur = False
for x in xs:
cur = x
have_cur = True
if not have_prev:
# We will need a `prev`, but there is no `prev` yet. Take the current one as
# `prev` and skip to the next iteration.
prev = cur
have_prev = True
continue
# We currently have available `prev` and `cur`. We will return `prev` and,
# after the loop has finished, return `cur` as the last one.
yield first(), False, prev
prev = cur
if have_cur:
yield first(), True, cur
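# Illustrative behaviour of `with_first_last` (pure Python, no backend needed):
#
#     list(with_first_last("abc"))
#     # -> [(True, False, 'a'), (False, False, 'b'), (False, True, 'c')]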
| 2.28125 | 2 |
word2vec/wordvectors.py | fangyw/word2vec | 1 | 12797332 | <filename>word2vec/wordvectors.py<gh_stars>1-10
# coding: utf-8
import numpy as np
from word2vec.utils import unitvec
class WordVectors(object):
def __init__(self, vocab=None, vectors=None, saveMemory=True):
self.vocab = vocab
if not saveMemory:
self.vectors = vectors
self.l2norm = np.vstack(unitvec(vec) for vec in vectors)
def ix(self, word):
'''
Returns the index on self.vocab and self.l2norm for `word`
'''
temp = np.where(self.vocab == word)[0]
if temp.size == 0:
raise KeyError('Word not in vocabulary')
else:
return temp[0]
def get_vector(self, word):
'''
Returns the (l2norm) vector for `word` in the vocabulary
'''
idx = self.ix(word)
return self.l2norm[idx]
def __getitem__(self, word):
return self.get_vector(word)
def generate_response(self, indexes, metric, exclude=''):
'''
Generates a response as a list of tuples based on the indexes
Each tuple is: (vocab[i], metric[i])
'''
if isinstance(exclude, basestring):
exclude = [exclude]
return [(word, sim) for word, sim in zip(self.vocab[indexes], metric[indexes]) if word not in exclude]
def cosine(self, words, n=10):
'''
Cosine similarity.
metric = dot(l2norm_of_vectors, l2norm_of_target_vector)
Uses a precomputed l2norm of the vectors
Parameters
----------
words : string or list of string
word(s) in the vocabulary to calculate the vectors
n : int, optional (default 10)
number of neighbors to return
Returns
-------
dict: with the n similar words and its similarity as a list of tuples
Example
-------
>>> model.cosine('black', n=2)
{'black': [('white', 0.94757425919916516),
('yellow', 0.94640807944950878)]
}
'''
if isinstance(words, basestring):
words = [words]
targets = np.vstack((self.get_vector(word) for word in words))
metrics = np.dot(self.l2norm, targets.T)
ans = {}
for col, word in enumerate(words):
best = np.argsort(metrics[:, col])[::-1][:n + 1]
best = self.generate_response(best, metrics[:, col], exclude=word)
ans[word] = best
return ans
def _cosine(self, word, n=10):
'''
Cosine distance using scipy.distance.cosine
Note: This method is **a lot** slower than `self.cosine`
        and results are almost the same, really just use `self.cosine`
This is just available for testing.
Requires: `__init__(..., saveMemory=False)`
Parameters
----------
word : string
word in the vocabulary to calculate the vectors
n : int, optional (default 10)
number of neighbors to return
'''
from scipy.spatial import distance
target_vec = self[word]
metric = np.empty(self.vocab.shape)
for idx, vector in enumerate(self.vectors):
metric[idx] = distance.cosine(target_vec, vector)
best = metric.argsort()[:n + 1]
return self.generate_response(best, metric, exclude=word)
def analogy(self, pos, neg, n=10):
'''
Analogy similarity.
Parameters
----------
pos : list
neg : list
Returns
-------
List of tuples, each tuple is (word, similarity)
Example
-------
`king - man + woman = queen` will be:
`pos=['king', 'woman'], neg=['man']`
'''
words = pos + neg
pos = [(word, 1.0) for word in pos]
neg = [(word, -1.0) for word in neg]
mean = []
for word, direction in pos + neg:
mean.append(direction * unitvec(self.get_vector(word)))
mean = np.array(mean).mean(axis=0)
similarities = np.dot(self.l2norm, mean)
best = similarities.argsort()[::-1][:n + len(words) - 1]
return self.generate_response(best, similarities, exclude=words)
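# Illustrative usage (not part of the original module). Instances are normally built by
# this package's loaders from a trained word2vec model file; the tiny random vocabulary
# below only shows the query API (note the module targets the Python 2-era API,
# e.g. `basestring`).
if __name__ == '__main__':
    toy_vocab = np.array(['king', 'queen', 'man', 'woman'])
    toy_vectors = np.random.rand(4, 8)
    wv = WordVectors(vocab=toy_vocab, vectors=toy_vectors)
    print(wv.cosine('king', n=2))
    print(wv.analogy(pos=['king', 'woman'], neg=['man'], n=1))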
| 3.046875 | 3 |
tests/features/test_evaluate_constant_expression.py | TomPretty/calculator | 0 | 12797333 | <reponame>TomPretty/calculator
from calculator import evaluate
def test_evaluating_a_constant_expression():
source = "3"
assert evaluate(source) == 3
| 2.8125 | 3 |
bot/short.py | dvdrm/gd | 14 | 12797334 | <reponame>dvdrm/gd
from telethon import events, Button
from .utils import split_list, press_event, cmd
from asyncio import exceptions
from .. import jdbot, chat_id, SHORTCUT_FILE, logger, BOT_SET, ch_name
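# Handlers for running user-defined shortcuts stored in SHORTCUT_FILE:
#   /a          - lines of the form "name-->command" become inline buttons; the chosen
#                 command is executed through cmd() (any leading "nohup " is stripped)
#   /b          - the remaining plain lines are offered as reply-keyboard buttons,
#                 arranged into BOT_SET['每页列数'] columns
#   /clearboard - removes the custom reply keyboard
# When ch_name is set, /a and /b are also registered under the aliases from
# BOT_SET['命令别名'] (see the add_event_handler calls at the bottom of this file).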
@jdbot.on(events.NewMessage(from_users=chat_id, pattern=r'^/a$'))
async def my_a(event):
markup = []
SENDER = event.sender_id
msg = await jdbot.send_message(chat_id, '正在查询您的常用命令,请稍后')
with open(SHORTCUT_FILE, 'r', encoding='utf-8') as f:
shortcuts = f.readlines()
try:
cmdtext = None
async with jdbot.conversation(SENDER, timeout=60) as conv:
markup = [Button.inline(shortcut.split(
'-->')[0], data=str(shortcut.split('-->')[-1])) for shortcut in shortcuts if '-->' in shortcut]
markup = split_list(markup, 3)
markup.append([Button.inline('取消', data='cancel')])
msg = await jdbot.edit_message(msg, '请做出您的选择:', buttons=markup)
convdata = await conv.wait_event(press_event(SENDER))
res = bytes.decode(convdata.data)
if res == 'cancel':
msg = await jdbot.edit_message(msg, '对话已取消')
conv.cancel()
else:
await jdbot.delete_messages(chat_id, msg)
cmdtext = res
conv.cancel()
if cmdtext:
await cmd(cmdtext.replace('nohup ', ''))
except exceptions.TimeoutError:
msg = await jdbot.edit_message(msg, '选择已超时,对话已停止')
except Exception as e:
await jdbot.edit_message(msg, f'something wrong,I\'m sorry\n{str(e)}')
logger.error(f'something wrong,I\'m sorry\n{str(e)}')
@jdbot.on(events.NewMessage(from_users=chat_id, pattern=r'^/b$'))
async def my_b(event):
markup = []
msg = await jdbot.send_message(chat_id, '正在查询您的常用命令,请稍后')
with open(SHORTCUT_FILE, 'r', encoding='utf-8') as f:
shortcuts = f.readlines()
try:
await jdbot.delete_messages(chat_id, msg)
markup = [Button.text(shortcut, single_use=True)
for shortcut in shortcuts if '-->' not in shortcut]
markup = split_list(markup, int(BOT_SET['每页列数']))
await jdbot.send_message(chat_id, '请做出您的选择:', buttons=markup)
except Exception as e:
await jdbot.edit_message(msg, f'something wrong,I\'m sorry\n{str(e)}')
logger.error(f'something wrong,I\'m sorry\n{str(e)}')
@jdbot.on(events.NewMessage(from_users=chat_id, pattern=r'^/clearboard$'))
async def my_clear(event):
try:
await jdbot.send_message(chat_id, '已清空您的keyboard',buttons=Button.clear())
except Exception as e:
await jdbot.send_message(chat_id, f'something wrong,I\'m sorry\n{str(e)}')
logger.error(f'something wrong,I\'m sorry\n{str(e)}')
if ch_name:
jdbot.add_event_handler(my_a, events.NewMessage(
from_users=chat_id, pattern=BOT_SET['命令别名']['a']))
jdbot.add_event_handler(my_b, events.NewMessage(from_users=chat_id, pattern=BOT_SET['命令别名']['b']))
| 2.09375 | 2 |
checks/check_nicusage.py | graphingit/droko-agent | 0 | 12797335 | <gh_stars>0
#!/usr/bin/env __PYTHONVER__
import psutil
import time
import sys
if __name__ == "__main__":
while True:
try:
# Get the current unix epoch time
now = str(int(time.time() / 1))
# Output needs to be in comma format, eg:
# epoch,checkname,value,metadata
# Eg:
# 1430157858,os.nic.bytes.wlan0_out,32863319809,metric=bps;title=wlan0_out
# 1430157858,os.nic.bytes.wlan0_in,6320095757,metric=bps;title=wlan0_in
# 1430157858,os.nic.bytes.em1_out,4128073428,metric=bps;title=em1_out
# 1430157858,os.nic.bytes.em1_in,3156351939,metric=bps;title=em1_in
# List of network devices we want to exclude
filternics = ['lo']
nics = psutil.net_io_counters(pernic=True)
for nic in nics:
(bytes_sent, bytes_recv, packets_sent, packets_recv, errin, errout, dropin, dropout) = nics[nic]
if nic not in filternics:
# Ignore inactive network interfaces
if packets_recv != 0 and packets_sent != 0:
print(now + ",os.nic.bytes." + nic + "_out," + str(bytes_sent) + ",metric=bps;function=derivative;inversion=-1;title=" + nic + "_out")
print(now + ",os.nic.bytes." + nic + "_in," + str(bytes_recv) + ",metric=bps;function=derivative;inversion=1;title=" + nic + "_in")
print(now + ",os.nic.packets." + nic + "_sent," + str(packets_sent) + ",metric=pps;function=derivative;inversion=-1;title=" + nic + "_sent")
print(now + ",os.nic.packets." + nic + "_recv," + str(packets_recv) + ",metric=pps;function=derivative;inversion=1;title=" + nic + "_recv")
sys.stdout.flush()
time.sleep(1)
        except Exception:
            # Swallow transient psutil/formatting errors, but let KeyboardInterrupt stop the loop.
            pass
| 2.421875 | 2 |
.projectsAg/model/mail.py | Hraesvel/portfolioSite_flutter | 0 | 12797336 | <reponame>Hraesvel/portfolioSite_flutter
import json
import os
import boto3
from botocore.exceptions import ClientError
# from email.mime.multipart import MIMEMultipart
# from email.mime.text import MIMEText
# from email.mime.application import MIMEApplication
region = os.environ['Region']
def send_mail(msg):
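    """Send a contact-form style message through SES.

    `msg` is expected to be a dict providing at least 'reply_address' and 'body'
    (lambda_handler below passes the Lambda event straight in).
    """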
client_ses = boto3.client('ses', region)
try:
        # Ask SES to verify the sender's address (msg is a dict, so pass the address field, not the dict).
        client_ses.verify_email_address(EmailAddress=msg['reply_address'])
response = client_ses.send_email(
Source='<EMAIL>',
Destination={
'ToAddresses': [],
'CcAddresses': [],
'BccAddresses': []
},
Message={
'Subject': {'Data': f"from {msg['reply_address']}"},
'Body': {'Text': {'Data': msg['body']}}},
ReplyToAddresses=[msg['reply_address']],
)
except ClientError as e:
output = e.response['Error']['Message']
else:
output = "Email sent! Message ID: " + response['MessageId']
return output
def lambda_handler(event, context):
# print(event)
# print(event['reply_address'])
print(send_mail(event))
return {
'statusCode': 200,
'body': json.dumps('Hello from Lambda!')
}
| 2.203125 | 2 |
model.py | liuying350169/fl | 0 | 12797337 | import torch
import time
import torch.nn as nn
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
)
self.out = nn.Linear(in_features=32 * 7 * 7, out_features=10)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size(0), -1)
output = self.out(x)
return output, x
class AlexNet(nn.Module):
def __init__(self):
super(AlexNet, self).__init__()
self.conv_layers = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=96, kernel_size=3, stride=2, padding=5),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=96, out_channels=256, kernel_size=5, stride=1, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(in_channels=256, out_channels=384, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=384, out_channels=384, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=384, out_channels=256, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.fc_layers = nn.Sequential(
nn.Linear(256 * 1 * 1, 4096),
nn.Dropout(0.5),
nn.Linear(4096, 4096),
nn.Dropout(0.5),
nn.Linear(4096, 10),
)
def forward(self, x):
x = self.conv_layers(x)
x = x.view(x.size(0), -1)
output = self.fc_layers(x)
return output, x
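# Quick shape check (illustrative, not part of the original file). Both networks assume
# single-channel 28x28 inputs (MNIST-sized) and return (logits, flattened features).
if __name__ == "__main__":
    dummy = torch.randn(2, 1, 28, 28)
    for net in (CNN(), AlexNet()):
        logits, features = net(dummy)
        print(type(net).__name__, tuple(logits.shape), tuple(features.shape))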
| 3.1875 | 3 |
pybabel_acorn/__init__.py | qbane/pybabel-acorn | 0 | 12797338 | import sys
from contextlib import contextmanager
from os import path
import importlib
# https://stackoverflow.com/a/41904558/2281355
@contextmanager
def add_to_path(p):
import sys
old_path = sys.path
old_modules = sys.modules
sys.modules = old_modules.copy()
sys.path = sys.path[:]
sys.path.insert(0, p)
try:
yield
finally:
sys.path = old_path
sys.modules = old_modules
def extract_javascript(fileobj, keywords, comment_tags, options):
# import the original lexer before altering sys.path
# this way, our mocked tokenizer can still access the original lexer
# and utilities
import babel.messages.jslexer
with add_to_path(path.dirname(__file__)):
# replace the jslexer
# first, reload all parent namespace so that it can adapt the new sys.path...
import babel
importlib.reload(babel)
import babel.messages
importlib.reload(babel.messages)
# this should load our mocked jslexer
importlib.reload(babel.messages.jslexer)
# babel.messages.extract is not changed, so we can use directly
from babel.messages.extract import extract_javascript
yield from extract_javascript(fileobj, keywords, comment_tags, options)
| 2.375 | 2 |
datastore/db.py | cs0x65/soil-classifier | 0 | 12797339 | import psycopg2
from psycopg2 import Error
connection = None  # ensure the name exists for the finally block even if connect() fails
try:
# Connect to an existing database
connection = psycopg2.connect(user="sa",
password="<PASSWORD>",
host="127.0.0.1",
port="5432",
database="soildb")
# Create a cursor to perform database operations
cursor = connection.cursor()
# Print PostgreSQL details
print("PostgreSQL server information")
print(connection.get_dsn_parameters(), "\n")
# Executing a SQL query
cursor.execute("SELECT version();")
# Fetch result
record = cursor.fetchone()
print("You are connected to - ", record, "\n")
print("Retrieving soil records...")
cursor.execute("SELECT * from soil_profile limit 10")
# Fetch result
records = cursor.fetchall()
print(f"soil records = {records}")
except (Exception, Error) as error:
print("Error while connecting to PostgreSQL", error)
finally:
if connection:
cursor.close()
connection.close()
print("PostgreSQL connection is closed") | 3.5625 | 4 |
database/compiled_templates/webapps/galaxy/admin/tool_sheds.mako.py | psnehal/MethylSig | 0 | 12797340 | # -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1433361565.2110319
_template_filename='templates/webapps/galaxy/admin/tool_sheds.mako'
_template_uri='/webapps/galaxy/admin/tool_sheds.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='ascii'
_exports = ['stylesheets', 'title']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
# SOURCE LINE 2
ns = runtime.TemplateNamespace('__anon_0x7f903c23edd0', context._clean_inheritance_tokens(), templateuri=u'/message.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x7f903c23edd0')] = ns
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'/base.mako', _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f903c23edd0')._populate(_import_ns, [u'render_msg'])
status = _import_ns.get('status', context.get('status', UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
message = _import_ns.get('message', context.get('message', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
render_msg = _import_ns.get('render_msg', context.get('render_msg', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n')
# SOURCE LINE 2
__M_writer(u'\n\n')
# SOURCE LINE 4
__M_writer(u'\n\n')
# SOURCE LINE 9
__M_writer(u'\n\n')
# SOURCE LINE 11
if message:
# SOURCE LINE 12
__M_writer(u' ')
__M_writer(unicode(render_msg( message, status )))
__M_writer(u'\n')
pass
# SOURCE LINE 14
__M_writer(u'\n<div class="toolForm">\n <div class="toolFormTitle">Accessible Galaxy tool sheds</div>\n <div class="toolFormBody">\n <div class="form-row">\n <table class="grid">\n ')
# SOURCE LINE 20
shed_id = 0
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin_stored[__M_key]) for __M_key in ['shed_id'] if __M_key in __M_locals_builtin_stored]))
__M_writer(u'\n')
# SOURCE LINE 21
for name, url in trans.app.tool_shed_registry.tool_sheds.items():
# SOURCE LINE 22
__M_writer(u' <tr class="libraryTitle">\n <td>\n <div style="float: left; margin-left: 1px;" class="menubutton split popup" id="dataset-')
# SOURCE LINE 24
__M_writer(unicode(shed_id))
__M_writer(u'-popup">\n <a class="view-info" href="')
# SOURCE LINE 25
__M_writer(unicode(h.url_for( controller='admin_toolshed', action='browse_tool_shed', tool_shed_url=url )))
__M_writer(u'">')
__M_writer(unicode(name))
__M_writer(u'</a>\n </div>\n <div popupmenu="dataset-')
# SOURCE LINE 27
__M_writer(unicode(shed_id))
__M_writer(u'-popup">\n <a class="action-button" href="')
# SOURCE LINE 28
__M_writer(unicode(h.url_for( controller='admin_toolshed', action='browse_tool_shed', tool_shed_url=url )))
__M_writer(u'">Browse valid repositories</a>\n <a class="action-button" href="')
# SOURCE LINE 29
__M_writer(unicode(h.url_for( controller='admin_toolshed', action='find_tools_in_tool_shed', tool_shed_url=url )))
__M_writer(u'">Search for valid tools</a>\n <a class="action-button" href="')
# SOURCE LINE 30
__M_writer(unicode(h.url_for( controller='admin_toolshed', action='find_workflows_in_tool_shed', tool_shed_url=url )))
__M_writer(u'">Search for workflows</a>\n </div>\n </td>\n </tr>\n ')
# SOURCE LINE 34
shed_id += 1
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin_stored[__M_key]) for __M_key in ['shed_id'] if __M_key in __M_locals_builtin_stored]))
__M_writer(u'\n')
pass
# SOURCE LINE 36
__M_writer(u' </tr>\n </table>\n </div>\n <div style="clear: both"></div>\n </div>\n</div>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_stylesheets(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f903c23edd0')._populate(_import_ns, [u'render_msg'])
h = _import_ns.get('h', context.get('h', UNDEFINED))
parent = _import_ns.get('parent', context.get('parent', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 6
__M_writer(u'\n ')
# SOURCE LINE 7
__M_writer(unicode(parent.stylesheets()))
__M_writer(u'\n ')
# SOURCE LINE 8
__M_writer(unicode(h.css( "library" )))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_title(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f903c23edd0')._populate(_import_ns, [u'render_msg'])
__M_writer = context.writer()
# SOURCE LINE 4
__M_writer(u'Configured Galaxy tool sheds')
return ''
finally:
context.caller_stack._pop_frame()
| 1.773438 | 2 |
tests/xtest_laser_power2.py | dapperfu/python_GCode | 0 | 12797341 | import time
import grbl
import pytest
import gcode
@pytest.fixture(scope="session")
def cnc(request):
grbl_cfg = {
"port": request.config.getoption("--port"),
"baudrate": request.config.getoption("--baudrate"),
}
cnc = grbl.Grbl(**grbl_cfg)
time.sleep(2)
cnc.reset()
# Metric
cnc.cmd("G21")
cnc.cmd("G91")
cnc.cmd("G0X5Y5F300")
# Set this to 0.
# TODO: Get end-stops installed.
cnc.cmd("G92X0Y0Z0")
yield cnc
cnc.cmd("G90")
cnc.cmd("G0X0Y0F300")
def test_default_line():
print(gcode.Line())
def test_00_row1(cnc):
prog = gcode.GCode(machine=cnc)
prog.G90()
prog.G0(X=0, Y=0)
prog.run()
cnc.reset()
@pytest.mark.parametrize("laser_power", [10, 50, 75, 100, 150, 200, 255])
def test_01_laser_power(cnc, laser_power):
prog = gcode.Line(power=laser_power, machine=cnc)
cnc.cmd("G91")
prog.run()
cnc.reset()
def test_02_row2(cnc):
prog = gcode.GCode(machine=cnc)
prog.G90()
prog.G0(X=0, Y=10)
prog.run()
cnc.reset()
@pytest.mark.parametrize("feed", [30, 60, 120, 180, 240, 300])
def test_03_laser_feed(cnc, feed):
prog = gcode.Line(power=255, feed=feed, machine=cnc)
cnc.cmd("G91")
prog.run()
cnc.reset()
def test_04_row3(cnc):
prog = gcode.GCode(machine=cnc)
prog.G90()
prog.G0(X=0, Y=20)
prog.run()
cnc.reset()
@pytest.mark.parametrize("dynamic_power", [True, False])
@pytest.mark.parametrize("power", [150, 200, 255])
@pytest.mark.parametrize("feed", [30, 180])
def test_05_laser_power_feed(cnc, dynamic_power, power, feed):
prog = gcode.Line(machine=cnc, dynamic_power=dynamic_power, power=power, feed=feed)
cnc.cmd("G91")
prog.run()
cnc.reset()
| 1.84375 | 2 |
tests/test_raw_lightcurve.py | konkolyseismolab/autoeap | 3 | 12797342 | import numpy as np
import autoeap
from numpy.testing import assert_array_almost_equal
import os
PACKAGEDIR = os.path.abspath(os.path.dirname(__file__))
def test_raw_lightcurve():
time,flux,flux_err = autoeap.createlightcurve('EPIC220198696',campaign=8)
lc = np.genfromtxt(os.path.join(PACKAGEDIR,"EPIC220198696_c8_autoEAP.lc"),skip_header=1).T
assert_array_almost_equal(time,lc[0])
assert_array_almost_equal(flux,lc[1].astype(np.float32))
assert_array_almost_equal(flux_err,lc[2].astype(np.float32))
| 2.09375 | 2 |
common/services/unchained.py | shapeshift-legacy/watchtower | 0 | 12797343 | import logging
import os
import urllib3
import ast
from common.utils.requests import http
from common.utils.networks import ETH
from common.services import cointainer_web3 as web3
from common.utils.ethereum import ERC20_ABI
logger = logging.getLogger('watchtower.common.services.unchained')
class UnchainedClient(object):
def __init__(self, network):
baseurl = self.get_baseurl(network)
if baseurl is None:
raise Exception(
'UnchainedClient is not supported for network: {}'.format(network)
)
self.network = network
self.baseurl = baseurl
@staticmethod
def get_baseurl(network):
return {
ETH: os.getenv('UNCHAINED_ETH_URL')
}.get(network)
def get_balances(self, address, account_id, supported_tokens=None):
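        """Return balances for `address` keyed by lowercased token contract address.

        Queries the unchained/blockbook endpoint
        `/api/v2/address/{address}?details=tokenBalances`; the plain ETH balance is
        stored under the `ETH` key. If a WETH contract is listed in `supported_tokens`
        but absent from the response, its balance is read from the ERC-20 contract
        directly via web3.
        """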
if not address:
logger.error("Unable to get %s balances for account: %s. No associated address.", self.network, account_id)
return dict()
resp = http.get('{}/api/v2/address/{}?details=tokenBalances'.format(self.baseurl, address)).json_data
balances = {token.get('contract').lower(): token.get('balance') for token in resp.get('tokens', list())}
balances[ETH] = resp.get('balance')
try:
weth_contract_address = supported_tokens.get('WETH') if supported_tokens and supported_tokens.get('WETH') else None
if weth_contract_address:
if balances.get(weth_contract_address) is None:
weth_address = web3.toChecksumAddress(weth_contract_address)
if weth_address:
contract = web3.eth.contract(address=weth_address, abi=ERC20_ABI)
balance = contract.functions.balanceOf(address).call()
balances[weth_address.lower()] = balance
except Exception as e:
logger.error("Failed to fetch WETH: %s balance for address: %s", weth_contract_address, address)
logger.error(e)
return balances
def get_client(network):
return UnchainedClient(network)
| 2.234375 | 2 |
PythonAndOop/N39_method_overloading_3.py | jiauy/before_work | 0 | 12797344 | class MyList(list):
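    """A list subclass that demonstrates operator overriding with 1-based indexing.

    Positive indices are shifted down by one (x[1] is the first element), index 0
    raises IndexError, and negative indices behave as in a normal list.
    """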
def __getitem__(self, index):
if index == 0:
raise IndexError
if index > 0:
index -= 1
return list.__getitem__(self, index)
def __setitem__(self, index, value):
if index == 0:
raise IndexError
if index > 0:
index -= 1
list.__setitem__(self, index, value)
if __name__ == '__main__':
x = MyList(['a', 'b', 'c'])
print(x)
print("-" * 10)
x.append('d')
print(x)
print("-" * 10)
x.__setitem__(4, 'e')
print(x)
print("-" * 10)
print(x[1])
print(x.__getitem__(1))
print("-" * 10)
print(x[4])
print(x.__getitem__(4)) | 3.859375 | 4 |
build/lib/drf_advanced_token/management/commands/revoke_all_tokens.py | amp89/drf_advanced_token | 2 | 12797345 | from django.core.management.base import BaseCommand
from rest_framework.authtoken.models import Token
class Command(BaseCommand):
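    """Delete every DRF auth token, which logs out all token-authenticated users.

    Usage: python manage.py revoke_all_tokens --force
    """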
def add_arguments(self, parser):
parser.add_argument(
'--force',
action='store_true',
help='WARNING - Understand that this logs out and *PERMANENTLY* DELETES THE TOKENS FOR ALL USERS',
)
def handle(self, *args, **options):
if not options["force"]:
print("Include --force if you understand that this will log out all users.")
else:
Token.objects.all().delete()
print("All auth tokens deleted.")
| 2.046875 | 2 |
Tests/test_main_pytest.py | miki4920/ChessAI | 0 | 12797346 | from config import Config
from main import determine_colour, return_tile_colour, coordinates_to_notation
def test_determine_colour():
assert determine_colour(0, 0)
assert not determine_colour(0, 7)
assert not determine_colour(7, 0)
assert determine_colour(7, 7)
def test_return_tile_colour():
assert return_tile_colour(True) == Config.COLOUR_WHITE
assert return_tile_colour(False) == Config.COLOUR_BLACK
def test_coordinates_to_notation():
assert coordinates_to_notation(0, 0) == "A8"
assert coordinates_to_notation(7, 0) == "H8"
assert coordinates_to_notation(0, 7) == "A1"
assert coordinates_to_notation(7, 7) == "H1"
| 3.109375 | 3 |
tools/mqtt_listener.py | SaibboRiginal/Python_remote_PLC | 0 | 12797347 | # Essential modules import
import json
from paho.mqtt.client import *
# Variables modules import
from tools import *
# Importing custom Utility modules
from utility.logger import MyLogger
log = MyLogger("mqtt") # Logger
class Marker(Client):
'''
Client Marker : Broker client to send and/or receive MQTT publications.
'''
def __init__(self):
Client.__init__(self)
if USER: # Set authentication, if set
self.username_pw_set(USER, PASSW)
self.connect(IP_BROKER, PORT) # Connecting Client to Broker
self.obj = []
def on_connect(self, client, userdata, flags, rc):
'''
        Do something when the Client successfully connects to the Broker.
'''
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
for topic in SUBSCRIPTIONS:
self.subscribe(topic)
def on_message(self, client, userdata, msg):
'''
Client receiving a publication.
'''
try:
topic = msg.topic
try: # Checks if payload is a valid JSON
j = json.loads(msg.payload.decode("utf-8")); log_message = ''
for key, value in j.items():
log_message += "[%s > %s]" % (key, value)
message = j
json_check = True
except:
log_message = msg.payload
message = msg.payload
json_check = False
log.info("%s received from %s ~ %s" % (log.timestamp(), topic, log_message))
if self.obj: # Check if any object is connected to MQTT
for _obj in self.obj:
                    _obj.receivedMQTT(topic, message, json=json_check) # receivedMQTT() callback on the attached object
except Exception as e:
log.warning("Failed something..")
log.exception(str(e))
def send_message(self, topic, message):
'''
Publish to destination topic a message.
topic : <STRING> Topic where to send.
message : <STRING> Payload to send.
'''
try:
if type(message) == dict:
self.publish(topic, json.dumps(message))
else:
self.publish(topic, message)
log.info("Sucseful")
#log.info("%s published to %s ~ %s" % (log.timestamp(), topic, message))
except Exception as e:
log.warning("Failed something..")
log.exception(str(e))
def attach(self, _object):
'''
        Attach an object to receive MQTT publications. The object must implement:
            def receivedMQTT(topic, message, json=False):
                # handle the publication
        json = <BOOL> True when message is a stringified JSON.
'''
try:
self.obj.append(_object)
log.info("Attached to broker")
except Exception as e:
log.exception(str(e))
log.error("Not attached to broker")
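# Illustrative wiring (not part of the original module): any object exposing a
# receivedMQTT(topic, message, json=False) method can be attached, and send_message()
# publishes plain strings or dicts (dicts are JSON-encoded). Broker settings
# (IP_BROKER, PORT, USER, PASSW, SUBSCRIPTIONS) come from the local `tools` module.
if __name__ == "__main__":
    class Printer(object):
        def receivedMQTT(self, topic, message, json=False):
            print(topic, message, json)

    marker = Marker()
    marker.attach(Printer())
    marker.loop_forever()  # paho-mqtt network loop: blocks and dispatches on_message()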
| 2.765625 | 3 |
vmfw.py | whs/OvzCP | 1 | 12797348 | import os, sys, ConfigParser
sys.path.insert(0, os.path.join(os.getcwd(), "Jinja2-2.3-py2.5.egg"))
sys.path.append(os.path.join(os.getcwd(), "netifaces-0.5-py2.5-linux-i686.egg"))
import jinja2, netifaces
_config = ConfigParser.SafeConfigParser()
_config.read("config.ini")
# iptables forwarding configuration generator
def update(data):
jinja = jinja2.Environment(loader=jinja2.loaders.FileSystemLoader("template"))
ip={}
for iface in netifaces.interfaces():
try:
ip[iface] = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']
except KeyError:
pass
except ValueError:
pass
d = jinja.get_template("vmfw.sh").render(port=data, ip=ip, vmip=_config.get("iface", "vmIP"))
open("sysconf/vmfw.sh", "w").write(d)
def restart():
os.system("sysconf/vmfw.sh")
if __name__ == "__main__":
from models import PortForward
update(PortForward.select())
restart() | 2.28125 | 2 |
back/test.py | sresoft/thengrow | 0 | 12797349 | <filename>back/test.py
from minitwit import *
couch = Couch('sre.cloudant.com') | 1.335938 | 1 |
LeetCode/905 Sort Array By Parity.py | gesuwen/Algorithms | 0 | 12797350 | <reponame>gesuwen/Algorithms<filename>LeetCode/905 Sort Array By Parity.py
# Array
# Given an array A of non-negative integers, return an array consisting of all the even elements of A, followed by all the odd elements of A.
#
# You may return any answer array that satisfies this condition.
#
#
#
# Example 1:
#
# Input: [3,1,2,4]
# Output: [2,4,3,1]
# The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
#
#
# Note:
#
# 1 <= A.length <= 5000
# 0 <= A[i] <= 5000
class Solution:
def sortArrayByParity(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
output = []
for i in A:
if i % 2 == 0:
output = [i] + output
else:
output += [i]
return output
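# Illustrative check (not part of the original solution). For the sample input this
# implementation returns [4, 2, 3, 1]: evens first (in reverse encounter order,
# because they are prepended), then odds in order, which the problem accepts.
if __name__ == "__main__":
    print(Solution().sortArrayByParity([3, 1, 2, 4]))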
| 3.96875 | 4 |