code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M)
---|---|---|---|---|---
import math
import theano.tensor as T
# ----------------------------------------------------------------------------
# this is all taken from the parmesan lib
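# log of the unit-variance Gaussian normalisation constant, log(1/sqrt(2*pi));
# the sigma-dependent term is handled separately inside each log-density below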
c = - 0.5 * math.log(2*math.pi)
def log_bernoulli(x, p, eps=1e-5):
"""
Compute log pdf of a Bernoulli distribution with success probability p, at values x.
.. math:: \log p(x; p) = \log \mathcal{B}(x; p)
Parameters
----------
x : Theano tensor
Values at which to evaluate pdf.
p : Theano tensor
Success probability :math:`p(x=1)`, which is also the mean of the Bernoulli distribution.
eps : float
Small number used to avoid NaNs by clipping p in range [eps;1-eps].
Returns
-------
Theano tensor
Element-wise log probability; this has to be summed over dimensions for multivariate distributions.
"""
p = T.clip(p, eps, 1.0 - eps)
return -T.nnet.binary_crossentropy(p, x)
def log_normal(x, mean, std, eps=1e-5):
"""
Compute log pdf of a Gaussian distribution with diagonal covariance, at values x.
Variance is parameterized as standard deviation.
.. math:: \log p(x) = \log \mathcal{N}(x; \mu, \sigma^2I)
Parameters
----------
x : Theano tensor
Values at which to evaluate pdf.
mean : Theano tensor
Mean of the Gaussian distribution.
std : Theano tensor
Standard deviation of the diagonal covariance Gaussian.
eps : float
Small number added to standard deviation to avoid NaNs.
Returns
-------
Theano tensor
Element-wise log probability; this has to be summed over dimensions for multivariate distributions.
See also
--------
log_normal1 : using variance parameterization
log_normal2 : using log variance parameterization
"""
abs_std = T.abs_(std) + eps
return c - T.log(abs_std) - (x - mean)**2 / (2 * abs_std**2)
def log_normal2(x, mean, log_var, eps=1e-5):
"""
Compute log pdf of a Gaussian distribution with diagonal covariance, at values x.
Variance is parameterized as log variance rather than standard deviation, which ensures :math:`\sigma > 0`.
.. math:: \log p(x) = \log \mathcal{N}(x; \mu, \sigma^2I)
Parameters
----------
x : Theano tensor
Values at which to evaluate pdf.
mean : Theano tensor
Mean of the Gaussian distribution.
log_var : Theano tensor
Log variance of the diagonal covariance Gaussian.
eps : float
Small number added to denominator to avoid NaNs.
Returns
-------
Theano tensor
Element-wise log probability; this has to be summed over dimensions for multivariate distributions.
See also
--------
log_normal : using standard deviation parameterization
log_normal1 : using variance parameterization
"""
return c - log_var/2 - (x - mean)**2 / (2 * T.exp(log_var) + eps)
| kuleshov/deep-learning-models | models/distributions/distributions.py | Python | mit | 2,861 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-27 07:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0020_auto_20160627_0139'),
]
operations = [
migrations.RemoveField(
model_name='like',
name='company',
),
migrations.RemoveField(
model_name='like',
name='user',
),
migrations.DeleteModel(
name='Like',
),
]
| iharsh234/eElectronics | posts/migrations/0021_auto_20160627_1326.py | Python | mit | 580 |
import time
from multiprocessing import Value
import pytest
from dash import Dash, Input, Output, State, callback_context, html, dcc, dash_table
from dash.exceptions import PreventUpdate
def test_cbmt001_called_multiple_times_and_out_of_order(dash_duo):
app = Dash(__name__)
app.layout = html.Div([html.Button(id="input", n_clicks=0), html.Div(id="output")])
call_count = Value("i", 0)
@app.callback(Output("output", "children"), [Input("input", "n_clicks")])
def update_output(n_clicks):
call_count.value = call_count.value + 1
if n_clicks == 1:
time.sleep(1)
return n_clicks
dash_duo.start_server(app)
dash_duo.multiple_click("#input", clicks=3)
time.sleep(3)
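# the callback fires once when the layout first loads and once per click, so three clicks give four calls in total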
assert call_count.value == 4, "get called 4 times"
assert dash_duo.find_element("#output").text == "3", "clicked button 3 times"
assert not dash_duo.redux_state_is_loading
dash_duo.percy_snapshot(
name="test_callbacks_called_multiple_times_and_out_of_order"
)
def test_cbmt002_canceled_intermediate_callback(dash_duo):
# see https://github.com/plotly/dash/issues/1053
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Input(id="a", value="x"),
html.Div("b", id="b"),
html.Div("c", id="c"),
html.Div(id="out"),
]
)
@app.callback(
Output("out", "children"),
[Input("a", "value"), Input("b", "children"), Input("c", "children")],
)
def set_out(a, b, c):
return "{}/{}/{}".format(a, b, c)
@app.callback(Output("b", "children"), [Input("a", "value")])
def set_b(a):
raise PreventUpdate
@app.callback(Output("c", "children"), [Input("a", "value")])
def set_c(a):
return a
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#out", "x/b/x")
chars = "x"
for i in list(range(10)) * 2:
dash_duo.find_element("#a").send_keys(str(i))
chars += str(i)
dash_duo.wait_for_text_to_equal("#out", "{0}/b/{0}".format(chars))
def test_cbmt003_chain_with_table(dash_duo):
# see https://github.com/plotly/dash/issues/1071
app = Dash(__name__)
app.layout = html.Div(
[
html.Div(id="a1"),
html.Div(id="a2"),
html.Div(id="b1"),
html.H1(id="b2"),
html.Button("Update", id="button"),
dash_table.DataTable(id="table"),
]
)
@app.callback(
# Changing the order of outputs here fixes the issue
[Output("a2", "children"), Output("a1", "children")],
[Input("button", "n_clicks")],
)
def a12(n):
return "a2: {!s}".format(n), "a1: {!s}".format(n)
@app.callback(Output("b1", "children"), [Input("a1", "children")])
def b1(a1):
return "b1: '{!s}'".format(a1)
@app.callback(
Output("b2", "children"),
[Input("a2", "children"), Input("table", "selected_cells")],
)
def b2(a2, selected_cells):
return "b2: '{!s}', {!s}".format(a2, selected_cells)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#a1", "a1: None")
dash_duo.wait_for_text_to_equal("#a2", "a2: None")
dash_duo.wait_for_text_to_equal("#b1", "b1: 'a1: None'")
dash_duo.wait_for_text_to_equal("#b2", "b2: 'a2: None', None")
dash_duo.find_element("#button").click()
dash_duo.wait_for_text_to_equal("#a1", "a1: 1")
dash_duo.wait_for_text_to_equal("#a2", "a2: 1")
dash_duo.wait_for_text_to_equal("#b1", "b1: 'a1: 1'")
dash_duo.wait_for_text_to_equal("#b2", "b2: 'a2: 1', None")
dash_duo.find_element("#button").click()
dash_duo.wait_for_text_to_equal("#a1", "a1: 2")
dash_duo.wait_for_text_to_equal("#a2", "a2: 2")
dash_duo.wait_for_text_to_equal("#b1", "b1: 'a1: 2'")
dash_duo.wait_for_text_to_equal("#b2", "b2: 'a2: 2', None")
@pytest.mark.parametrize("MULTI", [False, True])
def test_cbmt004_chain_with_sliders(MULTI, dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[
html.Button("Button", id="button"),
html.Div(
[
html.Label(id="label1"),
dcc.Slider(id="slider1", min=0, max=10, value=0),
]
),
html.Div(
[
html.Label(id="label2"),
dcc.Slider(id="slider2", min=0, max=10, value=0),
]
),
]
)
if MULTI:
@app.callback(
[Output("slider1", "value"), Output("slider2", "value")],
[Input("button", "n_clicks")],
)
def update_slider_vals(n):
if not n:
raise PreventUpdate
return n, n
else:
@app.callback(Output("slider1", "value"), [Input("button", "n_clicks")])
def update_slider1_val(n):
if not n:
raise PreventUpdate
return n
@app.callback(Output("slider2", "value"), [Input("button", "n_clicks")])
def update_slider2_val(n):
if not n:
raise PreventUpdate
return n
@app.callback(Output("label1", "children"), [Input("slider1", "value")])
def update_slider1_label(val):
return "Slider1 value {}".format(val)
@app.callback(Output("label2", "children"), [Input("slider2", "value")])
def update_slider2_label(val):
return "Slider2 value {}".format(val)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#label1", "")
dash_duo.wait_for_text_to_equal("#label2", "")
dash_duo.find_element("#button").click()
dash_duo.wait_for_text_to_equal("#label1", "Slider1 value 1")
dash_duo.wait_for_text_to_equal("#label2", "Slider2 value 1")
dash_duo.find_element("#button").click()
dash_duo.wait_for_text_to_equal("#label1", "Slider1 value 2")
dash_duo.wait_for_text_to_equal("#label2", "Slider2 value 2")
def test_cbmt005_multi_converging_chain(dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[
html.Button("Button 1", id="b1"),
html.Button("Button 2", id="b2"),
dcc.Slider(id="slider1", min=-5, max=5),
dcc.Slider(id="slider2", min=-5, max=5),
html.Div(id="out"),
]
)
@app.callback(
[Output("slider1", "value"), Output("slider2", "value")],
[Input("b1", "n_clicks"), Input("b2", "n_clicks")],
)
def update_sliders(button1, button2):
if not callback_context.triggered:
raise PreventUpdate
if callback_context.triggered[0]["prop_id"] == "b1.n_clicks":
return -1, -1
else:
return 1, 1
@app.callback(
Output("out", "children"),
[Input("slider1", "value"), Input("slider2", "value")],
)
def update_graph(s1, s2):
return "x={}, y={}".format(s1, s2)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#out", "")
dash_duo.find_element("#b1").click()
dash_duo.wait_for_text_to_equal("#out", "x=-1, y=-1")
dash_duo.find_element("#b2").click()
dash_duo.wait_for_text_to_equal("#out", "x=1, y=1")
def test_cbmt006_derived_props(dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[html.Div(id="output"), html.Button("click", id="btn"), dcc.Store(id="store")]
)
@app.callback(
Output("output", "children"),
[Input("store", "modified_timestamp")],
[State("store", "data")],
)
def on_data(ts, data):
return data
@app.callback(Output("store", "data"), [Input("btn", "n_clicks")])
def on_click(n_clicks):
return n_clicks or 0
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output", "0")
dash_duo.find_element("#btn").click()
dash_duo.wait_for_text_to_equal("#output", "1")
dash_duo.find_element("#btn").click()
dash_duo.wait_for_text_to_equal("#output", "2")
def test_cbmt007_early_preventupdate_inputs_above_below(dash_duo):
app = Dash(__name__, suppress_callback_exceptions=True)
app.layout = html.Div(id="content")
@app.callback(Output("content", "children"), [Input("content", "style")])
def content(_):
return html.Div(
[
html.Div(42, id="above-in"),
html.Div(id="above-dummy"),
html.Hr(),
html.Div(0, id="above-out"),
html.Div(0, id="below-out"),
html.Hr(),
html.Div(id="below-dummy"),
html.Div(44, id="below-in"),
]
)
# Create 4 callbacks - 2 above, 2 below.
for pos in ("above", "below"):
@app.callback(
Output("{}-dummy".format(pos), "children"),
[Input("{}-dummy".format(pos), "style")],
)
def dummy(_):
raise PreventUpdate
@app.callback(
Output("{}-out".format(pos), "children"),
[Input("{}-in".format(pos), "children")],
)
def out(v):
return v
dash_duo.start_server(app)
# as of https://github.com/plotly/dash/issues/1223, above-out would be 0
dash_duo.wait_for_text_to_equal("#above-out", "42")
dash_duo.wait_for_text_to_equal("#below-out", "44")
def test_cbmt008_direct_chain(dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Input(id="input-1", value="input 1"),
dcc.Input(id="input-2"),
html.Div("test", id="output"),
]
)
call_counts = {"output": Value("i", 0), "input-2": Value("i", 0)}
@app.callback(Output("input-2", "value"), Input("input-1", "value"))
def update_input(input1):
call_counts["input-2"].value += 1
return "<<{}>>".format(input1)
@app.callback(
Output("output", "children"),
Input("input-1", "value"),
Input("input-2", "value"),
)
def update_output(input1, input2):
call_counts["output"].value += 1
return "{} + {}".format(input1, input2)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#input-1", "input 1")
dash_duo.wait_for_text_to_equal("#input-2", "<<input 1>>")
dash_duo.wait_for_text_to_equal("#output", "input 1 + <<input 1>>")
assert call_counts["output"].value == 1
assert call_counts["input-2"].value == 1
dash_duo.find_element("#input-1").send_keys("x")
dash_duo.wait_for_text_to_equal("#input-1", "input 1x")
dash_duo.wait_for_text_to_equal("#input-2", "<<input 1x>>")
dash_duo.wait_for_text_to_equal("#output", "input 1x + <<input 1x>>")
assert call_counts["output"].value == 2
assert call_counts["input-2"].value == 2
dash_duo.find_element("#input-2").send_keys("y")
dash_duo.wait_for_text_to_equal("#input-2", "<<input 1x>>y")
dash_duo.wait_for_text_to_equal("#output", "input 1x + <<input 1x>>y")
dash_duo.wait_for_text_to_equal("#input-1", "input 1x")
assert call_counts["output"].value == 3
assert call_counts["input-2"].value == 2
def test_cbmt009_branched_chain(dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Input(id="grandparent", value="input 1"),
dcc.Input(id="parent-a"),
dcc.Input(id="parent-b"),
html.Div(id="child-a"),
html.Div(id="child-b"),
]
)
call_counts = {
"parent-a": Value("i", 0),
"parent-b": Value("i", 0),
"child-a": Value("i", 0),
"child-b": Value("i", 0),
}
@app.callback(Output("parent-a", "value"), Input("grandparent", "value"))
def update_parenta(value):
call_counts["parent-a"].value += 1
return "a: {}".format(value)
@app.callback(Output("parent-b", "value"), Input("grandparent", "value"))
def update_parentb(value):
time.sleep(0.2)
call_counts["parent-b"].value += 1
return "b: {}".format(value)
@app.callback(
Output("child-a", "children"),
Input("parent-a", "value"),
Input("parent-b", "value"),
)
def update_childa(parenta_value, parentb_value):
time.sleep(0.5)
call_counts["child-a"].value += 1
return "{} + {}".format(parenta_value, parentb_value)
@app.callback(
Output("child-b", "children"),
Input("parent-a", "value"),
Input("parent-b", "value"),
Input("grandparent", "value"),
)
def update_childb(parenta_value, parentb_value, grandparent_value):
call_counts["child-b"].value += 1
return "{} + {} + {}".format(parenta_value, parentb_value, grandparent_value)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#child-a", "a: input 1 + b: input 1")
dash_duo.wait_for_text_to_equal("#child-b", "a: input 1 + b: input 1 + input 1")
dash_duo.wait_for_text_to_equal("#parent-a", "a: input 1")
dash_duo.wait_for_text_to_equal("#parent-b", "b: input 1")
assert call_counts["parent-a"].value == 1
assert call_counts["parent-b"].value == 1
assert call_counts["child-a"].value == 1
assert call_counts["child-b"].value == 1
def test_cbmt010_shared_grandparent(dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[
html.Div("id", id="session-id"),
dcc.Dropdown(id="dropdown-1"),
dcc.Dropdown(id="dropdown-2"),
html.Div(id="output"),
]
)
options = [{"value": "a", "label": "a"}]
call_counts = {"dropdown_1": Value("i", 0), "dropdown_2": Value("i", 0)}
@app.callback(
Output("dropdown-1", "options"),
[Input("dropdown-1", "value"), Input("session-id", "children")],
)
def dropdown_1(value, session_id):
call_counts["dropdown_1"].value += 1
return options
@app.callback(
Output("dropdown-2", "options"),
Input("dropdown-2", "value"),
Input("session-id", "children"),
)
def dropdown_2(value, session_id):
call_counts["dropdown_2"].value += 1
return options
@app.callback(
Output("output", "children"),
Input("dropdown-1", "value"),
Input("dropdown-2", "value"),
)
def set_output(v1, v2):
return (v1 or "b") + (v2 or "b")
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output", "bb")
assert call_counts["dropdown_1"].value == 1
assert call_counts["dropdown_2"].value == 1
assert not dash_duo.get_logs()
def test_cbmt011_callbacks_triggered_on_generated_output(dash_duo):
app = Dash(__name__, suppress_callback_exceptions=True)
call_counts = {"tab1": Value("i", 0), "tab2": Value("i", 0)}
app.layout = html.Div(
[
dcc.Dropdown(
id="outer-controls",
options=[{"label": i, "value": i} for i in ["a", "b"]],
value="a",
),
dcc.RadioItems(
options=[
{"label": "Tab 1", "value": 1},
{"label": "Tab 2", "value": 2},
],
value=1,
id="tabs",
),
html.Div(id="tab-output"),
]
)
@app.callback(Output("tab-output", "children"), Input("tabs", "value"))
def display_content(value):
return html.Div([html.Div(id="tab-{}-output".format(value))])
@app.callback(Output("tab-1-output", "children"), Input("outer-controls", "value"))
def display_tab1_output(value):
call_counts["tab1"].value += 1
return 'Selected "{}" in tab 1'.format(value)
@app.callback(Output("tab-2-output", "children"), Input("outer-controls", "value"))
def display_tab2_output(value):
call_counts["tab2"].value += 1
return 'Selected "{}" in tab 2'.format(value)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#tab-output", 'Selected "a" in tab 1')
dash_duo.wait_for_text_to_equal("#tab-1-output", 'Selected "a" in tab 1')
assert call_counts["tab1"].value == 1
assert call_counts["tab2"].value == 0
dash_duo.find_elements('input[type="radio"]')[1].click()
dash_duo.wait_for_text_to_equal("#tab-output", 'Selected "a" in tab 2')
dash_duo.wait_for_text_to_equal("#tab-2-output", 'Selected "a" in tab 2')
assert call_counts["tab1"].value == 1
assert call_counts["tab2"].value == 1
assert not dash_duo.get_logs()
@pytest.mark.parametrize("generate", [False, True])
def test_cbmt012_initialization_with_overlapping_outputs(generate, dash_duo):
app = Dash(__name__, suppress_callback_exceptions=generate)
block = html.Div(
[
html.Div(id="input-1", children="input-1"),
html.Div(id="input-2", children="input-2"),
html.Div(id="input-3", children="input-3"),
html.Div(id="input-4", children="input-4"),
html.Div(id="input-5", children="input-5"),
html.Div(id="output-1"),
html.Div(id="output-2"),
html.Div(id="output-3"),
html.Div(id="output-4"),
]
)
call_counts = {
"container": Value("i", 0),
"output-1": Value("i", 0),
"output-2": Value("i", 0),
"output-3": Value("i", 0),
"output-4": Value("i", 0),
}
if generate:
app.layout = html.Div([html.Div(id="input"), html.Div(id="container")])
@app.callback(Output("container", "children"), Input("input", "children"))
def set_content(_):
call_counts["container"].value += 1
return block
else:
app.layout = block
def generate_callback(outputid):
def callback(*args):
call_counts[outputid].value += 1
return "{}, {}".format(*args)
return callback
for i in range(1, 5):
outputid = "output-{}".format(i)
app.callback(
Output(outputid, "children"),
Input("input-{}".format(i), "children"),
Input("input-{}".format(i + 1), "children"),
)(generate_callback(outputid))
dash_duo.start_server(app)
for i in range(1, 5):
outputid = "output-{}".format(i)
dash_duo.wait_for_text_to_equal(
"#{}".format(outputid), "input-{}, input-{}".format(i, i + 1)
)
assert call_counts[outputid].value == 1
assert call_counts["container"].value == (1 if generate else 0)
| plotly/dash | tests/integration/callbacks/test_multiple_callbacks.py | Python | mit | 18,497 |
import unittest
from os import path
class RepoFiles(unittest.TestCase):
FILES = [
['./Dockerfile', './docker/Dockerfile'],
['./docker-compose.yml', './docker/docker-compose.yml'],
['./.env_sample'],
['./.gitignore'],
['./CHANGELOG.md'],
['./CODE_OF_CONDUCT.md'],
['./CONTRIBUTING.md'],
['./ISSUE_TEMPLATE.md'],
['./LICENSE'],
['./PULL_REQUEST_TEMPLATE.md'],
['./README.rst'],
['./TROUBLESHOOTING.md'],
['./USAGE.md'],
['./VERSION.txt']
]
def _all_file(self, files):
"""
Checks the list of files and sees if they exist. If all of them don't
exist, returns False. Otherwise, return True.
"""
return all(map(lambda f: not path.isfile(f), files))
def test_file_existence(self):
missing = list(filter(self._all_file, self.FILES))
self.assertEqual(len(missing), 0,
"Files {} aren't found".format(missing)
)
| sendgrid/python-http-client | tests/test_repofiles.py | Python | mit | 1,039 |
def get_service(context):
return context.services["emulator"]
def get_data(context):
return get_service(context).data
def get_search(context):
return get_service(context).data.search
def get_system(key, context):
return get_service(context).data.systems[key]
def get_systems(context):
return [d for (k, d) in get_system_items(context)]
def get_system_items(context):
return sorted(get_service(context).data.systems.items())
def get_bookmarks(context):
return get_service(context).data.bookmarks
def get_mru(context):
return get_service(context).data.mru
| dstenb/pylaunchr-emulator | emulator/utils.py | Python | mit | 600 |
"""
Module for adding stimulations to networks
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from numbers import Number
try:
basestring
except NameError:
basestring = str
# -----------------------------------------------------------------------------
# Add stims
# -----------------------------------------------------------------------------
def addStims(self):
"""
Function for/to <short description of `netpyne.network.stim.addStims`>
Parameters
----------
self : <type>
<Short description of self>
**Default:** *required*
"""
from .. import sim
sim.timing('start', 'stimsTime')
if self.params.stimSourceParams and self.params.stimTargetParams:
if sim.rank==0:
print('Adding stims...')
if sim.nhosts > 1: # Gather tags from all cells
allCellTags = sim._gatherAllCellTags()
else:
allCellTags = {cell.gid: cell.tags for cell in self.cells}
# allPopTags = {i: pop.tags for i,pop in enumerate(self.pops)} # gather tags from pops so can connect NetStim pops
sources = self.params.stimSourceParams
for targetLabel, target in self.params.stimTargetParams.items(): # for each target parameter set
if 'sec' not in target: target['sec'] = None # if section not specified, make None (will be assigned to first section in cell)
if 'loc' not in target: target['loc'] = None # if location not specified, make None
source = sources.get(target['source'])
postCellsTags = allCellTags
for condKey,condValue in target['conds'].items(): # Find subset of cells that match postsyn criteria
if condKey in ['x','y','z','xnorm','ynorm','znorm']:
postCellsTags = {gid: tags for (gid,tags) in postCellsTags.items() if condValue[0] <= tags.get(condKey, None) < condValue[1]} # dict with post Cell objects} # dict with pre cell tags
elif condKey == 'cellList':
pass
elif isinstance(condValue, list):
postCellsTags = {gid: tags for (gid,tags) in postCellsTags.items() if tags.get(condKey, None) in condValue} # dict with post Cell objects
else:
postCellsTags = {gid: tags for (gid,tags) in postCellsTags.items() if tags.get(condKey, None) == condValue} # dict with post Cell objects
# subset of cells from selected pops (by relative indices)
if 'cellList' in target['conds']:
orderedPostGids = sorted(postCellsTags.keys())
gidList = [orderedPostGids[i] for i in target['conds']['cellList']]
postCellsTags = {gid: tags for (gid,tags) in postCellsTags.items() if gid in gidList}
# initialize randomizer in case used in string-based function (see issue #89 for more details)
self.rand.Random123(sim.hashStr('stim_'+source['type']),
sim.hashList(sorted(postCellsTags)),
sim.cfg.seeds['stim'])
# calculate params if string-based funcs
strParams = self._stimStrToFunc(postCellsTags, source, target)
# loop over postCells and add stim target
for postCellGid in postCellsTags: # for each postsyn cell
if postCellGid in self.gid2lid: # check if postsyn is in this node's list of gids
postCell = self.cells[sim.net.gid2lid[postCellGid]] # get Cell object
# stim target params
params = {}
params['label'] = targetLabel
params['source'] = target['source']
params['sec'] = strParams['secList'][postCellGid] if 'secList' in strParams else target['sec']
params['loc'] = strParams['locList'][postCellGid] if 'locList' in strParams else target['loc']
if source['type'] == 'NetStim': # for NetStims add weight+delay or default values
params['weight'] = strParams['weightList'][postCellGid] if 'weightList' in strParams else target.get('weight', 1.0)
params['delay'] = strParams['delayList'][postCellGid] if 'delayList' in strParams else target.get('delay', 1.0)
params['synsPerConn'] = strParams['synsPerConnList'][postCellGid] if 'synsPerConnList' in strParams else target.get('synsPerConn', 1)
params['synMech'] = target.get('synMech', None)
for p in ['Weight', 'Delay', 'loc']:
if 'synMech'+p+'Factor' in target:
params['synMech'+p+'Factor'] = target.get('synMech'+p+'Factor')
if 'originalFormat' in source and source['originalFormat'] == 'NeuroML2':
if 'weight' in target:
params['weight'] = target['weight']
for sourceParam in source: # copy source params
params[sourceParam] = strParams[sourceParam+'List'][postCellGid] if sourceParam+'List' in strParams else source.get(sourceParam)
if source['type'] == 'NetStim':
self._addCellStim(params, postCell) # call method to add connections (sort out synMechs first)
else:
postCell.addStim(params) # call cell method to add connection
print((' Number of stims on node %i: %i ' % (sim.rank, sum([len(cell.stims) for cell in self.cells]))))
sim.pc.barrier()
sim.timing('stop', 'stimsTime')
if sim.rank == 0 and sim.cfg.timing: print((' Done; cell stims creation time = %0.2f s.' % sim.timingData['stimsTime']))
return [cell.stims for cell in self.cells]
# -----------------------------------------------------------------------------
# Set parameters and add stim
# -----------------------------------------------------------------------------
def _addCellStim(self, stimParam, postCell):
# convert synMech param to list (if not already)
if not isinstance(stimParam.get('synMech'), list):
stimParam['synMech'] = [stimParam.get('synMech')]
# generate dict with final params for each synMech
paramPerSynMech = ['weight', 'delay', 'loc']
finalParam = {}
for i, synMech in enumerate(stimParam.get('synMech')):
for param in paramPerSynMech:
finalParam[param+'SynMech'] = stimParam.get(param)
if len(stimParam['synMech']) > 1:
if isinstance (stimParam.get(param), list): # get weight from list for each synMech
finalParam[param+'SynMech'] = stimParam[param][i]
elif 'synMech'+param.title()+'Factor' in stimParam: # adapt weight for each synMech
finalParam[param+'SynMech'] = stimParam[param] * stimParam['synMech'+param.title()+'Factor'][i]
params = {k: stimParam.get(k) for k,v in stimParam.items()}
params['synMech'] = synMech
params['loc'] = finalParam['locSynMech']
params['weight'] = finalParam['weightSynMech']
params['delay'] = finalParam['delaySynMech']
postCell.addStim(params=params)
# -----------------------------------------------------------------------------
# Convert stim param string to function
# -----------------------------------------------------------------------------
def _stimStrToFunc(self, postCellsTags, sourceParams, targetParams):
# list of params that have a function passed in as a string
#params = sourceParams+targetParams
params = sourceParams.copy()
params.update(targetParams)
paramsStrFunc = [param for param in self.stimStringFuncParams+self.connStringFuncParams
if param in params and isinstance(params[param], basestring) and params[param] not in ['variable']]
# dict to store correspondence between string and actual variable
dictVars = {}
dictVars['post_x'] = lambda postConds: postConds['x']
dictVars['post_y'] = lambda postConds: postConds['y']
dictVars['post_z'] = lambda postConds: postConds['z']
dictVars['post_xnorm'] = lambda postConds: postConds['xnorm']
dictVars['post_ynorm'] = lambda postConds: postConds['ynorm']
dictVars['post_znorm'] = lambda postConds: postConds['znorm']
dictVars['rand'] = lambda unused1: self.rand
# add netParams variables
for k,v in self.params.__dict__.items():
if isinstance(v, Number):
dictVars[k] = v
# for each parameter containing a function, calculate lambda function and arguments
strParams = {}
for paramStrFunc in paramsStrFunc:
strFunc = params[paramStrFunc] # string containing function
for randmeth in self.stringFuncRandMethods: strFunc = strFunc.replace(randmeth, 'rand.'+randmeth) # append rand. to h.Random() methods
strVars = [var for var in list(dictVars.keys()) if var in strFunc and var+'norm' not in strFunc] # get list of variables used (eg. post_ynorm or dist_xyz)
lambdaStr = 'lambda ' + ','.join(strVars) +': ' + strFunc # convert to lambda function
lambdaFunc = eval(lambdaStr)
# store lambda function and func vars in connParam (for weight, delay and synsPerConn since only calculated for certain conns)
params[paramStrFunc+'Func'] = lambdaFunc
params[paramStrFunc+'FuncVars'] = {strVar: dictVars[strVar] for strVar in strVars}
# replace lambda function (with args as dict of lambda funcs) with list of values
strParams[paramStrFunc+'List'] = {postGid: params[paramStrFunc+'Func'](**{k:v if isinstance(v, Number) else v(postCellTags) for k,v in params[paramStrFunc+'FuncVars'].items()})
for postGid,postCellTags in sorted(postCellsTags.items())}
return strParams
| Neurosim-lab/netpyne | netpyne/network/stim.py | Python | mit | 10,067 |
import src.executor as executor
import src.dqn as dqn
import src.game_data as game_data
import src.graph as graph
import src.environment as environment
import numpy as np
import unittest
class TestIntegration(unittest.TestCase):
def setUp(self):
self.executor = executor.Executor()
self.dqn = dqn.Agent()
self.game_data = game_data.GameBatchData()
self.graph = graph.Graph()
self.environment = environment.GymEnvironment('Pong-v0')
def test_integration_game_data_and_executor(self):
print("Integration testing of game data and executor module")
self.executor.new_game(11111)
self.executor.add_screenshots(np.array([[100, 100], [100, 100]]))
self.executor._generate_gif(np.array([[100, 100], [100, 100]]))
self.executor.write_csv(['h1', 'h2'], [[1, 2], [2, 3]], [1, 2])
print("Successful Integration testing of game data and executor module")
def test_integration_environment_and_graph_execution(self):
print("Integration testing of game data and executor module")
self.environment.actions()
self.environment.action_space()
self.environment.make()
self.environment.render()
self.environment.step(0)
s, q_values, model = self.dqn.build_network()
self.trainable_weights = model.trainable_weights
a, y, loss, grads_update = self.dqn.build_training_op(self.trainable_weights)
stack = self.dqn.get_initial_state(np.array([[100, 100], [100, 100]]))
action, action_value = self.dqn.get_action(np.array([[100, 100], [100, 100]]))
summary_placeholders, update_ops, summary_op = self.dqn.setup_summary()
self.dqn.load_network()
stack = dqn.preprocess(np.array([100, 100]), np.array([[100, 100], [100, 100]]))
print(s)
print(q_values)
print(model)
print(a)
print(y)
print(loss)
print(grads_update)
print(stack)
print(action)
print(action_value)
print(summary_placeholders)
print(update_ops)
print(summary_op)
print(stack)
print("Successful Integration testing of game data and executor module")
if __name__ == '__main__':
unittest.main()
| escoboard/dqn | test/test_integration.py | Python | mit | 2,279 |
# -*- coding: utf-8 -*-
'''
canteen model tests
~~~~~~~~~~~~~~~~~~~
tests canteen's data modelling layer.
:author: Sam Gammon <[email protected]>
:copyright: (c) Keen IO, 2013
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
'''
# stdlib
import os
# canteen tests
from canteen.test import FrameworkTest
## ModelExportTests
class ModelExportTests(FrameworkTest):
''' Tests objects exported by `model`. '''
def test_concrete(self):
''' Test that we can import concrete classes. '''
try:
from canteen import model
from canteen.model import Key
from canteen.model import Model
from canteen.model import Property
from canteen.model import AbstractKey
from canteen.model import AbstractModel
except ImportError: # pragma: no cover
return self.fail("Failed to import concrete classes exported by Model.")
else:
self.assertTrue(Key) # must export Key
self.assertTrue(Model) # must export Model
self.assertTrue(Property) # must export Property
self.assertTrue(AbstractKey) # must export AbstractKey
self.assertTrue(AbstractModel) # must export AbstractModel
self.assertIsInstance(model, type(os)) # must be a module (lol)
| mindis/canteen | canteen_tests/test_model/__init__.py | Python | mit | 1,361 |
from bibliopixel import *
from bibliopixel.animation import BaseAnimation, AnimationQueue, OffAnim
from util import *
from static_objects import *
import loader
import config
import status
import traceback
import globals
class BPManager:
def __init__(self, off_timeout):
self.driver = []
self._driverCfg = None
self.led = None
self._ledCfg = None
self.anim = None
self._animCfg = None
self.drivers = {}
self._driverClasses = {}
self._driverNames = {}
self.controllers = {}
self._contClasses = {}
self._contNames = {}
self.anims = {}
self._animClasses = {}
self._animParams = {}
self._animNames = {}
self._preConfigs = {}
self._preNames = {}
self.animRunParams = BaseAnimation.RUN_PARAMS
self._off_timeout = off_timeout
self._offAnim = None
self.__loadFuncs = {
"driver": self.__loadDriverDef,
"controller": self.__loadControllerDef,
"animation": self.__loadAnimDef,
"preset": self.__loadPresetDef
}
config.initConfig()
def __genModObj(self, config):
if "desc" not in config:
config.desc = ""
if "presets" not in config:
config.presets = []
if "params" not in config:
config.params = []
for p in config.presets:
p.id = config.id
c = {
"display": config.display,
"desc": config.desc,
"params": config.params,
"presets": config.presets,
}
c = d(c)
if config.type == "controller" or (config.type == "preset" and config.preset_type == "controller"):
c.control_type = config.control_type
return c
def __loadDriverDef(self, config):
config = d(config)
self._driverClasses[config.id] = config['class']
self.drivers[config.id] = self.__genModObj(config)
self._driverNames[config.id] = config.display
def __loadControllerDef(self, config):
config = d(config)
self._contClasses[config.id] = config['class']
self.controllers[config.id] = self.__genModObj(config)
self._contNames[config.id] = config.display
def __addToAnims(self, config, c):
cont = config.controller
for p in c.presets:
p.type = cont
p.locked = True
if not cont in self.anims:
self.anims[cont] = {}
self.anims[cont][config.id] = c
self._animNames[config.id] = config.display
params = {}
for p in config.params:
params[p.id] = p
self._animParams[config.id] = params
def __loadAnimDef(self, config):
config = d(config)
self._animClasses[config.id] = config['class']
self.__addToAnims(config, self.__genModObj(config))
def __loadPresetDef(self, config):
config = d(config)
config.id = "*!#_" + config.id
config.display = "* " + config.display
self._preConfigs[config.id] = {
"class": config['class'],
"preconfig": config.preconfig
}
self._preNames[config.id] = config.display
c = self.__genModObj(config)
if config.preset_type == "driver":
self.drivers[config.id] = c
self._driverNames[config.id] = config.display
elif config.preset_type == "controller":
self.controllers[config.id] = c
self._contNames[config.id] = config.display
elif config.preset_type == "animation":
self.__addToAnims(config, c)
else:
return
# TODO: Add check for required fields for better errors
def loadModules(self, mods):
for m in mods:
if hasattr(m, 'MANIFEST'):
status.pushStatus("Loading: {}".format(m.__file__))
for ref in m.MANIFEST:
ref = d(ref)
if ref.type in self.__loadFuncs:
try:
self.__loadFuncs[ref.type](ref)
except:
status.pushStatus(
"Load module failure: {} - {}".format(m.__file__, traceback.format_exc()))
def loadBaseMods(self):
self.loadModules(moduleList)
def loadMods(self):
mod_dirs = globals._server_config.mod_dirs
for d in (mod_dirs + globals._bpa_dirs):
self.loadModules(loader.load_folder(d))
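# resolve the implementation class and its constructor parameters for a
# driver / controller / animation, merging preset "preconfig" values into
# the user-supplied config where applicable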
def __getInstance(self, config, inst_type):
config = d(config)
params = d(config.config)
result = None
obj = None
preconfig = None
if config.id in self._preConfigs:
p = self._preConfigs[config.id]
obj = p['class']
preconfig = p['preconfig']
else:
if inst_type == "driver":
if config.id in self._driverClasses:
obj = self._driverClasses[config.id]
elif inst_type == "controller":
if config.id in self._contClasses:
obj = self._contClasses[config.id]
elif inst_type == "animation":
if config.id in self._animClasses:
obj = self._animClasses[config.id]
if not obj:
raise Exception("Invalid " + inst_type)
if preconfig:
if hasattr(preconfig, '__call__'):
preconfig = preconfig()
params.upgrade(preconfig)
return (obj, params)
def _startOffAnim(self):
if self._off_timeout > 0:
if self._offAnim == None and self.led != None:
self._offAnim = OffAnim(self.led)
self.anim = OffAnim(self.led, timeout=self._off_timeout)
self.anim.run(threaded=True)
def startConfig(self, driverConfig, ledConfig):
self.stopConfig()
self._driverCfg = driverConfig
self._ledCfg = d(ledConfig)
ctype = ""
if self._ledCfg.id in self.controllers:
ctype = self.controllers[self._ledCfg.id].control_type
self._ledCfg.control_type = ctype
config.writeConfig("current_setup", self._driverCfg, "driver")
config.writeConfig("current_setup", self._ledCfg, "controller")
try:
status.pushStatus("Starting config...")
self.driver = []
for drv in self._driverCfg:
obj, params = self.__getInstance(d(drv), "driver")
self.driver.append(obj(**(params)))
obj, params = self.__getInstance(self._ledCfg, "controller")
params['driver'] = self.driver
self.led = obj(**(params))
self._startOffAnim()
status.pushStatus("Config start success!")
return success()
except Exception, e:
self.stopConfig()
status.pushError("Config start failure! {}".format(
traceback.format_exc()))
return fail(str(e), error=ErrorCode.BP_ERROR, data=None)
def getConfig(self):
setup = d(config.readConfig("current_setup"))
# setup = d({
# "driver": self._driverCfg,
# "controller": self._ledCfg
# })
if not ("driver" in setup):
setup.driver = None
if not ("controller" in setup):
setup.controller = None
setup.running = self.led != None and len(self.driver) > 0
return setup
def stopConfig(self):
status.pushStatus("Stopping current config")
self.stopAnim(doOff=False)
if len(self.driver) > 0:
for drv in self.driver:
drv.cleanup()
self.driver = []
self._driverCfg = None
if self.led:
self.led.cleanup()
self.led = None
self._ledCfg = None
self._offAnim = None
def stopAnim(self, doOff=True):
if self.anim:
try:
self.anim.cleanup()
except Exception, e:
status.pushError(e)
self.anim = None
self._animCfg = None
if doOff:
self._startOffAnim()
def startAnim(self, config):
def getAnim(c):
cfg = d(c['config'])
run = d(c['run'])
cfg.led = self.led
c['config'] = cfg
p = self._animParams[c.id]
for k in cfg:
if k in p:
pd = p[k]
if pd.type == "color":
cfg[k] = tuple(cfg[k])
elif pd.type == "multi_tuple" or pd.type == "multi":
if isinstance(pd.controls, list):
for i in range(len(pd.controls)):
if pd.controls[i].type == "color":
cfg[k][i] = tuple(cfg[k][i])
elif isinstance(pd.controls, dict):
if pd.controls.type == "color":
temp = []
for x in cfg[k]:
temp.append(tuple(x))
cfg[k] = temp
if pd.type == "multi_tuple":
cfg[k] = tuple(cfg[k])
obj, params = self.__getInstance(c, "animation")
anim = obj(**(params))
return anim, d(run)
try:
if not(self.led != None and len(self.driver) > 0):
return fail(msg="Output config not started! Please start first.")
self.stopAnim(doOff=False)
self._animCfg = config
if('queue' in config):
q = config['queue']
run = d(config['run'])
run.threaded = True
self.anim = AnimationQueue(self.led)
for a in q:
anim, r = getAnim(a)
self.anim.addAnim(
anim=anim,
amt=r.amt,
fps=r.fps,
max_steps=r.max_steps,
untilComplete=r.untilComplete,
max_cycles=r.max_cycles,
seconds=r.seconds)
status.pushStatus("Starting Animation Queue")
self.anim.run(**(run))
return success()
else:
self.anim, run = getAnim(config)
run.threaded = True
status.pushStatus("Starting Animation: {}".format(
self._animNames[config.id]))
self.anim.run(**(run))
return success()
except Exception, e:
status.pushError(traceback.format_exc())
return fail("Failure starting animation: " + str(e), error=ErrorCode.BP_ERROR, data=None)
| ManiacalLabs/PixelWeb | pixelweb/bpmanager.py | Python | mit | 11,336 |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TModel(models.Model):
name = models.CharField(max_length=200)
test = models.OneToOneField(
'self',
null=True,
blank=True,
related_name='related_test_models'
)
for_inline = models.ForeignKey(
'self',
null=True,
blank=True,
related_name='inline_test_models'
)
def __str__(self):
return self.name
class TestModel(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
| shubhamdipt/django-autocomplete-light | test_project/select2_one_to_one/models.py | Python | mit | 647 |
import pytest
from aiohttp_json_api.common import JSONAPI_CONTENT_TYPE
class TestDocumentStructure:
"""Document Structure"""
@pytest.mark.parametrize(
'resource_type',
('authors', 'books', 'chapters', 'photos', 'stores')
)
async def test_response_by_json_schema(self, fantasy_client,
jsonapi_validator, resource_type):
response = await fantasy_client.get(f'/api/{resource_type}')
json = await response.json(content_type=JSONAPI_CONTENT_TYPE)
assert jsonapi_validator.is_valid(json)
| vovanbo/aiohttp_json_api | tests/integration/test_document_structure.py | Python | mit | 587 |
from collections import defaultdict
from collections import namedtuple
from itertools import izip
from enum import Enum
from tilequeue.process import Source
def namedtuple_with_defaults(name, props, defaults):
t = namedtuple(name, props)
t.__new__.__defaults__ = defaults
return t
class LayerInfo(namedtuple_with_defaults(
'LayerInfo', 'min_zoom_fn props_fn shape_types', (None,))):
def allows_shape_type(self, shape):
if self.shape_types is None:
return True
typ = shape_type_lookup(shape)
return typ in self.shape_types
class ShapeType(Enum):
point = 1
line = 2
polygon = 3
# aliases, don't use these directly!
multipoint = 1
linestring = 2
multilinestring = 2
multipolygon = 3
@classmethod
def parse_set(cls, inputs):
outputs = set()
for value in inputs:
t = cls[value.lower()]
outputs.add(t)
return outputs or None
# determine the shape type from the raw WKB bytes. this means we don't have to
# parse the WKB, which can be an expensive operation for large polygons.
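# byte 0 of the WKB blob is the byte-order flag (1 = little-endian / NDR) and
# bytes 1-4 encode the geometry type; little-endian type bytes are reversed so
# that the low-order byte always ends up at index 3.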
def wkb_shape_type(wkb):
reverse = ord(wkb[0]) == 1
type_bytes = map(ord, wkb[1:5])
if reverse:
type_bytes.reverse()
typ = type_bytes[3]
if typ == 1 or typ == 4:
return ShapeType.point
elif typ == 2 or typ == 5:
return ShapeType.line
elif typ == 3 or typ == 6:
return ShapeType.polygon
else:
assert False, 'WKB shape type %d not understood.' % (typ,)
def deassoc(x):
"""
Turns an array consisting of alternating key-value pairs into a
dictionary.
Osm2pgsql stores the tags for ways and relations in the planet_osm_ways and
planet_osm_rels tables in this format. Hstore would make more sense now,
but this encoding pre-dates the common availability of hstore.
Example:
>>> from raw_tiles.index.util import deassoc
>>> deassoc(['a', 1, 'b', 'B', 'c', 3.14])
{'a': 1, 'c': 3.14, 'b': 'B'}
"""
pairs = [iter(x)] * 2
return dict(izip(*pairs))
# fixtures extend metadata to include ways and relations for the feature.
# this is unnecessary for SQL, as the ways and relations tables are
# "ambiently available" and do not need to be passed in arguments.
class Metadata(object):
def __init__(self, source, ways, relations):
assert source is None or isinstance(source, Source)
self.source = source and source.name
self.ways = ways
self.relations = relations
class Table(namedtuple('Table', 'source rows')):
def __init__(self, source, rows):
super(Table, self).__init__(source, rows)
assert isinstance(source, Source)
def shape_type_lookup(shape):
typ = shape.geom_type
if typ.startswith('Multi'):
typ = typ[len('Multi'):]
return typ.lower()
# list of road types which are likely to have buses on them. used to cut
# down the number of queries the SQL used to do for relations. although this
# isn't necessary for fixtures, we replicate the logic to keep the behaviour
# the same.
BUS_ROADS = set([
'motorway', 'motorway_link', 'trunk', 'trunk_link', 'primary',
'primary_link', 'secondary', 'secondary_link', 'tertiary',
'tertiary_link', 'residential', 'unclassified', 'road', 'living_street',
])
class Relation(object):
def __init__(self, obj):
self.id = obj['id']
self.tags = deassoc(obj['tags'])
way_off = obj['way_off']
rel_off = obj['rel_off']
self.node_ids = obj['parts'][0:way_off]
self.way_ids = obj['parts'][way_off:rel_off]
self.rel_ids = obj['parts'][rel_off:]
def mz_is_interesting_transit_relation(tags):
public_transport = tags.get('public_transport')
typ = tags.get('type')
return public_transport in ('stop_area', 'stop_area_group') or \
typ in ('stop_area', 'stop_area_group', 'site')
# starting with the IDs in seed_relations, recurse up the transit relations
# of which they are members. returns the set of all the relation IDs seen
# and the "root" relation ID, which was the "furthest" relation from any
# leaf relation.
def mz_recurse_up_transit_relations(seed_relations, osm):
root_relation_ids = set()
root_relation_level = 0
all_relations = set()
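# walk upwards (breadth-first) from every seed relation; the relations reached
# at the greatest depth over all seeds become the candidate "root" relations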
for rel_id in seed_relations:
front = set([rel_id])
seen = set([rel_id])
level = 0
if root_relation_level == 0:
root_relation_ids.add(rel_id)
while front:
new_rels = set()
for r in front:
new_rels |= osm.transit_relations(r)
new_rels -= seen
level += 1
if new_rels and level > root_relation_level:
root_relation_ids = new_rels
root_relation_level = level
elif new_rels and level == root_relation_level:
root_relation_ids |= new_rels
front = new_rels
seen |= front
all_relations |= seen
root_relation_id = min(root_relation_ids) if root_relation_ids else None
return all_relations, root_relation_id
# extract a name for a transit route relation. this can expand comma
# separated lists and prefers to use the ref rather than the name.
def mz_transit_route_name(tags):
# prefer ref as it's less likely to contain the destination name
name = tags.get('ref')
if not name:
name = tags.get('name')
if name:
name = name.strip()
return name
def is_station_or_stop(fid, shape, props):
'Returns true if the given (point) feature is a station or stop.'
return (
props.get('railway') in ('station', 'stop', 'tram_stop') or
props.get('public_transport') in ('stop', 'stop_position', 'tram_stop')
)
def is_station_or_line(fid, shape, props):
"""
Returns true if the given (line or polygon from way) feature is a station
or transit line.
"""
railway = props.get('railway')
return railway in ('subway', 'light_rail', 'tram', 'rail')
Transit = namedtuple(
'Transit', 'score root_relation_id '
'trains subways light_rails trams railways')
def mz_calculate_transit_routes_and_score(osm, node_id, way_id, rel_id):
candidate_relations = set()
if node_id:
candidate_relations.update(osm.relations_using_node(node_id))
if way_id:
candidate_relations.update(osm.relations_using_way(way_id))
if rel_id:
candidate_relations.add(rel_id)
seed_relations = set()
for rel_id in candidate_relations:
rel = osm.relation(rel_id)
if rel and mz_is_interesting_transit_relation(rel.tags):
seed_relations.add(rel_id)
del candidate_relations
# this complex query does two recursive sweeps of the relations
# table starting from a seed set of relations which are or contain
# the original station.
#
# the first sweep goes "upwards" from relations to "parent" relations. if
# a relation R1 is a member of relation R2, then R2 will be included in
# this sweep as long as it has "interesting" tags, as defined by the
# function mz_is_interesting_transit_relation.
#
# the second sweep goes "downwards" from relations to "child" relations.
# if a relation R1 has a member R2 which is also a relation, then R2 will
# be included in this sweep as long as it also has "interesting" tags.
all_relations, root_relation_id = mz_recurse_up_transit_relations(
seed_relations, osm)
del seed_relations
# collect all the interesting nodes - this includes the station node (if
# any) and any nodes which are members of found relations which have
# public transport tags indicating that they're stations or stops.
stations_and_stops = set()
for rel_id in all_relations:
rel = osm.relation(rel_id)
if not rel:
continue
for node_id in rel.node_ids:
node = osm.node(node_id)
if node and is_station_or_stop(*node):
stations_and_stops.add(node_id)
if node_id:
stations_and_stops.add(node_id)
# collect any physical railway which includes any of the above
# nodes.
stations_and_lines = set()
for node_id in stations_and_stops:
for way_id in osm.ways_using_node(node_id):
way = osm.way(way_id)
if way and is_station_or_line(*way):
stations_and_lines.add(way_id)
if way_id:
stations_and_lines.add(way_id)
# collect all IDs together in one array to intersect with the parts arrays
# of route relations which may include them.
all_routes = set()
for lookup, ids in ((osm.relations_using_node, stations_and_stops),
(osm.relations_using_way, stations_and_lines),
(osm.relations_using_rel, all_relations)):
for i in ids:
for rel_id in lookup(i):
rel = osm.relation(rel_id)
if rel and \
rel.tags.get('type') == 'route' and \
rel.tags.get('route') in ('subway', 'light_rail', 'tram',
'train', 'railway'):
all_routes.add(rel_id)
routes_lookup = defaultdict(set)
for rel_id in all_routes:
rel = osm.relation(rel_id)
if not rel:
continue
route = rel.tags.get('route')
if route:
route_name = mz_transit_route_name(rel.tags)
routes_lookup[route].add(route_name)
trains = list(sorted(routes_lookup['train']))
subways = list(sorted(routes_lookup['subway']))
light_rails = list(sorted(routes_lookup['light_rail']))
trams = list(sorted(routes_lookup['tram']))
railways = list(sorted(routes_lookup['railway']))
del routes_lookup
# if a station is an interchange between mainline rail and subway or
# light rail, then give it a "bonus" boost of importance.
bonus = 2 if trains and (subways or light_rails) else 1
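# pack the route counts into a three-digit score: hundreds = train routes,
# tens = subway/light-rail routes, ones = trams/railways, each digit capped at 9
# (train and subway/light-rail counts are doubled by the interchange bonus)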
score = (100 * min(9, bonus * len(trains)) +
10 * min(9, bonus * (len(subways) + len(light_rails))) +
min(9, len(trams) + len(railways)))
return Transit(score=score, root_relation_id=root_relation_id,
trains=trains, subways=subways, light_rails=light_rails,
railways=railways, trams=trams)
_TAG_NAME_ALTERNATES = (
'name',
'int_name',
'loc_name',
'nat_name',
'official_name',
'old_name',
'reg_name',
'short_name',
'name_left',
'name_right',
'name:short',
)
_ALT_NAME_PREFIX_CANDIDATES = (
'name:left:', 'name:right:', 'name:', 'alt_name:', 'old_name:'
)
# given a dictionary of key-value properties, returns a list of all the keys
# which represent names. this is used to assign all the names to a single
# layer. this makes sure that when we generate multiple features from a single
# database record, only one feature gets named and labelled.
def name_keys(props):
name_keys = []
for k in props.keys():
is_name_key = k in _TAG_NAME_ALTERNATES
if not is_name_key:
for prefix in _ALT_NAME_PREFIX_CANDIDATES:
if k.startswith(prefix):
is_name_key = True
break
if is_name_key:
name_keys.append(k)
return name_keys
_US_ROUTE_MODIFIERS = set([
'Business',
'Spur',
'Truck',
'Alternate',
'Bypass',
'Connector',
'Historic',
'Toll',
'Scenic',
])
# properties for a feature (fid, shape, props) in layer `layer_name` at zoom
# level `zoom`. also takes an `osm` parameter, which is an object which can
# be used to look up nodes, ways and relations and the relationships between
# them.
def layer_properties(fid, shape, props, layer_name, zoom, osm):
layer_props = props.copy()
# drop the 'source' tag, if it exists. we override it anyway, and it just
# gets confusing having multiple source tags. in the future, we may
# replace the whole thing with a separate 'meta' for source.
layer_props.pop('source', None)
# need to make sure that the name is only applied to one of
# the pois, landuse or buildings layers - in that order of
# priority.
if layer_name in ('pois', 'landuse', 'buildings'):
for key in name_keys(layer_props):
layer_props.pop(key, None)
# urgh, hack!
if layer_name == 'water' and shape.geom_type == 'Point':
layer_props['label_placement'] = True
if shape.geom_type in ('Polygon', 'MultiPolygon'):
layer_props['area'] = shape.area
if layer_name == 'roads' and \
shape.geom_type in ('LineString', 'MultiLineString') and \
fid >= 0:
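# mz_networks collects a flattened list of (route, network, ref) triples,
# one triple per route relation that includes this road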
mz_networks = []
mz_cycling_networks = set()
mz_is_bus_route = False
for rel_id in osm.relations_using_way(fid):
rel = osm.relation(rel_id)
if not rel:
continue
typ, route, network, ref, modifier = [rel.tags.get(k) for k in (
'type', 'route', 'network', 'ref', 'modifier')]
# the `modifier` tag gives extra information about the route, but
# we want that information to be part of the `network` property.
if network and modifier:
modifier = modifier.capitalize()
us_network = network.startswith('US:')
us_route_modifier = modifier in _US_ROUTE_MODIFIERS
# don't want to add the suffix if it's already there.
suffix = ':' + modifier
needs_suffix = suffix not in network
if us_network and us_route_modifier and needs_suffix:
network += suffix
if route and (network or ref):
mz_networks.extend([route, network, ref])
if typ == 'route' and \
route in ('hiking', 'foot', 'bicycle') and \
network in ('icn', 'ncn', 'rcn', 'lcn'):
mz_cycling_networks.add(network)
if typ == 'route' and route in ('bus', 'trolleybus'):
mz_is_bus_route = True
mz_cycling_network = None
for cn in ('icn', 'ncn', 'rcn', 'lcn'):
if layer_props.get(cn) == 'yes' or \
('%s_ref' % cn) in layer_props or \
cn in mz_cycling_networks:
mz_cycling_network = cn
break
if mz_is_bus_route and \
zoom >= 12 and \
layer_props.get('highway') in BUS_ROADS:
layer_props['is_bus_route'] = True
layer_props['mz_networks'] = mz_networks
if mz_cycling_network:
layer_props['mz_cycling_network'] = mz_cycling_network
is_poi = layer_name == 'pois'
is_railway_station = props.get('railway') == 'station'
is_point_or_poly = shape.geom_type in (
'Point', 'MultiPoint', 'Polygon', 'MultiPolygon')
if is_poi and is_railway_station and \
is_point_or_poly:
node_id = None
way_id = None
rel_id = None
if shape.geom_type in ('Point', 'MultiPoint'):
node_id = fid
elif fid >= 0:
way_id = fid
else:
rel_id = -fid
transit = mz_calculate_transit_routes_and_score(
osm, node_id, way_id, rel_id)
layer_props['mz_transit_score'] = transit.score
layer_props['mz_transit_root_relation_id'] = (
transit.root_relation_id)
layer_props['train_routes'] = transit.trains
layer_props['subway_routes'] = transit.subways
layer_props['light_rail_routes'] = transit.light_rails
layer_props['tram_routes'] = transit.trams
return layer_props
| tilezen/tilequeue | tilequeue/query/common.py | Python | mit | 15,803 |
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
__all__ = ["book", "book_builder", "elements", "util"]
| m-wichmann/LimeTusk | limetusk/__init__.py | Python | mit | 148 |
import requests
from pprint import pprint
r = requests.get("http://pokeapi.co/api/v2/pokedex/1")
#pprint(r.json()["pokemon_entries"])
regpath = "regular/"
shinypath = "shiny/"
import os
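# rename each sprite file from the species name to its national pokedex entry
# number, keeping the regular and shiny variants in sync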
for pkmn in r.json()["pokemon_entries"]:
id = str(pkmn["entry_number"])
name = pkmn["pokemon_species"]["name"]
for file in os.listdir(regpath):
if file.split(".")[0] == name:
os.rename(os.path.join(regpath, file), os.path.join(regpath, file.replace(name, id)))
os.rename(os.path.join(shinypath, file), os.path.join(shinypath, file.replace(name, id)))
break
| MarkSpencerTan/pokemaster_bot | img/pokemon/image_rename.py | Python | mit | 559 |
"""
Script that trains Sklearn singletask models on GDB7 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
from sklearn.kernel_ridge import KernelRidge
tasks, datasets, transformers = dc.molnet.load_qm7(
featurizer='CoulombMatrix', split='stratified', move_mean=False)
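# the returned transformers (target normalisation fitted during loading) must be
# passed to the Evaluator below so that scores are reported in the original units (kcal/mol)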
train, valid, test = datasets
regression_metric = dc.metrics.Metric(
dc.metrics.mean_absolute_error, mode="regression")
def model_builder(model_dir):
sklearn_model = KernelRidge(kernel="rbf", alpha=5e-4, gamma=0.008)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder)
# Fit trained model
model.fit(train)
model.save()
train_evaluator = dc.utils.evaluate.Evaluator(model, train, transformers)
train_scores = train_evaluator.compute_model_performance([regression_metric])
print("Train scores [kcal/mol]")
print(train_scores)
test_evaluator = dc.utils.evaluate.Evaluator(model, test, transformers)
test_scores = test_evaluator.compute_model_performance([regression_metric])
print("Validation scores [kcal/mol]")
print(test_scores)
| ktaneishi/deepchem | examples/qm7/qm7_sklearn.py | Python | mit | 1,271 |
# Copyright (c) 2010-2014 Bo Lin
# Copyright (c) 2010-2014 Yanhong Annie Liu
# Copyright (c) 2010-2014 Stony Brook University
# Copyright (c) 2010-2014 The Research Foundation of SUNY
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import pickle
import random
import select
import socket
import logging
class EndPoint:
"""Represents a target for sending of messages.
This is the base class for all types of communication channels in
DistAlgo. It uniquely identifies a "node" in the distributed system. In
most scenarios, a process will only be associated with one EndPoint
instance. The 'self' keyword in DistAlgo is ultimately translated into an
instance of this class.
"""
def __init__(self, name=None, proctype=None):
if name is None:
self._name = socket.gethostname()
else:
self._name = name
self._proc = None
self._proctype = proctype
self._log = logging.getLogger("runtime.EndPoint")
self._address = None
def send(self, data, src, timestamp = 0):
pass
def recv(self, block, timeout = None):
pass
def setname(self, name):
self._name = name
def getlogname(self):
if self._address is not None:
return "%s_%s" % (self._address[0], str(self._address[1]))
else:
return self._name
def close(self):
pass
###################################################
# Make the EndPoint behave like a Process object:
def is_alive(self):
if self._proc is not None:
return self._proc.is_alive()
else:
self._log.warn("is_alive can only be called from parent process.")
return self
def join(self):
if self._proc is not None:
return self._proc.join()
else:
self._log.warn("join can only be called from parent process.")
return self
def terminate(self):
if self._proc is not None:
return self._proc.terminate()
else:
self._log.warn("terminate can only be called from parent process.")
return self
###################################################
def __getstate__(self):
return ("EndPoint", self._address, self._name, self._proctype)
def __setstate__(self, value):
proto, self._address, self._name, self._proctype = value
self._log = logging.getLogger("runtime.EndPoint")
def __str__(self):
if self._address is not None:
return str(self._address)
else:
return self._name
def __repr__(self):
if self._proctype is not None:
return "<" + self._proctype.__name__ + str(self) + ">"
else:
return "<process " + str(self) + ">"
def __hash__(self):
return hash(self._address)
def __eq__(self, obj):
if not hasattr(obj, "_address"):
return False
return self._address == obj._address
def __lt__(self, obj):
return self._address < obj._address
def __le__(self, obj):
return self._address <= obj._address
def __gt__(self, obj):
return self._address > obj._address
def __ge__(self, obj):
return self._address >= obj._address
def __ne__(self, obj):
if not hasattr(obj, "_address"):
return True
return self._address != obj._address
# TCP Implementation:
INTEGER_BYTES = 8
MAX_TCP_CONN = 200
MIN_TCP_PORT = 10000
MAX_TCP_PORT = 40000
MAX_TCP_BUFSIZE = 200000 # Maximum pickled message size
MAX_RETRY = 5
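# Wire format for TCP messages: an INTEGER_BYTES-long length header (native
# byte order) followed by the pickled (src, timestamp, data) triple.
# recvmesgs() reads the header first, then exactly that many payload bytes.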
class TcpEndPoint(EndPoint):
"""Endpoint based on TCP.
"""
senders = None
receivers = None
def __init__(self, name=None, proctype=None, port=None):
super().__init__(name, proctype)
TcpEndPoint.receivers = dict()
TcpEndPoint.senders = LRU(MAX_TCP_CONN)
self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if port is None:
while True:
self._address = (self._name,
random.randint(MIN_TCP_PORT, MAX_TCP_PORT))
try:
self._conn.bind(self._address)
break
except socket.error:
pass
else:
self._address = (self._name, port)
self._conn.bind(self._address)
self._conn.listen(10)
TcpEndPoint.receivers[self._conn] = self._address
self._log = logging.getLogger("runtime.TcpEndPoint(%s)" %
super().getlogname())
self._log.debug("TcpEndPoint %s initialization complete",
str(self._address))
def send(self, data, src, timestamp = 0):
retry = 1
while True:
conn = TcpEndPoint.senders.get(self)
if conn is None:
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
conn.connect(self._address)
TcpEndPoint.senders[self] = conn
except socket.error:
self._log.debug("Can not connect to %s. Peer is down.",
str(self._address))
return False
bytedata = pickle.dumps((src, timestamp, data))
l = len(bytedata)
header = int(l).to_bytes(INTEGER_BYTES, sys.byteorder)
mesg = header + bytedata
if len(mesg) > MAX_TCP_BUFSIZE:
self._log.warn("Packet size exceeded maximum buffer size! "
"Outgoing packet dropped.")
self._log.debug("Dropped packet: %s",
str((src, timestamp, data)))
break
else:
try:
if self._send_1(mesg, conn):
break
except socket.error as e:
pass
self._log.debug("Error sending packet, retrying.")
retry += 1
if retry > MAX_RETRY:
self._log.debug("Max retry count reached, reconnecting.")
conn.close()
del TcpEndPoint.senders[self]
retry = 1
self._log.debug("Sent packet %r to %r." % (data, self))
return True
def _send_1(self, data, conn):
msglen = len(data)
totalsent = 0
while totalsent < msglen:
sent = conn.send(data[totalsent:])
if sent == 0:
return False
totalsent += sent
return True
def recvmesgs(self):
try:
while True:
r, _, _ = select.select(TcpEndPoint.receivers.keys(), [], [])
if self._conn in r:
# We have pending new connections, handle the first in
# line. If there are any more they will have to wait until
# the next iteration
conn, addr = self._conn.accept()
TcpEndPoint.receivers[conn] = addr
r.remove(self._conn)
for c in r:
try:
bytedata = self._receive_1(INTEGER_BYTES, c)
datalen = int.from_bytes(bytedata, sys.byteorder)
bytedata = self._receive_1(datalen, c)
src, tstamp, data = pickle.loads(bytedata)
bytedata = None
if not isinstance(src, TcpEndPoint):
raise TypeError()
else:
yield (src, tstamp, data)
except pickle.UnpicklingError as e:
self._log.warn("UnpicklingError, packet from %s dropped",
TcpEndPoint.receivers[c])
except socket.error as e:
self._log.debug("Remote connection %s terminated.",
str(c))
del TcpEndPoint.receivers[c]
except select.error as e:
self._log.debug("select.error occured, terminating receive loop.")
def _receive_1(self, totallen, conn):
msg = bytes()
while len(msg) < totallen:
chunk = conn.recv(totallen-len(msg))
if len(chunk) == 0:
raise socket.error("EOF received")
msg += chunk
return msg
def close(self):
pass
def __getstate__(self):
return ("TCP", self._address, self._name, self._proctype)
def __setstate__(self, value):
proto, self._address, self._name, self._proctype = value
self._conn = None
self._log = logging.getLogger("runtime.TcpEndPoint(%s)" % self._name)
class Node(object):
__slots__ = ['prev', 'next', 'me']
def __init__(self, prev, me):
self.prev = prev
self.me = me
self.next = None
def __str__(self):
return str(self.me)
def __repr__(self):
return self.me.__repr__()
class LRU:
"""
Implementation of a length-limited O(1) LRU queue.
Built for and used by PyPE:
http://pype.sourceforge.net
Copyright 2003 Josiah Carlson.
"""
def __init__(self, count, pairs=[]):
self.count = max(count, 1)
self.d = {}
self.first = None
self.last = None
for key, value in pairs:
self[key] = value
def __contains__(self, obj):
return obj in self.d
def __getitem__(self, obj):
a = self.d[obj].me
self[a[0]] = a[1]
return a[1]
def __setitem__(self, obj, val):
if obj in self.d:
del self[obj]
nobj = Node(self.last, (obj, val))
if self.first is None:
self.first = nobj
if self.last:
self.last.next = nobj
self.last = nobj
self.d[obj] = nobj
if len(self.d) > self.count:
if self.first == self.last:
self.first = None
self.last = None
return
a = self.first
a.next.prev = None
self.first = a.next
a.next = None
del self.d[a.me[0]]
del a
def __delitem__(self, obj):
nobj = self.d[obj]
if nobj.prev:
nobj.prev.next = nobj.next
else:
self.first = nobj.next
if nobj.next:
nobj.next.prev = nobj.prev
else:
self.last = nobj.prev
del self.d[obj]
def __iter__(self):
cur = self.first
while cur != None:
cur2 = cur.next
yield cur.me[1]
cur = cur2
def __str__(self):
return str(self.d)
def __repr__(self):
return self.d.__repr__()
def iteritems(self):
cur = self.first
while cur != None:
cur2 = cur.next
yield cur.me
cur = cur2
def iterkeys(self):
return iter(self.d)
def itervalues(self):
for i,j in self.iteritems():
yield j
def keys(self):
return self.d.keys()
def get(self, k, d=None):
v = self.d.get(k)
if v is None: return None
a = v.me
self[a[0]] = a[1]
return a[1]
# UDP Implementation:
MIN_UDP_PORT = 10000
MAX_UDP_PORT = 40000
MAX_UDP_BUFSIZE = 20000
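# UDP payloads are the same pickled (src, timestamp, data) triple as TCP but
# without a length header; send() drops any datagram larger than
# MAX_UDP_BUFSIZE instead of fragmenting it.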
class UdpEndPoint(EndPoint):
sender = None
def __init__(self, name=None, proctype=None, port=None):
super().__init__(name, proctype)
UdpEndPoint.sender = None
self._conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if port is None:
while True:
self._address = (self._name,
random.randint(MIN_UDP_PORT, MAX_UDP_PORT))
try:
self._conn.bind(self._address)
break
except socket.error:
pass
else:
self._address = (self._name, port)
self._conn.bind(self._address)
self._log = logging.getLogger("runtime.UdpEndPoint(%s)" %
super().getlogname())
self._log.debug("UdpEndPoint %s initialization complete",
str(self._address))
def send(self, data, src, timestamp = 0):
if UdpEndPoint.sender is None:
UdpEndPoint.sender = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
bytedata = pickle.dumps((src, timestamp, data))
if len(bytedata) > MAX_UDP_BUFSIZE:
self._log.warn("Data size exceeded maximum buffer size!" +
" Outgoing packet dropped.")
self._log.debug("Dropped packet: %s", str((src, timestamp, data)))
elif (UdpEndPoint.sender.sendto(bytedata, self._address) !=
len(bytedata)):
raise socket.error()
def recvmesgs(self):
flags = 0
try:
while True:
bytedata = self._conn.recv(MAX_UDP_BUFSIZE, flags)
src, tstamp, data = pickle.loads(bytedata)
if not isinstance(src, UdpEndPoint):
raise TypeError()
else:
yield (src, tstamp, data)
except socket.error as e:
self._log.debug("socket.error occured, terminating receive loop.")
def __getstate__(self):
return ("UDP", self._address, self._name, self._proctype)
def __setstate__(self, value):
proto, self._address, self._name, self._proctype = value
self._conn = None
self._log = logging.getLogger("runtime.UdpEndPoint")
| mayli/DistAlgo | da/endpoint.py | Python | mit | 14,912 |
import qor
from scripts import ctf
class Mod(qor.Mod):
def __init__(self):
# set up specific things that engine needs to know about the mod
# possibly setting up customized gui, background music, theming, etc.
# include possible selections of things, and which customizations to
# enable
pass
    def validate(self):
# make sure the game can be run with the current players/settings
self.map_info = qor.MapInfo(qor.get("map","church"))
        if not qor.is_map(self.map_info):
            raise qor.Error
# checks with ctf script if the given map is okay
if not ctf.valid_map(self.map_info):
raise qor.Error
    def preload(self):
# set up FOV and all that good stuff (possibly read from player config)
qor.ortho(False)
# set up any mod-specific stuff
# load a specific (or user selected) map
        self.level = qor.Map(self.map_info)
self.mode_logic = ctf.logic
    def update(self, t):
# possibly distinguish between client and server somewhere here
# do player logic (do different classes and characters have diff logic)?
self.mode_logic(t)
# update the game, call any modes logic or policies here
    def event(self, ev):
# called upon specific game events
# also called if something changes in config that must affect game
pass
#def stats():
# pass
| flipcoder/qor | bin/mods/example/__init__.py | Python | mit | 1,487 |
from __future__ import absolute_import, print_function, division
import operator
from petl.compat import text_type
from petl.util.base import Table, asindices, itervalues
from petl.transform.sorts import sort
def duplicates(table, key=None, presorted=False, buffersize=None, tempdir=None,
cache=True):
"""
Select rows with duplicate values under a given key (or duplicate
rows where no key is given). E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['A', 1, 2.0],
... ['B', 2, 3.4],
... ['D', 6, 9.3],
... ['B', 3, 7.8],
... ['B', 2, 12.3],
... ['E', None, 1.3],
... ['D', 4, 14.5]]
>>> table2 = etl.duplicates(table1, 'foo')
>>> table2
+-----+-----+------+
| foo | bar | baz |
+=====+=====+======+
| 'B' | 2 | 3.4 |
+-----+-----+------+
| 'B' | 3 | 7.8 |
+-----+-----+------+
| 'B' | 2 | 12.3 |
+-----+-----+------+
| 'D' | 6 | 9.3 |
+-----+-----+------+
| 'D' | 4 | 14.5 |
+-----+-----+------+
>>> # compound keys are supported
... table3 = etl.duplicates(table1, key=['foo', 'bar'])
>>> table3
+-----+-----+------+
| foo | bar | baz |
+=====+=====+======+
| 'B' | 2 | 3.4 |
+-----+-----+------+
| 'B' | 2 | 12.3 |
+-----+-----+------+
If `presorted` is True, it is assumed that the data are already sorted by
the given key, and the `buffersize`, `tempdir` and `cache` arguments are
ignored. Otherwise, the data are sorted, see also the discussion of the
`buffersize`, `tempdir` and `cache` arguments under the
:func:`petl.transform.sorts.sort` function.
See also :func:`petl.transform.dedup.unique` and
:func:`petl.transform.dedup.distinct`.
"""
return DuplicatesView(table, key=key, presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.duplicates = duplicates
class DuplicatesView(Table):
def __init__(self, source, key=None, presorted=False, buffersize=None,
tempdir=None, cache=True):
if presorted:
self.source = source
else:
self.source = sort(source, key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
def __iter__(self):
return iterduplicates(self.source, self.key)
def iterduplicates(source, key):
# assume source is sorted
# first need to sort the data
it = iter(source)
hdr = next(it)
yield tuple(hdr)
# convert field selection into field indices
if key is None:
indices = range(len(hdr))
else:
indices = asindices(hdr, key)
# now use field indices to construct a _getkey function
# N.B., this may raise an exception on short rows, depending on
# the field selection
getkey = operator.itemgetter(*indices)
previous = None
previous_yielded = False
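    # Walk consecutive rows of the sorted input: when a key repeats, yield the
    # first row of the group once (tracked via previous_yielded) and then every
    # subsequent row sharing that key.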
for row in it:
if previous is None:
previous = row
else:
kprev = getkey(previous)
kcurr = getkey(row)
if kprev == kcurr:
if not previous_yielded:
yield tuple(previous)
previous_yielded = True
yield tuple(row)
else:
# reset
previous_yielded = False
previous = row
def unique(table, key=None, presorted=False, buffersize=None, tempdir=None,
cache=True):
"""
Select rows with unique values under a given key (or unique rows
if no key is given). E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['A', 1, 2],
... ['B', '2', '3.4'],
... ['D', 'xyz', 9.0],
... ['B', u'3', u'7.8'],
... ['B', '2', 42],
... ['E', None, None],
... ['D', 4, 12.3],
... ['F', 7, 2.3]]
>>> table2 = etl.unique(table1, 'foo')
>>> table2
+-----+------+------+
| foo | bar | baz |
+=====+======+======+
| 'A' | 1 | 2 |
+-----+------+------+
| 'E' | None | None |
+-----+------+------+
| 'F' | 7 | 2.3 |
+-----+------+------+
If `presorted` is True, it is assumed that the data are already sorted by
the given key, and the `buffersize`, `tempdir` and `cache` arguments are
ignored. Otherwise, the data are sorted, see also the discussion of the
`buffersize`, `tempdir` and `cache` arguments under the
:func:`petl.transform.sorts.sort` function.
See also :func:`petl.transform.dedup.duplicates` and
:func:`petl.transform.dedup.distinct`.
"""
return UniqueView(table, key=key, presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.unique = unique
class UniqueView(Table):
def __init__(self, source, key=None, presorted=False, buffersize=None,
tempdir=None, cache=True):
if presorted:
self.source = source
else:
self.source = sort(source, key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
def __iter__(self):
return iterunique(self.source, self.key)
def iterunique(source, key):
# assume source is sorted
# first need to sort the data
it = iter(source)
hdr = next(it)
yield tuple(hdr)
# convert field selection into field indices
if key is None:
indices = range(len(hdr))
else:
indices = asindices(hdr, key)
# now use field indices to construct a _getkey function
# N.B., this may raise an exception on short rows, depending on
# the field selection
getkey = operator.itemgetter(*indices)
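    # A row is yielded only if its key differs from both its predecessor and
    # its successor in the sorted input, i.e. the key occurs exactly once.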
try:
prev = next(it)
except StopIteration:
return
prev_key = getkey(prev)
prev_comp_ne = True
for curr in it:
curr_key = getkey(curr)
curr_comp_ne = (curr_key != prev_key)
if prev_comp_ne and curr_comp_ne:
yield tuple(prev)
prev = curr
prev_key = curr_key
prev_comp_ne = curr_comp_ne
# last one?
if prev_comp_ne:
yield prev
def conflicts(table, key, missing=None, include=None, exclude=None,
presorted=False, buffersize=None, tempdir=None, cache=True):
"""
Select rows with the same key value but differing in some other field.
E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['A', 1, 2.7],
... ['B', 2, None],
... ['D', 3, 9.4],
... ['B', None, 7.8],
... ['E', None],
... ['D', 3, 12.3],
... ['A', 2, None]]
>>> table2 = etl.conflicts(table1, 'foo')
>>> table2
+-----+-----+------+
| foo | bar | baz |
+=====+=====+======+
| 'A' | 1 | 2.7 |
+-----+-----+------+
| 'A' | 2 | None |
+-----+-----+------+
| 'D' | 3 | 9.4 |
+-----+-----+------+
| 'D' | 3 | 12.3 |
+-----+-----+------+
Missing values are not considered conflicts. By default, `None` is treated
as the missing value, this can be changed via the `missing` keyword
argument.
One or more fields can be ignored when determining conflicts by providing
the `exclude` keyword argument. Alternatively, fields to use when
determining conflicts can be specified explicitly with the `include`
keyword argument. This provides a simple mechanism for analysing the
source of conflicting rows from multiple tables, e.g.::
>>> table1 = [['foo', 'bar'], [1, 'a'], [2, 'b']]
>>> table2 = [['foo', 'bar'], [1, 'a'], [2, 'c']]
>>> table3 = etl.cat(etl.addfield(table1, 'source', 1),
... etl.addfield(table2, 'source', 2))
>>> table4 = etl.conflicts(table3, key='foo', exclude='source')
>>> table4
+-----+-----+--------+
| foo | bar | source |
+=====+=====+========+
| 2 | 'b' | 1 |
+-----+-----+--------+
| 2 | 'c' | 2 |
+-----+-----+--------+
If `presorted` is True, it is assumed that the data are already sorted by
the given key, and the `buffersize`, `tempdir` and `cache` arguments are
ignored. Otherwise, the data are sorted, see also the discussion of the
`buffersize`, `tempdir` and `cache` arguments under the
:func:`petl.transform.sorts.sort` function.
"""
return ConflictsView(table, key, missing=missing, exclude=exclude,
include=include, presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.conflicts = conflicts
class ConflictsView(Table):
def __init__(self, source, key, missing=None, exclude=None, include=None,
presorted=False, buffersize=None, tempdir=None, cache=True):
if presorted:
self.source = source
else:
self.source = sort(source, key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
self.missing = missing
self.exclude = exclude
self.include = include
def __iter__(self):
return iterconflicts(self.source, self.key, self.missing, self.exclude,
self.include)
def iterconflicts(source, key, missing, exclude, include):
# normalise arguments
if exclude and not isinstance(exclude, (list, tuple)):
exclude = (exclude,)
if include and not isinstance(include, (list, tuple)):
include = (include,)
# exclude overrides include
if include and exclude:
include = None
it = iter(source)
hdr = next(it)
flds = list(map(text_type, hdr))
yield tuple(hdr)
# convert field selection into field indices
indices = asindices(hdr, key)
# now use field indices to construct a _getkey function
# N.B., this may raise an exception on short rows, depending on
# the field selection
getkey = operator.itemgetter(*indices)
previous = None
previous_yielded = False
for row in it:
if previous is None:
previous = row
else:
kprev = getkey(previous)
kcurr = getkey(row)
if kprev == kcurr:
# is there a conflict?
conflict = False
for x, y, f in zip(previous, row, flds):
if (exclude and f not in exclude) \
or (include and f in include) \
or (not exclude and not include):
if missing not in (x, y) and x != y:
conflict = True
break
if conflict:
if not previous_yielded:
yield tuple(previous)
previous_yielded = True
yield tuple(row)
else:
# reset
previous_yielded = False
previous = row
def distinct(table, key=None, count=None, presorted=False, buffersize=None,
tempdir=None, cache=True):
"""
Return only distinct rows in the table.
If the `count` argument is not None, it will be used as the name for an
additional field, and the values of the field will be the number of
duplicate rows.
If the `key` keyword argument is passed, the comparison is done on the
given key instead of the full row.
See also :func:`petl.transform.dedup.duplicates`,
:func:`petl.transform.dedup.unique`,
:func:`petl.transform.reductions.groupselectfirst`,
:func:`petl.transform.reductions.groupselectlast`.
"""
return DistinctView(table, key=key, count=count, presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.distinct = distinct
class DistinctView(Table):
def __init__(self, table, key=None, count=None, presorted=False,
buffersize=None, tempdir=None, cache=True):
if presorted:
self.table = table
else:
self.table = sort(table, key=key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
self.count = count
def __iter__(self):
it = iter(self.table)
hdr = next(it)
# convert field selection into field indices
if self.key is None:
indices = range(len(hdr))
else:
indices = asindices(hdr, self.key)
# now use field indices to construct a _getkey function
# N.B., this may raise an exception on short rows, depending on
# the field selection
getkey = operator.itemgetter(*indices)
INIT = object()
if self.count:
hdr = tuple(hdr) + (self.count,)
yield hdr
previous = INIT
n_dup = 1
for row in it:
if previous is INIT:
previous = row
else:
kprev = getkey(previous)
kcurr = getkey(row)
if kprev == kcurr:
n_dup += 1
else:
yield tuple(previous) + (n_dup,)
n_dup = 1
previous = row
# deal with last row
yield tuple(previous) + (n_dup,)
else:
yield tuple(hdr)
previous_keys = INIT
for row in it:
keys = getkey(row)
if keys != previous_keys:
yield tuple(row)
previous_keys = keys
def isunique(table, field):
"""
Return True if there are no duplicate values for the given field(s),
otherwise False. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['a', 1],
... ['b'],
... ['b', 2],
... ['c', 3, True]]
>>> etl.isunique(table1, 'foo')
False
>>> etl.isunique(table1, 'bar')
True
The `field` argument can be a single field name or index (starting from
zero) or a tuple of field names and/or indexes.
"""
vals = set()
for v in itervalues(table, field):
if v in vals:
return False
else:
vals.add(v)
return True
Table.isunique = isunique
| alimanfoo/petl | petl/transform/dedup.py | Python | mit | 15,217 |
from composite import *
from ensemble import *
from metrics import *
# from parametrization import *
from pdf import *
from utils import *
| aimalz/qp | qp/__init__.py | Python | mit | 145 |
from sqlalchemy import Column, ForeignKey, UniqueConstraint, String, Text
from sqlalchemy.orm import column_property, relationship, validates
from frt_server.common import CommonColumns
import frt_server.config
import hashlib
import string
import random
import os
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import SignatureExpired, BadSignature
from PIL import Image, ImageOps
from flask import jsonify  # used by convert_and_save_image's error response
class User(CommonColumns):
__tablename__ = 'user'
username = Column(String(120))
password = Column(String(120))
email = Column(String(120), unique=True)
salt = Column(String(120))
biography = Column(Text)
fonts = relationship('Font', back_populates='author')
families = relationship('Family', back_populates='author')
attachments = relationship('Attachment', back_populates='owner')
thread_subscriptions = relationship('ThreadSubscription', back_populates='user')
def avatar_file_path(self):
return os.path.join(frt_server.config.AVATAR_UPLOAD_FOLDER, str(self._id)) + '.jpg'
def get_avatar_path(self):
if os.path.exists(self.avatar_file_path()):
return self.avatar_file_path()
else:
return os.path.join(frt_server.config.MEDIA_FOLDER, 'default_avatar.jpg')
def clean_avatar_file(self):
if os.path.exists(self.avatar_file_path()):
os.remove(self.avatar_file_path())
def ensure_avatar_folder_exists(self):
if not os.path.exists(frt_server.config.AVATAR_UPLOAD_FOLDER):
os.makedirs(frt_server.config.AVATAR_UPLOAD_FOLDER)
def convert_and_save_image(self, image_file):
size = (128, 128)
try:
image = Image.open(image_file.stream)
fitted_image = ImageOps.fit(image, size)
fitted_image.save(self.avatar_file_path())
except IOError:
return jsonify({'error': 'Converting file failed'}), 500
def generate_auth_token(self, expiration=frt_server.config.TOKEN_EXPIRATION):
"""Generates token for given expiration
and user login."""
s = Serializer(frt_server.config.SECRET_KEY, expires_in=expiration)
return s.dumps({'email': self.email })
@staticmethod
def verify_auth_token(token):
"""Verifies token and eventually returns
user login.
"""
s = Serializer(frt_server.config.SECRET_KEY)
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
return data['email']
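    # Illustrative round trip (sketch only, not original code):
    #   token = user.generate_auth_token(expiration=3600)
    #   User.verify_auth_token(token)  # -> user.email, or None if expired/invalid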
def is_authorized(self, role_names):
"""We do not use roles at the moment, but in case they are added, they should be validated here"""
#"""Checks if user is related to given role_names.
#"""
#allowed_roles = set([r.id for r in self.roles])\
# .intersection(set(role_names))
#return len(allowed_roles) > 0
return True
def generate_salt(self):
return ''.join(random.sample(string.ascii_letters, 12))
def encrypt(self, password):
"""Encrypt password using hashlib and current salt.
"""
return str(hashlib.sha1((password + str(self.salt)).encode('utf-8')).hexdigest())
@validates('password')
def _set_password(self, key, value):
"""Using SQLAlchemy validation makes sure each
time password is changed it will get encrypted
before flushing to db.
"""
self.salt = self.generate_salt()
return self.encrypt(value)
def check_password(self, password):
if not self.password:
return False
return self.encrypt(password) == self.password
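    # Note: assignments to `password` pass through the validator above, so
    # `user.password = 'secret'` stores sha1('secret' + salt) and
    # `user.check_password('secret')` compares against that stored digest.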
| HPI-SWA-Lab/BP2016H1 | frt_server/user.py | Python | mit | 3,794 |
#!/usr/bin/env python
import sys
import os
import argparse
import h5py
import numpy
import shutil
import logging
import json
from skimage import morphology as skmorph
from scipy.ndimage import label
import traceback
from . import imio, agglo, morpho, classify, evaluate, app_logger, \
session_manager, pixel, features
try:
from ray import stack_np
except ImportError:
np_installed = False
else:
np_installed = True
try:
import syngeo
except ImportError:
logging.warning('Could not import syngeo. ' +
'Synapse-aware mode not available.')
def grab_boundary(prediction, channels, master_logger):
boundary = None
master_logger.debug("Grabbing boundary labels: " + str(channels))
for channel_id in channels:
if boundary is None:
boundary = prediction[...,channel_id]
else:
boundary += prediction[...,channel_id]
return boundary
def gen_supervoxels(session_location, options, prediction_file, master_logger):
master_logger.debug("Generating supervoxels")
if not os.path.isfile(prediction_file):
raise Exception("Training file not found: " + prediction_file)
prediction = imio.read_image_stack(prediction_file, group='/volume/prediction', single_channel=False)
if options.extract_ilp_prediction:
prediction = prediction.transpose((2, 1, 0))
boundary = grab_boundary(prediction, options.bound_channels, master_logger)
master_logger.debug("watershed seed value threshold: " + str(options.seed_val))
seeds = label(boundary<=options.seed_val)[0]
if options.seed_size > 0:
master_logger.debug("Removing small seeds")
seeds = morpho.remove_small_connected_components(seeds, options.seed_size)
master_logger.debug("Finished removing small seeds")
master_logger.info("Starting watershed")
boundary_cropped = boundary
seeds_cropped = seeds
if options.border_size > 0:
boundary_cropped = boundary[options.border_size:(-1*options.border_size), options.border_size:(-1*options.border_size),options.border_size:(-1*options.border_size)]
seeds_cropped = seeds[options.border_size:(-1*options.border_size), options.border_size:(-1*options.border_size),options.border_size:(-1*options.border_size)]
supervoxels_cropped = skmorph.watershed(boundary_cropped, seeds_cropped)
supervoxels = supervoxels_cropped
if options.border_size > 0:
supervoxels = seeds.copy()
supervoxels.dtype = supervoxels_cropped.dtype
supervoxels[:,:,:] = 0
supervoxels[options.border_size:(-1*options.border_size), options.border_size:(-1*options.border_size),options.border_size:(-1*options.border_size)] = supervoxels_cropped
master_logger.info("Finished watershed")
if options.synapse_file is not None:
master_logger.info("Processing synapses")
pre_post_pairs = syngeo.io.raveler_synapse_annotations_to_coords(
options.synapse_file)
synapse_volume = syngeo.io.volume_synapse_view(pre_post_pairs, boundary.shape)
if options.border_size > 0:
synvol_cropped = synapse_volume[options.border_size:(-1*options.border_size), options.border_size:(-1*options.border_size),options.border_size:(-1*options.border_size)]
synvol_cropped = synvol_cropped.copy()
synapse_volume[:,:,:] = 0
synapse_volume[options.border_size:(-1*options.border_size), options.border_size:(-1*options.border_size),options.border_size:(-1*options.border_size)] = synvol_cropped
supervoxels = morpho.split_exclusions(boundary, supervoxels, synapse_volume,
options.synapse_dilation)
master_logger.info("Finished processing synapses")
return supervoxels, prediction
def agglomeration(options, agglom_stack, supervoxels, prediction,
image_stack, session_location, sp_outs, master_logger):
seg_thresholds = sorted(options.segmentation_thresholds)
for threshold in seg_thresholds:
if threshold != 0 or not options.use_neuroproof:
master_logger.info("Starting agglomeration to threshold " + str(threshold)
+ " with " + str(agglom_stack.number_of_nodes()))
agglom_stack.agglomerate(threshold)
master_logger.info("Finished agglomeration to threshold " + str(threshold)
+ " with " + str(agglom_stack.number_of_nodes()))
if options.inclusion_removal:
inclusion_removal(agglom_stack, master_logger)
segmentation = agglom_stack.get_segmentation()
if options.h5_output:
imio.write_image_stack(segmentation,
session_location+"/agglom-"+str(threshold)+".lzf.h5", compression='lzf')
if options.raveler_output:
sps_outs = output_raveler(segmentation, supervoxels, image_stack, "agglom-" + str(threshold),
session_location, master_logger)
master_logger.info("Writing graph.json")
agglom_stack.write_plaza_json(session_location+"/raveler-export/agglom-"+str(threshold)+"/graph.json",
options.synapse_file)
if options.synapse_file is not None:
shutil.copyfile(options.synapse_file,
session_location + "/raveler-export/agglom-"+str(threshold)+"/annotations-synapse.json")
master_logger.info("Finished writing graph.json")
def inclusion_removal(agglom_stack, master_logger):
master_logger.info("Starting inclusion removal with " + str(agglom_stack.number_of_nodes()) + " nodes")
agglom_stack.remove_inclusions()
master_logger.info("Finished inclusion removal with " + str(agglom_stack.number_of_nodes()) + " nodes")
def output_raveler(segmentation, supervoxels, image_stack, name, session_location, master_logger, sps_out=None):
outdir = session_location + "/raveler-export/" + name + "/"
master_logger.info("Exporting Raveler directory: " + outdir)
rav = imio.segs_to_raveler(supervoxels, segmentation, 0, do_conn_comp=False, sps_out=sps_out)
sps_out, dummy1, dummy2 = rav
if os.path.exists(outdir):
master_logger.warning("Overwriting Raveler diretory: " + outdir)
shutil.rmtree(outdir)
imio.write_to_raveler(*rav, directory=outdir, gray=image_stack)
return sps_out
def flow_perform_agglomeration(options, supervoxels, prediction, image_stack,
session_location, sps_out, master_logger):
# make synapse constraints
synapse_volume = numpy.array([])
if not options.use_neuroproof and options.synapse_file is not None:
pre_post_pairs = syngeo.io.raveler_synapse_annotations_to_coords(
options.synapse_file)
synapse_volume = \
syngeo.io.volume_synapse_view(pre_post_pairs, supervoxels.shape)
# ?! build RAG (automatically load features if classifier file is available, default to median
# if no classifier, check if np mode or not, automatically load features in NP as well)
if options.classifier is not None:
cl = classify.load_classifier(options.classifier)
fm_info = json.loads(str(cl.feature_description))
master_logger.info("Building RAG")
if fm_info is None or fm_info["neuroproof_features"] is None:
raise Exception("agglomeration classifier to old to be used")
if options.use_neuroproof:
if not fm_info["neuroproof_features"]:
raise Exception("random forest created not using neuroproof")
agglom_stack = stack_np.Stack(supervoxels, prediction,
single_channel=False, classifier=cl, feature_info=fm_info, synapse_file=options.synapse_file,
master_logger=master_logger)
else:
if fm_info["neuroproof_features"]:
master_logger.warning("random forest created using neuroproof features -- should still work")
fm = features.io.create_fm(fm_info)
if options.expected_vi:
mpf = agglo.expected_change_vi(fm, cl, beta=options.vi_beta)
else:
mpf = agglo.classifier_probability(fm, cl)
agglom_stack = agglo.Rag(supervoxels, prediction, mpf,
feature_manager=fm, show_progress=True, nozeros=True,
exclusions=synapse_volume)
master_logger.info("Finished building RAG")
else:
master_logger.info("Building RAG")
boundary = grab_boundary(prediction, options.bound_channels, master_logger)
if options.use_neuroproof:
agglom_stack = stack_np.Stack(supervoxels, boundary, synapse_file=options.synapse_file,
master_logger=master_logger)
else:
agglom_stack = agglo.Rag(supervoxels, boundary, merge_priority_function=agglo.boundary_median,
show_progress=True, nozeros=True, exclusions=synapse_volume)
master_logger.info("Finished building RAG")
# remove inclusions
if options.inclusion_removal:
inclusion_removal(agglom_stack, master_logger)
# actually perform the agglomeration
agglomeration(options, agglom_stack, supervoxels, prediction, image_stack,
session_location, sps_out, master_logger)
def run_segmentation_pipeline(session_location, options, master_logger):
# read grayscale
image_stack = None
if options.image_stack is not None:
image_stack = imio.read_image_stack(options.image_stack)
prediction_file = None
# run boundary prediction -- produces a prediction file
if options.gen_pixel:
pixel.gen_pixel_probabilities(session_location, options, master_logger, image_stack)
prediction_file = session_location + "/" + options.pixelprob_name
else:
prediction_file = options.pixelprob_file
# generate supervoxels -- produces supervoxels and output as appropriate
supervoxels = None
prediction = None
if options.gen_supervoxels:
supervoxels, prediction = gen_supervoxels(session_location, options, prediction_file, master_logger)
elif options.supervoxels_file:
master_logger.info("Reading supervoxels: " + options.supervoxels_file)
supervoxels = imio.read_image_stack(options.supervoxels_file)
master_logger.info("Finished reading supervoxels")
sps_out = None
if supervoxels is not None:
if options.h5_output:
imio.write_image_stack(supervoxels,
session_location + "/" + options.supervoxels_name, compression='lzf')
if options.raveler_output:
sps_out = output_raveler(supervoxels, supervoxels, image_stack, "supervoxels", session_location, master_logger)
if options.synapse_file is not None:
shutil.copyfile(options.synapse_file,
session_location + "/raveler-export/supervoxels/annotations-synapse.json")
# agglomerate and generate output
if options.gen_agglomeration:
if prediction is None and options.pixelprob_file is not None:
master_logger.info("Reading pixel prediction: " + options.pixelprob_file)
prediction = imio.read_image_stack(options.pixelprob_file, group='/volume/prediction', single_channel=False)
master_logger.info("Finished reading pixel prediction")
elif prediction is None:
raise Exception("No pixel probs available for agglomeration")
flow_perform_agglomeration(options, supervoxels, prediction, image_stack,
session_location, sps_out, master_logger)
def np_verify(options_parser, options, master_logger):
if options.use_neuroproof and not np_installed:
raise Exception("NeuroProof not properly installed on your machine. Install or disable neuroproof")
def synapse_file_verify(options_parser, options, master_logger):
if options.synapse_file:
if not os.path.exists(options.synapse_file):
raise Exception("Synapse file " + options.synapse_file + " not found")
if not options.synapse_file.endswith('.json'):
raise Exception("Synapse file " + options.synapse_file + " does not end with .json")
def classifier_verify(options_parser, options, master_logger):
if options.classifier is not None:
if not os.path.exists(options.classifier):
raise Exception("Classifier " + options.classifier + " not found")
if not options.classifier.endswith('.h5'):
raise Exception("Classifier " + options.classifier + " does not end with .h5")
def gen_supervoxels_verify(options_parser, options, master_logger):
if options.gen_supervoxels and not options.gen_pixel and options.pixelprob_file is None:
raise Exception("Must have a pixel prediction to generate supervoxels")
def supervoxels_file_verify(options_parser, options, master_logger):
if options.supervoxels_file is not None:
if not os.path.exists(options.supervoxels_file):
raise Exception("Supervoxel file " + options.supervoxels_file + " does not exist")
def gen_agglomeration_verify(options_parser, options, master_logger):
if options.gen_agglomeration:
if not options.gen_supervoxels and options.supervoxels_file is None:
raise Exception("No supervoxels available for agglomeration")
if not options.gen_pixel and options.pixelprob_file is None:
raise Exception("No prediction available for agglomeration")
def create_segmentation_pipeline_options(options_parser):
pixel.create_pixel_options(options_parser, False)
options_parser.create_option("use-neuroproof", "Use NeuroProof",
default_val=False, required=False, dtype=bool, verify_fn=np_verify, num_args=None,
shortcut='NP', warning=False, hidden=(not np_installed))
options_parser.create_option("supervoxels-name", "Name for the supervoxel segmentation",
default_val="supervoxels.lzf.h5", required=False, dtype=str, verify_fn=None, num_args=None,
shortcut=None, warning=False, hidden=True)
options_parser.create_option("supervoxels-file", "Supervoxel segmentation file or directory stack",
default_val=None, required=False, dtype=str, verify_fn=supervoxels_file_verify, num_args=None,
shortcut=None, warning=False, hidden=True)
options_parser.create_option("gen-supervoxels", "Enable supervoxel generation",
default_val=False, required=False, dtype=bool, verify_fn=gen_supervoxels_verify, num_args=None,
shortcut='GS', warning=True, hidden=False)
options_parser.create_option("inclusion-removal", "Disable inclusion removal",
default_val=True, required=False, dtype=bool, verify_fn=None, num_args=None,
shortcut='IR', warning=False, hidden=False)
options_parser.create_option("seed-val", "Threshold for choosing seeds",
default_val=0, required=False, dtype=int, verify_fn=None, num_args=None,
shortcut=None, warning=False, hidden=True)
options_parser.create_option("seed-size", "Threshold for seed size",
default_val=0, required=False, dtype=int, verify_fn=None, num_args=None,
shortcut='SS', warning=False, hidden=False)
options_parser.create_option("synapse-file", "Json file containing synapse information",
default_val=None, required=False, dtype=str, verify_fn=synapse_file_verify, num_args=None,
shortcut='SJ', warning=False, hidden=False)
options_parser.create_option("segmentation-thresholds", "Segmentation thresholds",
default_val=[], required=False, dtype=float, verify_fn=None, num_args='+',
shortcut='ST', warning=True, hidden=False)
options_parser.create_option("gen-agglomeration", "Enable agglomeration",
default_val=False, required=False, dtype=bool, verify_fn=gen_agglomeration_verify, num_args=None,
shortcut='GA', warning=True, hidden=False)
options_parser.create_option("raveler-output", "Disable Raveler output",
default_val=True, required=False, dtype=bool, verify_fn=None, num_args=None,
shortcut=None, warning=False, hidden=True)
options_parser.create_option("h5-output", "Enable h5 output",
default_val=False, required=False, dtype=bool, verify_fn=None, num_args=None,
shortcut=None, warning=False, hidden=True)
options_parser.create_option("classifier", "H5 file containing RF",
default_val=None, required=False, dtype=str, verify_fn=classifier_verify, num_args=None,
shortcut='k', warning=False, hidden=False)
options_parser.create_option("bound-channels", "Channel numbers designated as boundary",
default_val=[0], required=False, dtype=int, verify_fn=None, num_args='+',
shortcut=None, warning=False, hidden=True)
options_parser.create_option("expected-vi", "Enable expected VI during agglomeration",
default_val=False, required=False, dtype=bool, verify_fn=None, num_args=None,
shortcut=None, warning=False, hidden=True)
options_parser.create_option("vi-beta", "Relative penalty for false merges in weighted expected VI",
default_val=1.0, required=False, dtype=float, verify_fn=None, num_args=None,
shortcut=None, warning=False, hidden=True)
options_parser.create_option("synapse-dilation", "Dilate synapse points by this amount",
default_val=1, required=False, dtype=int, verify_fn=None, num_args=None,
shortcut=None, warning=False, hidden=True)
options_parser.create_option("border-size", "Size of the border in pixels",
default_val=0, required=False, dtype=int, verify_fn=None, num_args=None,
shortcut=None, warning=False, hidden=True)
def entrypoint(argv):
applogger = app_logger.AppLogger(False, 'seg-pipeline')
master_logger = applogger.get_logger()
try:
session = session_manager.Session("seg-pipeline", "Segmentation pipeline (featuring boundary prediction, median agglomeration or trained agglomeration, inclusion removal, and raveler exports)",
master_logger, applogger, create_segmentation_pipeline_options)
run_segmentation_pipeline(session.session_location, session.options, master_logger)
    except Exception as e:
        master_logger.error(str(traceback.format_exc()))
    except KeyboardInterrupt as err:
master_logger.error(str(traceback.format_exc()))
if __name__ == "__main__":
    sys.exit(entrypoint(sys.argv))
| jni/ray | ray/segmentation_pipeline.py | Python | mit | 18,742 |
# Generic tests for data structures
import builtins
import itertools as itools
import unittest
KEY_RANGE = range(10)
VALUE_RANGE = range(1000)
# Some random (key, value) pairs
map_data = (
(0, 764),
(3, 448),
(8, 760),
(7, 648),
(4, 307),
(9, 384),
(7, 682),
(5, 841),
(1, 152),
(3, 102),
(5, 114),
(0, 555),
(7, 485),
(8, 255),
(0, 229),
(3, 623),
(0, 222),
(2, 653),
(1, 927),
(1, 233),
)
class SetTest(unittest.TestCase):
def _new(self, items=()):
"""Subclasses should override this to create their own set instances."""
return set(items)
def test_len_empty(self):
_set = self._new()
self.assertEqual(0, len(_set))
def test_len(self):
_set = self._new(KEY_RANGE)
self.assertEqual(len(KEY_RANGE), len(_set))
def test_contains_empty(self):
_set = self._new()
for key in KEY_RANGE:
self.assertNotIn(key, _set)
def test_contains(self):
_set = self._new(KEY_RANGE)
for key in KEY_RANGE:
self.assertIn(key, _set)
def test_add(self):
_set = self._new()
correct = set()
universe = set(KEY_RANGE)
# Add each key twice
for key in itools.chain(KEY_RANGE, KEY_RANGE):
correct.add(key)
_set.add(key)
# Check size
self.assertEqual(len(correct), len(_set))
# Check contents
for item in correct:
self.assertIn(item, _set)
for item in universe - correct:
self.assertNotIn(item, _set)
def test_discard(self):
_set = self._new()
correct = set()
universe = set(KEY_RANGE)
# Load it up!
for key in KEY_RANGE:
correct.add(key)
_set.add(key)
# Delete them all!
for key in itools.chain(KEY_RANGE, KEY_RANGE):
correct.discard(key)
_set.discard(key)
# Check size
self.assertEqual(len(correct), len(_set))
# Check contents
for item in correct:
self.assertIn(item, _set)
for item in universe - correct:
self.assertNotIn(item, _set)
self.assertEqual(0, len(_set))
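# Reference max-heap used by HeapTest below: deliberately simple, O(n) list
# operations, so subclasses can override _new() to run the same behavioural
# checks against a real heap implementation.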
class ListHeap(list):
def add(self, item):
self.append(item)
def discard(self, item):
if item in self:
self.remove(item)
def max(self):
if not self:
raise LookupError('Empty heap')
return builtins.max(self)
def pop(self):
if not self:
raise LookupError('Empty heap')
m = max(self)
self.remove(m)
return m
def update(self, old, new):
if old not in self:
return
i = self.index(old)
self[i] = new
class HeapTest(unittest.TestCase):
def _new(self, items=()):
return ListHeap(items)
def test_len_empty(self):
heap = self._new()
self.assertEqual(0, len(heap))
def test_len(self):
heap = self._new(KEY_RANGE)
self.assertEqual(len(KEY_RANGE), len(heap))
def test_contains_empty(self):
heap = self._new()
for key in KEY_RANGE:
self.assertNotIn(key, heap)
def test_contains(self):
heap = self._new(KEY_RANGE)
for key in KEY_RANGE:
self.assertIn(key, heap)
def test_add(self):
heap = self._new()
correct = []
universe = set(KEY_RANGE)
for key in itools.chain(KEY_RANGE, KEY_RANGE):
correct.append(key)
heap.add(key)
# Check size
self.assertEqual(len(correct), len(heap))
# Check contents
for item in correct:
self.assertIn(item, heap)
for item in universe - set(correct):
self.assertNotIn(item, heap)
def test_discard(self):
heap = self._new()
correct = []
universe = set(KEY_RANGE)
# Load it up!
for key in itools.chain(KEY_RANGE, KEY_RANGE):
correct.append(key)
heap.add(key)
# Delete them all!
for key in itools.chain(KEY_RANGE, KEY_RANGE, KEY_RANGE):
if key in correct:
correct.remove(key)
heap.discard(key)
# Check size
self.assertEqual(len(correct), len(heap))
# Check contents
for item in correct:
self.assertIn(item, heap)
for item in universe - set(correct):
self.assertNotIn(item, heap)
self.assertEqual(0, len(heap))
def test_max_empty(self):
heap = self._new()
self.assertRaises(LookupError, heap.max)
def test_max(self):
heap = self._new(KEY_RANGE)
self.assertEqual(KEY_RANGE[-1], heap.max())
def test_pop_empty(self):
heap = self._new()
self.assertRaises(LookupError, heap.pop)
def test_pop(self):
heap = self._new(KEY_RANGE)
for item in reversed(KEY_RANGE):
self.assertEqual(item, heap.pop())
def test_extend(self):
heap = self._new()
heap.extend(KEY_RANGE)
key_max = KEY_RANGE[-1]
self.assertEqual(key_max, heap.max())
for item in KEY_RANGE:
self.assertIn(item, heap)
hi_range = range(key_max + 1, key_max + 11)
heap.extend(hi_range)
self.assertEqual(hi_range[-1], heap.max())
for item in range(KEY_RANGE[0], hi_range[-1] + 1):
self.assertIn(item, heap)
def test_update_empty(self):
heap = self._new()
heap.update(1, 2)
self.assertNotIn(1, heap)
self.assertNotIn(2, heap)
self.assertEqual(0, len(heap))
def _check_update(self, heap, old_val, new_val, old_max, new_max):
self.assertIn(old_val, heap)
self.assertNotIn(new_val, heap)
self.assertEqual(old_max, heap.max())
heap.update(old_val, new_val)
self.assertNotIn(old_val, heap)
self.assertIn(new_val, heap)
self.assertEqual(new_max, heap.max())
def test_update_decrease_max(self):
heap = self._new(KEY_RANGE)
max = KEY_RANGE[-1]
self._check_update(heap, max, -max, max, max - 1)
def test_update_increase_to_max(self):
heap = self._new(KEY_RANGE)
middle = (KEY_RANGE[0] + KEY_RANGE[-1]) // 2
max = KEY_RANGE[-1]
new_max = max + 1
self._check_update(heap, middle, new_max, max, new_max)
def test_update_decrease_to_min(self):
heap = self._new(KEY_RANGE)
middle = (KEY_RANGE[0] + KEY_RANGE[-1]) // 2
max = KEY_RANGE[-1]
self._check_update(heap, middle, -middle, max, max)
class SortTest(unittest.TestCase):
def _new(self):
return sorted
def test_empty(self):
sort = self._new()
self.assertEqual([], sort([]))
def test_sort_1(self):
sort = self._new()
things = [6]
answer = [6]
result = sort(things)
self.assertEqual(answer, result)
def test_sort_2(self):
sort = self._new()
things = [5, 7]
answer = [5, 7]
result = sort(things)
self.assertEqual(answer, result)
sort = self._new()
things = [8, 1]
answer = [1, 8]
result = sort(things)
self.assertEqual(answer, result)
def test_sort_11(self):
sort = self._new()
things = [2, 1, 6, 8, 2, 7, 3, 6, 1, 3, 0]
answer = [0, 1, 1, 2, 2, 3, 3, 6, 6, 7, 8]
result = sort(things)
self.assertEqual(answer, result)
def test_increasing(self):
sort = self._new()
things = [1, 1, 1, 1, 2, 2, 3, 3, 5, 6, 6, 6, 8]
answer = [1, 1, 1, 1, 2, 2, 3, 3, 5, 6, 6, 6, 8]
result = sort(things)
self.assertEqual(answer, result)
def test_decreasing(self):
sort = self._new()
things = [7, 6, 6, 5, 2, 1, 1, 0]
answer = [0, 1, 1, 2, 5, 6, 6, 7]
result = sort(things)
self.assertEqual(answer, result)
def test_all_equal(self):
sort = self._new()
things = [7] * 7
answer = [7] * 7
result = sort(things)
self.assertEqual(answer, result)
def test_with_keys(self):
sort = self._new()
things = [
(4, 7), (7, 0), (0, 5), (1, 5), (5, 3),
(8, 6), (3, 3), (9, 8), (6, 7), (2, 5),
]
answer = [
(0, 5), (1, 5), (2, 5), (3, 3), (4, 7),
(5, 3), (6, 7), (7, 0), (8, 6), (9, 8),
]
result = sort(things, key=lambda x: x[0])
self.assertEqual(answer, result)
things = [
(8, 0), (7, 1), (2, 2), (2, 3), (6, 4),
(3, 5), (7, 6), (5, 7), (6, 8), (7, 9),
]
answer = [
(7, 9), (6, 8), (5, 7), (7, 6), (3, 5),
(6, 4), (2, 3), (2, 2), (7, 1), (8, 0),
]
result = sort(things, key=lambda x: -11.1 * x[1])
self.assertEqual(answer, result)
def test_in_place(self):
sort = self._new()
things = [4, 1, 4, 2, 5, 7, 7]
answer = [1, 2, 4, 4, 5, 7, 7]
result = sort(things)
self.assertEqual(answer, result)
if sort is not sorted:
self.assertIs(things, result)
def test_instantiate_iterable(self):
sort = self._new()
things = range(10)
answer = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
result = sort(things)
self.assertEqual(answer, result)
def test_immutable_indexable(self):
sort = self._new()
things = (4, 8, 0, 7, 2, 4, 2)
answer = [0, 2, 2, 4, 4, 7, 8]
result = sort(things)
self.assertEqual(answer, result)
class StableSortTest(SortTest):
def test_stability(self):
sort = self._new()
things = [
(2, 5), (0, 6), (0, 0), (0, 4), (4, 4),
(4, 9), (3, 1), (3, 2), (2, 8), (2, 3),
]
answer = [
(0, 6), (0, 0), (0, 4), (2, 5), (2, 8),
(2, 3), (3, 1), (3, 2), (4, 4), (4, 9),
]
result = sort(things, key=lambda x: x[0])
self.assertEqual(answer, result)
| afbarnard/glowing-broccoli | src/tests.py | Python | mit | 10,294 |
'''
Created on Oct 31, 2011
@author: robertsj
'''
import abc
class Bundle(object):
'''
An abstract base class for representing bundles.
'''
    __metaclass__ = abc.ABCMeta
def __init__(self):
'''
Constructor
'''
| robertsj/poropy | poropy/bundletools/bundle.py | Python | mit | 264 |
"""
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import functools
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.compat import expanduser
from pip.download import path_to_url, unpack_url
from pip.exceptions import (
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, captured_stdout, rmtree, canonicalize_name)
from pip.utils.logging import indent_log
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.six.moves import configparser
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
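# Highest wheel metadata spec version (Wheel-Version) this module is expected
# to be able to install.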
logger = logging.getLogger(__name__)
class WheelCache(object):
"""A cache of wheels for future installs."""
def __init__(self, cache_dir, format_control):
"""Create a wheel cache.
:param cache_dir: The root of the cache.
:param format_control: A pip.index.FormatControl object to limit
binaries being read from the cache.
"""
self._cache_dir = expanduser(cache_dir) if cache_dir else None
self._format_control = format_control
def cached_wheel(self, link, package_name):
return cached_wheel(
self._cache_dir, link, self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and thus
# less secure). However the differences don't make a lot of difference for
# our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
def cached_wheel(cache_dir, link, format_control, package_name):
if not cache_dir:
return link
if not link:
return link
if link.is_wheel:
return link
if not link.is_artifact:
return link
if not package_name:
return link
canonical_name = canonicalize_name(package_name)
formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
if "binary" not in formats:
return link
root = _cache_for_link(cache_dir, link)
try:
wheel_names = os.listdir(root)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
return link
raise
candidates = []
for wheel_name in wheel_names:
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if not wheel.supported():
# Built for a different python/arch/etc
continue
candidates.append((wheel.support_index_min(), wheel_name))
if not candidates:
return link
candidates.sort()
path = os.path.join(root, candidates[0][1])
return pip.index.Link(path_to_url(path))
def rehash(path, algo='sha256', blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
block = f.read(blocksize)
while block:
length += len(block)
h.update(block)
block = f.read(blocksize)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, length)
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
cp = configparser.RawConfigParser()
cp.readfp(data)
console = {}
gui = {}
if cp.has_section('console_scripts'):
console = dict(cp.items('console_scripts'))
if cp.has_section('gui_scripts'):
gui = dict(cp.items('gui_scripts'))
return console, gui
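# Typical entry_points.txt content handled above (names are made up):
#   [console_scripts]
#   mytool = mypkg.cli:main
#   [gui_scripts]
#   mytool-gui = mypkg.gui:main
# which would return ({'mytool': 'mypkg.cli:main'},
#                     {'mytool-gui': 'mypkg.gui:main'}).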
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def normpath(src, p):
return os.path.relpath(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
ensure_dir(dest) # common for the 'include' path
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
# is self.req.project_name case preserving?
s.lower().startswith(
req.project_name.replace('-', '_').lower())):
assert not info_dir, 'Multiple .dist-info directories'
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
ensure_dir(destdir)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
if entry.suffix is None:
raise InstallationError(
"Invalid script entry point: %s for req: %s - A callable "
"suffix is required. Cf https://packaging.python.org/en/"
"latest/distributing.html#console-scripts for more "
"information." % (entry, req)
)
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
    # To add to the level of hack in this section of code: in order to support
    # ensurepip, this code will look for an ``ENSUREPIP_OPTIONS`` environment
    # variable which will control which versioned scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(
maker.make_multiple(['%s = %s' % kv for kv in console.items()])
)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((f, h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
from pip.utils import FakeFile # circular import
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path
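# For a RECORD row naming "mypkg/__init__.py" (hypothetical), this yields both
# <site-packages>/mypkg/__init__.py and the adjacent legacy
# <site-packages>/mypkg/__init__.pyc; __pycache__ variants are left to
# UninstallPathSet.add() as noted in the docstring.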
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return False if we couldn't parse / extract it.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g. 2.0 > 1.1); and warn when
    installing a version that is only a minor version ahead (e.g. 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
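# Assuming VERSION_COMPATIBLE is (1, 0): a wheel declaring Wheel-Version 1.2
# would only produce the warning above, while Wheel-Version 2.0 would raise
# UnsupportedWheel because its major version is ahead of what this code knows.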
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE
)
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
"%s is not a valid wheel filename." % filename
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set(
(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
)
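    # Illustrative parse of a hypothetical filename "pip-8.1.2-py2.py3-none-any.whl":
    # name "pip", version "8.1.2", pyversions ['py2', 'py3'], abis ['none'],
    # plats ['any'], and file_tags {('py2', 'none', 'any'), ('py3', 'none', 'any')}.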
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
        None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, build_options=None,
global_options=None):
self.requirement_set = requirement_set
self.finder = finder
self._cache_root = requirement_set._wheel_cache._cache_dir
self._wheel_dir = requirement_set.wheel_download_dir
self.build_options = build_options or []
self.global_options = global_options or []
def _build_one(self, req, output_dir):
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
tempd = tempfile.mkdtemp('pip-wheel-')
try:
if self.__build_one(req, tempd):
try:
wheel_name = os.listdir(tempd)[0]
wheel_path = os.path.join(output_dir, wheel_name)
shutil.move(os.path.join(tempd, wheel_name), wheel_path)
logger.info('Stored in directory: %s', output_dir)
return wheel_path
except:
pass
# Ignore return, we can't do anything else useful.
self._clean_one(req)
return None
finally:
rmtree(tempd)
def _base_setup_args(self, req):
return [
sys.executable, '-c',
"import setuptools;__file__=%r;"
"exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), "
"__file__, 'exec'))" % req.setup_py
] + list(self.global_options)
def __build_one(self, req, tempd):
base_args = self._base_setup_args(req)
logger.info('Running setup.py bdist_wheel for %s', req.name)
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
try:
call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed building wheel for %s', req.name)
return False
def _clean_one(self, req):
base_args = self._base_setup_args(req)
logger.info('Running setup.py clean for %s', req.name)
clean_args = base_args + ['clean', '--all']
try:
call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(self, autobuilding=False):
"""Build wheels.
        :param autobuilding: If True, replace the sdist we built from with the
            newly built wheel, in preparation for installation.
:return: True if all the wheels built correctly.
"""
assert self._wheel_dir or (autobuilding and self._cache_root)
# unpack sdists and constructs req set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
buildset = []
for req in reqset:
if req.constraint:
continue
if req.is_wheel:
if not autobuilding:
logger.info(
'Skipping %s, due to already being wheel.', req.name)
elif req.editable:
if not autobuilding:
logger.info(
'Skipping bdist_wheel for %s, due to being editable',
req.name)
elif autobuilding and req.link and not req.link.is_artifact:
pass
elif autobuilding and not req.source_dir:
pass
else:
if autobuilding:
link = req.link
base, ext = link.splitext()
if pip.index.egg_info_matches(base, None, link) is None:
# Doesn't look like a package - don't autobuild a wheel
# because we'll have no way to lookup the result sanely
continue
if "binary" not in pip.index.fmt_ctl_formats(
self.finder.format_control,
canonicalize_name(req.name)):
logger.info(
"Skipping bdist_wheel for %s, due to binaries "
"being disabled for it.", req.name)
continue
buildset.append(req)
if not buildset:
return True
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for req in buildset]),
)
with indent_log():
build_success, build_failure = [], []
for req in buildset:
if autobuilding:
output_dir = _cache_for_link(self._cache_root, req.link)
try:
ensure_dir(output_dir)
except OSError as e:
logger.warn("Building wheel for %s failed: %s",
req.name, e)
build_failure.append(req)
continue
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(req, output_dir)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and not os.path.exists(os.path.join(
req.source_dir, PIP_DELETE_MARKER_FILENAME)):
raise AssertionError(
"bad source dir - missing marker")
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(
self.requirement_set.build_dir)
# Update the link for this.
req.link = pip.index.Link(
path_to_url(wheel_file))
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(
req.link, req.source_dir, None, False,
session=self.requirement_set.session)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return True if all builds were successful
return len(build_failure) == 0
| James-Firth/pip | pip/wheel.py | Python | mit | 31,114 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Roles in this namespace are meant to provide `Nginx <http://wiki.nginx.org/Main>`_ web server utility methods for Debian distributions.
'''
from provy.core import Role
from provy.more.debian.package.aptitude import AptitudeRole
class NginxRole(Role):
'''
This role provides `Nginx <http://wiki.nginx.org/Main>`_ web server management utilities for Debian distributions.
Example:
::
from provy.core import Role
from provy.more.debian import NginxRole
class MySampleRole(Role):
def provision(self):
with self.using(NginxRole) as role:
role.ensure_conf(conf_template='nginx.conf')
role.ensure_site_disabled('default')
role.create_site(site='my-site', template='my-site')
role.ensure_site_enabled('my-site')
'''
def __available_site_for(self, name):
return '/etc/nginx/sites-available/%s' % name
def __enabled_site_for(self, name):
return '/etc/nginx/sites-enabled/%s' % name
def provision(self):
'''
Installs `Nginx <http://wiki.nginx.org/Main>`_ dependencies.
    This method should still be called if overridden in derived classes, or `Nginx <http://wiki.nginx.org/Main>`_ won't work properly on the remote server.
Example:
::
from provy.core import Role
from provy.more.debian import NginxRole
class MySampleRole(Role):
def provision(self):
self.provision_role(NginxRole) # does not need to be called if using with block.
'''
with self.using(AptitudeRole) as role:
role.ensure_up_to_date()
role.ensure_package_installed('nginx')
def cleanup(self):
'''
Restarts nginx if any changes have been made.
There's no need to call this method manually.
'''
super(NginxRole, self).cleanup()
if 'must-restart-nginx' in self.context and self.context['must-restart-nginx']:
self.restart()
def ensure_conf(self, conf_template, options={}, nginx_conf_path='/etc/nginx/nginx.conf'):
'''
Ensures that nginx configuration is up-to-date with the specified template.
:param conf_template: Name of the template for nginx.conf.
:type conf_template: :class:`str`
:param options: Dictionary of options passed to template. Extends context.
:type options: :class:`dict`
:param nginx_conf_path: Path of the nginx configuration file. Defaults to /etc/nginx/nginx.conf.
:type nginx_conf_path: :class:`str`
Example:
::
from provy.core import Role
from provy.more.debian import NginxRole
class MySampleRole(Role):
def provision(self):
with self.using(NginxRole) as role:
role.ensure_conf(conf_template='nginx.conf')
'''
result = self.update_file(conf_template,
nginx_conf_path,
options=options,
sudo=True)
if result:
self.log('nginx conf updated!')
self.ensure_restart()
def ensure_site_disabled(self, site):
'''
Ensures that the specified site is removed from nginx list of enabled sites.
:param site: Name of the site to disable.
:type site: :class:`str`
Example:
::
from provy.core import Role
from provy.more.debian import NginxRole
class MySampleRole(Role):
def provision(self):
with self.using(NginxRole) as role:
role.ensure_site_disabled('default')
'''
result = self.remove_file(self.__enabled_site_for(site), sudo=True)
if result:
self.log('%s nginx site is disabled!' % site)
self.ensure_restart()
def ensure_site_enabled(self, site):
'''
Ensures that a symlink is created for the specified site at nginx list of enabled sites from the list of available sites.
:param site: Name of the site to enable.
:type site: :class:`str`
Example:
::
from provy.core import Role
from provy.more.debian import NginxRole
class MySampleRole(Role):
def provision(self):
with self.using(NginxRole) as role:
role.ensure_site_enabled('my-site')
'''
result = self.remote_symlink(self.__available_site_for(site),
self.__enabled_site_for(site),
sudo=True)
if result:
self.log('%s nginx site is enabled!' % site)
self.ensure_restart()
def create_site(self, site, template, options={}):
'''
Adds a website with the specified template to Nginx list of available sites.
Warning: Do not forget to call :meth:`ensure_site_enabled` after a call to `create_site`, or your site won't be enabled.
        :param site: Name of the site to create.
:type site: :class:`str`
:param template: Site configuration template.
:type template: :class:`str`
:param options: Options to pass to the template.
        :type options: :class:`dict`
Example:
::
from provy.core import Role
from provy.more.debian import NginxRole
class MySampleRole(Role):
def provision(self):
with self.using(NginxRole) as role:
role.create_site(site='my-site', template='my-site', options={
"user": "me"
})
'''
result = self.update_file(template,
self.__available_site_for(site),
options=options, sudo=True)
if result:
self.log('%s nginx site created!' % site)
self.ensure_restart()
def ensure_restart(self):
'''
Ensures that nginx gets restarted on cleanup. There's no need to call this method as any changes to Nginx will trigger it.
Example:
::
from provy.core import Role
from provy.more.debian import NginxRole
class MySampleRole(Role):
def provision(self):
with self.using(NginxRole) as role:
role.ensure_restart()
'''
self.context['must-restart-nginx'] = True
def restart(self):
'''
Forcefully restarts Nginx.
Example:
::
from provy.core import Role
from provy.more.debian import NginxRole
class MySampleRole(Role):
def provision(self):
with self.using(NginxRole) as role:
role.restart()
'''
command = '/etc/init.d/nginx restart'
self.execute(command, sudo=True)
| python-provy/provy | provy/more/debian/web/nginx.py | Python | mit | 7,202 |
from django.core.exceptions import ValidationError
from django.forms import UUIDField
import six
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from django_smalluuid import settings
class ShortUUIDField(UUIDField):
default_error_messages = {
'invalid': _('Enter a valid short-form UUID.'),
}
def __init__(self, uuid_class=settings.DEFAULT_CLASS, *args, **kwargs):
self.uuid_class = uuid_class
if isinstance(self.uuid_class, six.string_types):
self.uuid_class = import_string(uuid_class)
super(ShortUUIDField, self).__init__(*args, **kwargs)
def prepare_value(self, value):
if isinstance(value, self.uuid_class):
return six.text_type(value)
return value
def to_python(self, value):
if value in self.empty_values:
return None
if not isinstance(value, self.uuid_class):
try:
value = self.uuid_class(value)
except ValueError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
| adamcharnock/django-smalluuid | django_smalluuid/forms.py | Python | mit | 1,158 |
from collections import OrderedDict
from conans.paths import SimplePaths
from conans.client.output import Color
from conans.model.ref import ConanFileReference
from conans.model.ref import PackageReference
from conans.client.installer import build_id
import fnmatch
class Printer(object):
""" Print some specific information """
INDENT_COLOR = {0: Color.BRIGHT_CYAN,
1: Color.BRIGHT_RED,
2: Color.BRIGHT_GREEN,
3: Color.BRIGHT_YELLOW,
4: Color.BRIGHT_MAGENTA}
INDENT_SPACES = 4
def __init__(self, out):
self._out = out
def print_graph(self, deps_graph, registry):
""" Simple pretty printing of a deps graph, can be improved
with options, info like licenses, etc
"""
self._out.writeln("Requirements", Color.BRIGHT_YELLOW)
for node in sorted(deps_graph.nodes):
ref, _ = node
if not ref:
continue
remote = registry.get_ref(ref)
from_text = "from local" if not remote else "from %s" % remote.name
self._out.writeln(" %s %s" % (repr(ref), from_text), Color.BRIGHT_CYAN)
self._out.writeln("Packages", Color.BRIGHT_YELLOW)
for node in sorted(deps_graph.nodes):
ref, conanfile = node
if not ref:
continue
ref = PackageReference(ref, conanfile.info.package_id())
self._out.writeln(" %s" % repr(ref), Color.BRIGHT_CYAN)
self._out.writeln("")
def _print_paths(self, ref, conan, path_resolver, show):
if isinstance(ref, ConanFileReference):
if show("export_folder"):
path = path_resolver.export(ref)
self._out.writeln(" export_folder: %s" % path, Color.BRIGHT_GREEN)
if show("source_folder"):
path = path_resolver.source(ref, conan.short_paths)
self._out.writeln(" source_folder: %s" % path, Color.BRIGHT_GREEN)
if show("build_folder") and isinstance(path_resolver, SimplePaths):
# @todo: check if this is correct or if it must always be package_id()
bid = build_id(conan)
if not bid:
bid = conan.info.package_id()
path = path_resolver.build(PackageReference(ref, bid), conan.short_paths)
self._out.writeln(" build_folder: %s" % path, Color.BRIGHT_GREEN)
if show("package_folder") and isinstance(path_resolver, SimplePaths):
id_ = conan.info.package_id()
path = path_resolver.package(PackageReference(ref, id_), conan.short_paths)
self._out.writeln(" package_folder: %s" % path, Color.BRIGHT_GREEN)
def print_info(self, deps_graph, project_reference, _info, registry, graph_updates_info=None,
remote=None, node_times=None, path_resolver=None, package_filter=None,
show_paths=False):
""" Print the dependency information for a conan file
Attributes:
deps_graph: the dependency graph of conan file references to print
            project_reference: the conan file reference that represents the conan
                               file for a project on the path. This may be None,
                               in which case the project itself will not be part
                               of the printed dependencies.
remote: Remote specified in install command.
Could be different from the registry one.
"""
if _info is None: # No filter
def show(_):
return True
else:
_info_lower = [s.lower() for s in _info.split(",")]
def show(field):
return field in _info_lower
graph_updates_info = graph_updates_info or {}
for node in sorted(deps_graph.nodes):
ref, conan = node
if not ref:
# ref is only None iff info is being printed for a project directory, and
# not a passed in reference
if project_reference is None:
continue
else:
ref = project_reference
if package_filter and not fnmatch.fnmatch(str(ref), package_filter):
continue
self._out.writeln("%s" % str(ref), Color.BRIGHT_CYAN)
reg_remote = registry.get_ref(ref)
# Excludes PROJECT fake reference
remote_name = remote
if reg_remote and not remote:
remote_name = reg_remote.name
if show("id"):
id_ = conan.info.package_id()
self._out.writeln(" ID: %s" % id_, Color.BRIGHT_GREEN)
if show("build_id"):
bid = build_id(conan)
self._out.writeln(" BuildID: %s" % bid, Color.BRIGHT_GREEN)
if show_paths:
self._print_paths(ref, conan, path_resolver, show)
if isinstance(ref, ConanFileReference) and show("remote"):
if reg_remote:
self._out.writeln(" Remote: %s=%s" % (reg_remote.name, reg_remote.url),
Color.BRIGHT_GREEN)
else:
self._out.writeln(" Remote: None", Color.BRIGHT_GREEN)
url = getattr(conan, "url", None)
license_ = getattr(conan, "license", None)
author = getattr(conan, "author", None)
if url and show("url"):
self._out.writeln(" URL: %s" % url, Color.BRIGHT_GREEN)
if license_ and show("license"):
if isinstance(license_, (list, tuple, set)):
self._out.writeln(" Licenses: %s" % ", ".join(license_), Color.BRIGHT_GREEN)
else:
self._out.writeln(" License: %s" % license_, Color.BRIGHT_GREEN)
if author and show("author"):
self._out.writeln(" Author: %s" % author, Color.BRIGHT_GREEN)
if isinstance(ref, ConanFileReference) and show("update"): # Excludes PROJECT
update = graph_updates_info.get(ref)
update_messages = {
None: ("Version not checked", Color.WHITE),
0: ("You have the latest version (%s)" % remote_name, Color.BRIGHT_GREEN),
1: ("There is a newer version (%s)" % remote_name, Color.BRIGHT_YELLOW),
-1: ("The local file is newer than remote's one (%s)" % remote_name,
Color.BRIGHT_RED)
}
self._out.writeln(" Updates: %s" % update_messages[update][0],
update_messages[update][1])
if node_times and node_times.get(ref, None) and show("date"):
self._out.writeln(" Creation date: %s" % node_times.get(ref, None),
Color.BRIGHT_GREEN)
dependants = deps_graph.inverse_neighbors(node)
if isinstance(ref, ConanFileReference) and show("required"): # Excludes
self._out.writeln(" Required by:", Color.BRIGHT_GREEN)
for d in dependants:
ref = repr(d.conan_ref) if d.conan_ref else project_reference
self._out.writeln(" %s" % ref, Color.BRIGHT_YELLOW)
if show("requires"):
depends = deps_graph.neighbors(node)
if depends:
self._out.writeln(" Requires:", Color.BRIGHT_GREEN)
for d in depends:
self._out.writeln(" %s" % repr(d.conan_ref), Color.BRIGHT_YELLOW)
def print_search_recipes(self, references, pattern):
""" Print all the exported conans information
param pattern: wildcards, e.g., "opencv/*"
"""
if not references:
warn_msg = "There are no packages"
pattern_msg = " matching the %s pattern" % pattern
self._out.info(warn_msg + pattern_msg if pattern else warn_msg)
return
self._out.info("Existing package recipes:\n")
for conan_ref in sorted(references):
self._print_colored_line(str(conan_ref), indent=0)
def print_search_packages(self, packages_props, reference, recipe_hash, packages_query):
if not packages_props:
if packages_query:
warn_msg = "There are no packages for reference '%s' matching the query '%s'" % (str(reference),
packages_query)
else:
warn_msg = "There are no packages for pattern '%s'" % str(reference)
self._out.info(warn_msg)
return
self._out.info("Existing packages for recipe %s:\n" % str(reference))
# Each package
for package_id, properties in sorted(packages_props.items()):
self._print_colored_line("Package_ID", package_id, 1)
for section in ("options", "settings", "full_requires"):
attrs = properties.get(section, [])
if attrs:
section_name = {"full_requires": "requires"}.get(section, section)
self._print_colored_line("[%s]" % section_name, indent=2)
if isinstance(attrs, dict): # options, settings
attrs = OrderedDict(sorted(attrs.items()))
for key, value in attrs.items():
self._print_colored_line(key, value=value, indent=3)
elif isinstance(attrs, list): # full requires
for key in sorted(attrs):
self._print_colored_line(key, indent=3)
package_recipe_hash = properties.get("recipe_hash", None)
# Always compare outdated with local recipe, simplification,
# if a remote check is needed install recipe first
if recipe_hash:
self._print_colored_line("outdated from recipe: %s" % (recipe_hash != package_recipe_hash), indent=2)
self._out.writeln("")
def print_profile(self, name, profile):
self._out.info("Configuration for profile %s:\n" % name)
self._print_profile_section("settings", profile.settings.items())
envs = []
for package, env_vars in profile.env_values.data.items():
for name, value in env_vars.items():
key = "%s:%s" % (package, name) if package else name
envs.append((key, value))
self._print_profile_section("env", envs, separator='=')
scopes = profile.scopes.dumps().splitlines()
self._print_colored_line("[scopes]")
for scope in scopes:
self._print_colored_line(scope, indent=1)
def _print_profile_section(self, name, items, indent=0, separator=": "):
self._print_colored_line("[%s]" % name, indent=indent)
for key, value in items:
self._print_colored_line(key, value=str(value), indent=indent+1, separator=separator)
def _print_colored_line(self, text, value=None, indent=0, separator=": "):
""" Print a colored line depending on its indentation level
Attributes:
text: string line
split_symbol: if you want an output with different in-line colors
indent_plus: integer to add a plus indentation
"""
text = text.strip()
if not text:
return
text_color = Printer.INDENT_COLOR.get(indent, Color.BRIGHT_WHITE)
indent_text = ' ' * Printer.INDENT_SPACES * indent
if value is not None:
value_color = Color.BRIGHT_WHITE
self._out.write('%s%s%s' % (indent_text, text, separator), text_color)
self._out.writeln(value, value_color)
else:
self._out.writeln('%s%s' % (indent_text, text), text_color)
| mropert/conan | conans/client/printer.py | Python | mit | 12,165 |
import logging
import ConfigParser
from collections import namedtuple
# User Configuration Handler
class ConfigHandler:
# Some Config Constants
FILE_NAME = 'app.cfg'
FILE_MODE = 'wb'
CONSUMER_SECTION = 'Consumer Info'
ACCESS_SECTION = 'Access Info'
MENTION_SECTION = 'Mention Info'
CONSUMER_KEY = 'CONSUMER_KEY'
CONSUMER_SECRET = 'CONSUMER_SECRET'
ACCESS_KEY = 'ACCESS_KEY'
ACCESS_SECRET = 'ACCESS_SECRET'
MENTION_ID = 'MENTION_ID'
def __init__(self):
self.config = ConfigParser.SafeConfigParser()
self.get_config()
# GETTERS!!!
def consumer_key(self):
return self.config.get(self.CONSUMER_SECTION, self.CONSUMER_KEY)
def consumer_secret(self):
return self.config.get(self.CONSUMER_SECTION, self.CONSUMER_SECRET)
def access_key(self):
return self.config.get(self.ACCESS_SECTION, self.ACCESS_KEY)
def access_secret(self):
return self.config.get(self.ACCESS_SECTION, self.ACCESS_SECRET)
def mention_id(self):
return self.config.get(self.MENTION_SECTION, self.MENTION_ID)
# Gets settings out of the config file
def get_config(self):
logging.info('Attempting to read configuration')
try:
self.config.read(self.FILE_NAME)
logging.info('Config read')
except (ConfigParser.Error):
logging.error("There was an error reading your configuration")
return None
# Set a config values
def set_value(self, section, option, value):
logging.info('Updating Configuration Values')
self.config.set(section, option, value)
# Write changes to file
with open(self.FILE_NAME, self.FILE_MODE) as configFile:
self.config.write(configFile)
| Th3BFG/hex-note | HexNote/confighandler.py | Python | mit | 1,649 |
def ReadLines(filename):
content = []
with open(filename) as f:
content = [x.rstrip() for x in f.readlines()]
return content
| aawc/cryptopals | sets/1/challenges/common/read_lines.py | Python | mit | 136 |
from wdom.server import start
from wdom.document import set_app
from wdom.tag import Div, H1, Input
class MyElement(Div):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.h1 = H1(parent=self)
self.h1.textContent = 'Hello, WDOM'
self.input = Input(parent=self)
self.input.addEventListener('input', self.update)
def update(self, event):
self.h1.textContent = event.target.value
if __name__ == '__main__':
set_app(MyElement())
start()
| miyakogi/wdom | docs/guide/samples/wdom2.py | Python | mit | 526 |
#!/bin/python3
import sys
time = input().strip()
# 12AM = 00:00
# 12PM = 12:00
# 01PM = 13:00
meridian = time[-2:]
time = time[:-2]
hour, minute, second = time.split(":")
hour = int(hour, 10)
if meridian == "PM":
if hour != 12:
hour += 12
else:
if hour == 12:
hour = 0
print("{:0>2d}:{}:{}".format(hour, minute, second))
| costincaraivan/hackerrank | algorithms/warmup/python3/time_conversion.py | Python | mit | 351 |
""" Test suite for the cppext library.
The script can be executed on its own or incorporated into a larger test suite.
However the tests are run, be aware of which version of the package is actually
being tested. If the package is installed in site-packages, that version takes
precedence over the version in this project directory. Use a virtualenv test
environment or setuptools develop mode to test against the development version.
"""
import pytest
from cppext import *
def test_pyhello():
""" Test the pyhello() function.
"""
assert pyhello() == "Greetings from Python!"
return
def test_cpphello():
""" Test the cpphello() function.
"""
assert cpphello() == "Greetings from C++!"
return
class CppGreetingTest(object):
""" Test suite for the CppGreeting class.
"""
def test_hello(self):
""" Test the hello() method.
"""
name = "CppGreetingTest"
greeting = CppGreeting(name)
assert greeting.hello() == "Greetings from C++, {:s}!".format(name)
return
# Make the module executable.
if __name__ == "__main__":
raise SystemExit(pytest.main([__file__]))
| mdklatt/cppext-python | test/test_cppext.py | Python | mit | 1,176 |
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event.is_published'
db.add_column('event_rsvp_event', 'is_published',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Event.is_published'
db.delete_column('event_rsvp_event', 'is_published')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'event_rsvp.event': {
'Meta': {'object_name': 'Event'},
'allow_anonymous_rsvp': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'available_seats': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 17, 0, 0)'}),
'hide_available_seats': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_seats_per_guest': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'required_fields': ('event_rsvp.models.MultiSelectField', [], {'max_length': '250', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 16, 0, 0)'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'venue': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'event_rsvp.guest': {
'Meta': {'object_name': 'Guest'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'guests'", 'to': "orm['event_rsvp.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_attending': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '4000', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['event_rsvp'] | bitmazk/django-event-rsvp | event_rsvp/migrations/0008_auto__add_field_event_is_published.py | Python | mit | 7,470 |
# coding: utf-8
from numpy import matrix
from oneVsAll import oneVsAll
from numpy import loadtxt, savetxt
from predictOneVsAll import predictOneVsAll
def train():
num_labels = 34
print '... Training'
X = matrix(loadtxt('X.dat')) / 255.0
y = matrix(loadtxt('y.dat')).transpose()
the_lambda = 0.1
all_theta = oneVsAll(X, y, num_labels, the_lambda)
savetxt('theta.dat', all_theta)
def test():
print '... Testing'
all_theta = matrix(loadtxt('theta.dat'))
X_test = matrix(loadtxt('X_test.dat')) / 255.0
y_test = matrix(loadtxt('y_test.dat')).transpose()
acc, pred = predictOneVsAll(all_theta, X_test)
single_acc = sum(pred == y_test) / (len(y_test) * 1.0)
max_acc = pow(single_acc, 4)
min_acc = single_acc*4 - 3
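    # These bounds assume a 4-character captcha: single_acc**4 treats the four
    # characters as independent, while 4*single_acc - 3 is the union-bound
    # lower limit on getting all four right at once.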
print 'Theoretical accuracy:'
print '\tSingle accuracy: %2.2f%%' % (single_acc*100)
print '\tTotal accuracy: %2.2f%% ~ %2.2f%%' % (min_acc*100, max_acc*100)
test()
| skyduy/zfverify | Verify-Manual-python/train/core/train.py | Python | mit | 950 |
#!/usr/bin/env python
import queue
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
self.size = 1
self.self_count = 1
def treeBuilder(nodeString):
nodeList = nodeString[1:-1].split(',')
nodeQueue = queue.Queue()
if nodeList[0] == '':
return None
root = TreeNode(int(nodeList[0]))
currNode = root
leftDone, rightDone = 0, 0
for val in nodeList[1:]:
print('processing %s' % val)
print('leftDone,', leftDone, 'rightDone,', rightDone)
if val != 'null':
newNode = TreeNode(int(val))
print("create new node: %d" % newNode.val)
nodeQueue.put(newNode)
else:
newNode = None
if leftDone == 0:
currNode.left, leftDone = newNode, 1
elif rightDone == 0:
currNode.right, rightDone = newNode, 1
leftDone, rightDone = 0, 0
currNode = nodeQueue.get()
return root
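# Illustrative input (LeetCode-style level-order string, values made up):
#   treeBuilder('[1,2,3,null,null,4,5]')
# builds a tree with root 1, children 2 and 3, and 4/5 as the children of 3;
# 'null' entries stand for missing children.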
def traverse(node):
if node == None:
return
print('Node: %d' % node.val)
if node.left != None:
print('Left: %d' % node.left.val)
if node.right != None:
print('Right: %d' % node.right.val)
if node.left != None:
traverse(node.left)
if node.right != None:
traverse(node.right)
def treeToString(root):
if root == None:
return None
nodeQueue = [root]
currStr = ''
# print(nodeQueue)
while len(nodeQueue) > 0:
node = nodeQueue[0]
nodeQueue = nodeQueue[1:]
# print(nodeQueue)
if node == None:
currStr += 'null,'
# print(None)
else:
# print(node.val, node.left, node.right)
nodeQueue += [node.left]
nodeQueue += [node.right]
currStr += str(node.val)+','
# print(nodeQueue)
# print(currStr)
stringList = currStr[:-1].split(',')
while stringList[-1] == 'null':
stringList = stringList[:-1]
currStr = ','.join(stringList)
return currStr
| eroicaleo/LearningPython | interview/leet/tree.py | Python | mit | 2,092 |
def tsvread(filename, delimiter = "\t", endline = "\n", **kwargs):
""" Parses tsv file to an iterable of dicts.
Args:
filename (str):
File to read. First line is considered header.
delimiter (str, Optional):
String used to mark fields in file. Defaults to '\\t'
endline (str, Optional):
String used to mark end of lines in file. Defaults to '\\n'
Kwargs:
Maps column name to type. If no type is given for a column,
it will be of type str by default.
Returns:
Iterable that returns a dict for every line.
Raises:
IOError in case file cannot be opened for reading.
"""
with open(filename) as inp:
head = inp.next().rstrip(endline).split(delimiter)
for line in inp:
yield {k: kwargs.get(k, str)(v) for k, v in zip(head, line.rstrip(endline).split(delimiter))}
def tsvdump(filename, data, cols, delimiter = "\t", endline = "\n", header = True):
""" Dumps data to a tsv file.
Args:
filename (str):
File used for output.
data (iterable of dicts):
Iterable of dicts representing the data.
cols (list of strs):
Names of columns that should be written to output file.
delimiter (str, Optional):
String used to mark fields in file. Defaults to '\\t'
endline (str, Optional):
String used to mark end of lines in file. Defaults to '\\n'
header (bool, Optional):
            Whether or not to write the header to the file. True by default.
Raises:
IOError in case file cannot be opened for writing.
"""
with open(filename, 'w') as oup:
if header:
oup.write("%s%s" % (delimiter.join(map(str, cols)), endline))
for row in data:
oup.write("%s%s" % (delimiter.join(str(row.get(col, "")) for col in cols), endline))
| btoth/utility | tsvio.py | Python | mit | 1,931 |
"""
Django settings for ProgrammerCompetencyMatrix project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^k(h30rq(chrdd0y2)327we@uh@nat3d*^8f1l--4t0bxo9_nm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'survey',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ProgrammerCompetencyMatrix.urls'
WSGI_APPLICATION = 'ProgrammerCompetencyMatrix.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static').replace(os.sep, '/'),
)
# BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
#logging.warning(TEMPLATE_DIR)
TEMPLATE_DIRS = (
# os.path.join(BASE_DIR, '../../templates/'),
os.path.join(BASE_DIR, 'templates').replace(os.sep, '/'),
# 'D:/Java/Project/GettingStartBlog/templates/'
)
| FiaDot/programmer-competency-matrix | ProgrammerCompetencyMatrix/settings.py | Python | mit | 2,486 |
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
from os import environ
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(dirname(abspath(__file__))))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Gregory Favre', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'Europe/Zurich'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'fr-CH'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_DIRS = [
join(DJANGO_ROOT, "static"),
]
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = get_env_setting('SECRET_KEY')
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
normpath(join(DJANGO_ROOT, 'templates')),
],
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'sekizai.context_processors.sekizai',
)
}
},
]
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = [
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
]
THIRD_PARTY_APPS = [
'captcha', # recaptcha
'compressor',
'sekizai',
]
# Apps specific for this project go here.
LOCAL_APPS = [
'contact',
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
RECAPTCHA_PUBLIC_KEY = get_env_setting('RECAPTCHA_PUBLIC_KEY')
RECAPTCHA_PRIVATE_KEY = get_env_setting('RECAPTCHA_PRIVATE_KEY') | gfavre/beyondthewall.ch | beyondthewall/beyondthewall/settings/base.py | Python | mit | 7,931 |
from wodcraft.api import resources
# Routing
def map_routes(api):
api.add_resource(resources.Activity,
'/api/v1.0/activities/<int:id>',
endpoint='activity')
api.add_resource(resources.Activities,
'/api/v1.0/activities',
endpoint='activities')
api.add_resource(resources.Score,
'/api/v1.0/scores/<int:id>',
endpoint='score')
api.add_resource(resources.Scores,
'/api/v1.0/scores',
endpoint='scores')
api.add_resource(resources.Tag,
'/api/v1.0/tags/<int:id>',
endpoint='tag')
api.add_resource(resources.Tags,
'/api/v1.0/tags',
endpoint='tags')
api.add_resource(resources.User,
'/api/v1.0/users/<int:id>',
endpoint='user')
api.add_resource(resources.Users,
'/api/v1.0/users',
endpoint='users')
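# Usage sketch (assumes a Flask-RESTful style Api object; the Flask/Api names below
# are illustrative and not part of this module):
# from flask import Flask
# from flask_restful import Api
# app = Flask(__name__)
# api = Api(app)
# map_routes(api)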
| the-gigi/wod-craft-server | wodcraft/api/routes.py | Python | mit | 1,064 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename : video.py
# Author : Kim K
# Created : Fri, 29 Jan 2016
# Last Modified : Sun, 31 Jan 2016
from sys import exit as Die
try:
import sys
import cv2
from colordetection import ColorDetector
except ImportError as err:
Die(err)
class Webcam:
def __init__(self):
self.cam = cv2.VideoCapture(0)
self.stickers = self.get_sticker_coordinates('main')
self.current_stickers = self.get_sticker_coordinates('current')
self.preview_stickers = self.get_sticker_coordinates('preview')
def get_sticker_coordinates(self, name):
"""
Every array has 2 values: x and y.
They are grouped per 3, since the cam will show
3 rows of 3 stickers.
:param name: the requested coordinate set ('main', 'current' or 'preview')
:returns: list
"""
stickers = {
'main': [
[200, 120], [300, 120], [400, 120],
[200, 220], [300, 220], [400, 220],
[200, 320], [300, 320], [400, 320]
],
'current': [
[20, 20], [54, 20], [88, 20],
[20, 54], [54, 54], [88, 54],
[20, 88], [54, 88], [88, 88]
],
'preview': [
[20, 130], [54, 130], [88, 130],
[20, 164], [54, 164], [88, 164],
[20, 198], [54, 198], [88, 198]
]
}
return stickers[name]
def draw_main_stickers(self, frame):
"""Draws the 9 stickers in the frame."""
for x,y in self.stickers:
cv2.rectangle(frame, (x,y), (x+30, y+30), (255,255,255), 2)
def draw_current_stickers(self, frame, state):
"""Draws the 9 current stickers in the frame."""
for index,(x,y) in enumerate(self.current_stickers):
cv2.rectangle(frame, (x,y), (x+32, y+32), ColorDetector.name_to_rgb(state[index]), -1)
def draw_preview_stickers(self, frame, state):
"""Draws the 9 preview stickers in the frame."""
for index,(x,y) in enumerate(self.preview_stickers):
cv2.rectangle(frame, (x,y), (x+32, y+32), ColorDetector.name_to_rgb(state[index]), -1)
def color_to_notation(self, color):
"""
Return the notation from a specific color.
We want a user to have green in front, white on top,
which is the usual orientation.
:param color: the requested color
"""
notation = {
'green' : 'F',
'white' : 'U',
'blue' : 'B',
'red' : 'R',
'orange' : 'L',
'yellow' : 'D'
}
return notation[color]
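# Example of the mapping above: 'green' -> 'F', 'white' -> 'U', 'yellow' -> 'D'.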
def scan(self):
"""
Opens up the webcam and scans the 9 regions in the center,
showing a preview in the upper left corner.
After hitting the space bar to confirm, the block below the
preview stickers shows the current state that was scanned.
This is shown so every user can see what the computer took as input.
:returns: dictionary
"""
sides = {}
preview = ['white','white','white',
'white','white','white',
'white','white','white']
state = [0,0,0,
0,0,0,
0,0,0]
while True:
_, frame = self.cam.read()
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
key = cv2.waitKey(10) & 0xff
# init certain stickers.
self.draw_main_stickers(frame)
self.draw_preview_stickers(frame, preview)
for index,(x,y) in enumerate(self.stickers):
roi = hsv[y:y+32, x:x+32]
avg_hsv = ColorDetector.average_hsv(roi)
color_name = ColorDetector.get_color_name(avg_hsv)
state[index] = color_name
# update when space bar is pressed.
if key == 32:
preview = list(state)
self.draw_preview_stickers(frame, state)
face = self.color_to_notation(state[4])
notation = [self.color_to_notation(color) for color in state]
sides[face] = notation
# show the new stickers
self.draw_current_stickers(frame, state)
# append amount of scanned sides
text = 'scanned sides: {}/6'.format(len(sides))
cv2.putText(frame, text, (20, 460), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
# quit on escape.
if key == 27:
break
# show result
cv2.imshow("default", frame)
self.cam.release()
cv2.destroyAllWindows()
return sides if len(sides) == 6 else False
webcam = Webcam()
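# Usage sketch (assumes a webcam on index 0; space confirms a face, Esc finishes):
# sides = webcam.scan()
# if sides:
#     print(sides['F'])  # the 9 notation letters of the face whose center is green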
| muts/qbr | src/video.py | Python | mit | 4,872 |
#!/usr/bin/python
"""
Author: Mohamed K. Eid ([email protected])
Description: stylizes an image using a generative model trained on a particular style
Args:
--input: path to the input image you'd like to apply a style to
--style: name of style (found in 'lib/generators') to apply to the input
--out: path to where the stylized image will be created
--styles: lists trained models available
"""
import argparse
import os
import time
import tensorflow as tf
import generator
import helpers
# Loss term weights
CONTENT_WEIGHT = 1.
STYLE_WEIGHT = 3.
TV_WEIGHT = .1
# Default image paths
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
TRAINED_MODELS_PATH = DIR_PATH + '/../lib/generators/'
INPUT_PATH, STYLE = None, None
OUT_PATH = DIR_PATH + '/../output/out_%.0f.jpg' % time.time()
if not os.path.isdir(DIR_PATH + '/../output'):
os.makedirs(DIR_PATH + '/../output')
# Parse arguments and assign them to their respective global variables
def parse_args():
global INPUT_PATH, STYLE, OUT_PATH
# Create flags
parser = argparse.ArgumentParser()
parser.add_argument('--input', help="path to the input image you'd like to apply a style to")
parser.add_argument('--style', help="name of style (found in 'lib/generators') to apply to the input")
parser.add_argument('--out', default=OUT_PATH, help="path to where the stylized image will be created")
parser.add_argument('--styles', action="store_true", help="list available styles")
args = parser.parse_args()
# Assign image paths from the arg parsing
if args.input and args.style:
INPUT_PATH = os.path.abspath(args.input)
STYLE = args.style
OUT_PATH = args.out
else:
if args.styles:
list_styles()
exit(0)
else:
parser.print_usage()
exit(1)
# Lists trained models
def list_styles():
print("Available styles:")
files = os.listdir(TRAINED_MODELS_PATH)
for file in files:
if os.path.isdir(TRAINED_MODELS_PATH + file):
print(file)
parse_args()
with tf.Session() as sess:
# Check if there is a model trained on the given style
if not os.path.isdir(TRAINED_MODELS_PATH + STYLE):
print("No trained model with the style '%s' was found." % STYLE)
list_styles()
exit(1)
# Load and initialize the image to be stylized
input_img, _ = helpers.load_img(INPUT_PATH)
input_img = tf.convert_to_tensor(input_img, dtype=tf.float32)
input_img = tf.expand_dims(input_img, axis=0)
# Initialize new generative net
with tf.variable_scope('generator'):
gen = generator.Generator()
gen.build(tf.convert_to_tensor(input_img))
sess.run(tf.global_variables_initializer())
# Restore previously trained model
ckpt_dir = TRAINED_MODELS_PATH + STYLE
saved_path = ckpt_dir + "/{}".format(STYLE)
saver = tf.train.Saver()
saver.restore(sess, saved_path)
# Generate stylized image
img = sess.run(gen.output)
# Save the generated image and close the tf session
helpers.render(img, path_out=OUT_PATH)
sess.close()
| mohamedkeid/Feed-Forward-Style-Transfer | src/test.py | Python | mit | 3,177 |
"""Implementations of locale abstract base class objects."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class CalendarInfo:
"""This interface defines methods to examine a calendar.
A calendar is organized into "years," "months," and "days." A
calendar system may offer a different designation for these
divisions which may or may not vary in duration.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_calendar_type(self):
"""Gets the calendar type.
:return: the calendar type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
calendar_type = property(fget=get_calendar_type)
@abc.abstractmethod
def get_display_name(self):
"""Gets the display name for this calendar.
:return: the display name
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
display_name = property(fget=get_display_name)
@abc.abstractmethod
def get_description(self):
"""Gets a description of this calendar.
:return: the description
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
description = property(fget=get_description)
@abc.abstractmethod
def get_common_era_name(self):
"""Gets the string for the common era in which years are positive.
:return: the common era label
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
common_era_name = property(fget=get_common_era_name)
@abc.abstractmethod
def get_common_era_abbrev(self):
"""Gets the abbreviation for the common era in which years are positive.
:return: the common era label
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
common_era_abbrev = property(fget=get_common_era_abbrev)
@abc.abstractmethod
def get_before_common_era_name(self):
"""Gets the string for before the common era in which years are negative.
:return: the before common era label
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
before_common_era_name = property(fget=get_before_common_era_name)
@abc.abstractmethod
def get_before_common_era_abbrev(self):
"""Gets the abbreviation for before the common era in which years are negative.
:return: the before common era label
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
before_common_era_abbrev = property(fget=get_before_common_era_abbrev)
@abc.abstractmethod
def get_first_year_in_common_era(self):
"""Gets the year number for the first year.
:return: the first year
:rtype: ``integer``
*compliance: mandatory -- This method must be implemented.*
"""
return # integer
first_year_in_common_era = property(fget=get_first_year_in_common_era)
@abc.abstractmethod
def get_last_year_before_common_era(self):
"""Gets the year number for the year before the common era.
:return: the last bce year
:rtype: ``integer``
*compliance: mandatory -- This method must be implemented.*
"""
return # integer
last_year_before_common_era = property(fget=get_last_year_before_common_era)
@abc.abstractmethod
def get_year_name(self):
"""Gets the display name for a calendar "year.
"
:return: the name
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
year_name = property(fget=get_year_name)
@abc.abstractmethod
def get_month_name(self):
"""Gets the display name for a calendar "month.
"
:return: the name
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
month_name = property(fget=get_month_name)
@abc.abstractmethod
def has_variable_months(self):
"""Tests if this calendar has a variable number of months in a year.
:return: ``true`` if the number of months varies, ``false`` if the number of months is constant
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_num_months(self):
"""Gets the number of months of the year.
For a variable month calendar, the number of all defined months
is returned. If there are no "months" in this calendar system
then this value may be zero.
:return: the number of months
:rtype: ``cardinal``
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
num_months = property(fget=get_num_months)
@abc.abstractmethod
def get_num_months_for_year(self, year):
"""Gets the number of months in the given year.
:param year: a year
:type year: ``integer``
:return: the number of months
:rtype: ``cardinal``
:raise: ``IllegalState`` -- ``year`` is greater than ``get_last_year_before_common_era()`` and less than ``get_first_year_in_common_era()``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
@abc.abstractmethod
def get_months(self):
"""Gets the months of the year in order of the calendar.
For a variable month calendar, all defined months are returned.
If there are no "months" in this calendar system then the list
may be empty.
:return: the months
:rtype: ``osid.locale.CalendarUnit``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.CalendarUnit
months = property(fget=get_months)
@abc.abstractmethod
def get_months_for_year(self, year):
"""Gets the months of the year in order of the calendar.
:param year: a year
:type year: ``integer``
:return: the months
:rtype: ``osid.locale.CalendarUnit``
:raise: ``IllegalState`` -- ``year`` is greater than ``get_last_year_before_common_era()`` and less than ``get_first_year_in_common_era()``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.CalendarUnit
@abc.abstractmethod
def get_day_name(self):
"""Gets the display name for a calendar "day.
"
:return: the name
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
day_name = property(fget=get_day_name)
@abc.abstractmethod
def has_variable_days(self):
"""Tests if this calendar has a variable number of days in a month.
:return: ``true`` if the number of days per month varies, ``false`` if the number of days is constant
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_num_days(self):
"""Gets the number of days in a year.
For a variable day calendar, the number of all defined days is
returned. If there are no "days" in this calendar system then
this value may be zero. If there are no "months" defined then
the number of days is the number of days in a year.
:return: the number of days
:rtype: ``cardinal``
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
num_days = property(fget=get_num_days)
@abc.abstractmethod
def get_num_days_for_month(self, year, month):
"""Gets the number of days in the given month.
:param year: a year
:type year: ``integer``
:param month: a ``DateTime`` month code
:type month: ``cardinal``
:return: the number of days
:rtype: ``cardinal``
:raise: ``IllegalState`` -- ``year`` is greater than ``get_last_year_before_common_era()`` and less than ``get_first_year_in_common_era()`` , or ``month`` is greater than ``get_months_for_year(year)``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
@abc.abstractmethod
def get_days(self):
"""Gets the days of the month in order of the calendar.
For a variable day calendar, all defined days are returned. If
there are no "days" in this time system then this value may be
zero. If there are no "months" defined then the number of days
applies to the entire year.
:return: the days
:rtype: ``osid.locale.CalendarUnit``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.CalendarUnit
days = property(fget=get_days)
@abc.abstractmethod
def get_days_for_month(self, year, month):
"""Gets the days of the given month in order of the calendar.
:param year: a year
:type year: ``integer``
:param month: a ``DateTime`` month code
:type month: ``cardinal``
:return: the days
:rtype: ``osid.locale.CalendarUnit``
:raise: ``IllegalState`` -- ``year`` is greater than ``get_last_year_before_common_era()`` and less than ``get_first_year_in_common_era()`` , or ``month`` is greater than or equal to ``get_months_for_year(year)``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.CalendarUnit
@abc.abstractmethod
def get_first_day_of_year(self):
"""Gets the first day of the calendar year.
:return: the first day of the year
:rtype: ``osid.calendaring.DateTime``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.DateTime
first_day_of_year = property(fget=get_first_day_of_year)
@abc.abstractmethod
def get_end_of_days_name(self):
"""Gets the display name for the end of the calendar.
:return: the name
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
end_of_days_name = property(fget=get_end_of_days_name)
@abc.abstractmethod
def get_origin(self):
"""Gets the start of the "common era" for this calendar.
:return: start of the calendar
:rtype: ``osid.calendaring.DateTime``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.DateTime
origin = property(fget=get_origin)
@abc.abstractmethod
def get_end_of_days(self):
"""Gets the end of the world as specified by this calendar.
:return: end of days
:rtype: ``osid.calendaring.DateTime``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.DateTime
end_of_days = property(fget=get_end_of_days)
@abc.abstractmethod
def get_weekdays(self):
"""Gets the days of the week in order of the calendar.
:return: the week days
:rtype: ``osid.locale.CalendarUnit``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.CalendarUnit
weekdays = property(fget=get_weekdays)
class TimeInfo:
"""This interface defines methods to examine a time.
Time is organized into "hours," "minutes," and "seconds." A time
system may offer a different designation for these divisions which
may or may not vary in duration.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_time_type(self):
"""Gets the time type.
:return: the time type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
time_type = property(fget=get_time_type)
@abc.abstractmethod
def get_display_name(self):
"""Gets the display name for this time system.
:return: the display name
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
display_name = property(fget=get_display_name)
@abc.abstractmethod
def get_display_label(self):
"""Gets a short label for this time system.
:return: the label
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
display_label = property(fget=get_display_label)
@abc.abstractmethod
def get_description(self):
"""Gets a description of this time system.
:return: the description
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
description = property(fget=get_description)
@abc.abstractmethod
def get_hour_name(self):
"""Gets the display name for "hours.
"
:return: the name
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
hour_name = property(fget=get_hour_name)
@abc.abstractmethod
def get_hour_abbrev(self):
"""Gets the abbreviation for "hours.
"
:return: the abbreviation
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
hour_abbrev = property(fget=get_hour_abbrev)
@abc.abstractmethod
def get_hour_initial(self):
"""Gets the initial for "hours.
"
:return: the initial
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
hour_initial = property(fget=get_hour_initial)
@abc.abstractmethod
def has_variable_hours(self):
"""Tests if this time system has a variable number of hours in a day.
:return: ``true`` if the number of hours per day varies, ``false`` if the number of hours per day is constant
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_num_hours(self):
"""Gets the number of hours in a day.
For a variable hour time system, the number of hours defined is
returned. If there are no "hours" in this time system then this
value may be zero.
:return: the number of hours
:rtype: ``cardinal``
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
num_hours = property(fget=get_num_hours)
@abc.abstractmethod
def get_num_hours_for_day(self, year, month, day):
"""Gets the number of hours for a given day.
:param year: a year
:type year: ``integer``
:param month: a ``DateTime`` month code
:type month: ``cardinal``
:param day: a ``DateTime`` day code
:type day: ``cardinal``
:return: the number of hours
:rtype: ``cardinal``
:raise: ``IllegalState`` -- ``year`` is greater than ``CalendarInfo.getLastYearBeforeCommonEra()`` and less than ``CalendarInfo.getFirstYearInCommonEra()`` , or ``month`` is greater than or equal to
``CalendarInfo.getNumMonthsForYear(year)`` , or ``day`` is greater than or equal to ``CalendarInfo.getDaysInMonth(year, month)``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
@abc.abstractmethod
def get_minute_name(self):
"""Gets the display name for "minutes.
"
:return: the name
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
minute_name = property(fget=get_minute_name)
@abc.abstractmethod
def get_minute_abbrev(self):
"""Gets the abbreviation for "minutes.
"
:return: the abbreviation
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
minute_abbrev = property(fget=get_minute_abbrev)
@abc.abstractmethod
def get_minute_initial(self):
"""Gets the initial for "minutes.
"
:return: the initial
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
minute_initial = property(fget=get_minute_initial)
@abc.abstractmethod
def has_variable_minutes(self):
"""Tests if this time system has a variable number of minutes in an hour.
:return: ``true`` if the number of minutes per hour varies, ``false`` if the number of minutes per hour is constant
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_num_minutes(self):
"""Gets the number of minutes in an hour.
For a variable minute time system, the number of minutes defined
is returned. If there are no "minutes" in this time system then
this value may be zero. If there are no "hours" defined then the
number of minutes is the number of minutes in a day.
:return: the number of minutes
:rtype: ``cardinal``
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
num_minutes = property(fget=get_num_minutes)
@abc.abstractmethod
def get_num_minutes_for_hour(self, year, month, day, hour):
"""Gets the minutes for a given hour.
:param year: a year
:type year: ``integer``
:param month: a ``DateTime`` month code
:type month: ``cardinal``
:param day: a ``DateTime`` day code
:type day: ``cardinal``
:param hour: an hour
:type hour: ``cardinal``
:return: the number of minutes
:rtype: ``cardinal``
:raise: ``IllegalState`` -- ``year`` is greater than ``CalendarInfo.getLastYearBeforeCommonEra()`` and less than ``CalendarInfo.getFirstYearInCommonEra(),`` or ``month`` is greater than or equal to
``CalendarInfo.getNumMonthsForYear(year)`` , or ``day`` is greater than or equal to ``CalendarInfo.getDaysInMonth(year, month)`` , or ``hour`` is greater than or equal to ``get_num_hours_in_day(year,
month, day)``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
@abc.abstractmethod
def get_second_name(self):
"""Gets the display name for "seconds.
"
:return: the name
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
second_name = property(fget=get_second_name)
@abc.abstractmethod
def get_second_abbrev(self):
"""Gets the abbreviation for "seconds.
"
:return: the abbreviation
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
second_abbrev = property(fget=get_second_abbrev)
@abc.abstractmethod
def get_second_initial(self):
"""Gets the initial for "seconds.
"
:return: the initial
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
second_initial = property(fget=get_second_initial)
@abc.abstractmethod
def has_variable_seconds(self):
"""Tests if this time system has a variable number of seconds in a minute.
:return: ``true`` if the number of seconds per minute varies, ``false`` if the number of seconds per minute is constant
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_num_seconds(self):
"""Gets the number of seconds in a minute.
For a variable second time system, the number of seconds defined
is returned. If there are no "seconds" in this time system then
this value may be zero. If there are no "minutes" defined then
the number of seconds is the number of seconds in an hour.
:return: the number of seconds
:rtype: ``cardinal``
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
num_seconds = property(fget=get_num_seconds)
@abc.abstractmethod
def get_num_seconds_for_minute(self, year, month, day, hour, minute):
"""Gets the seconds for a given minute.
:param year: a year
:type year: ``integer``
:param month: a ``DateTime`` month code
:type month: ``cardinal``
:param day: a ``DateTime`` day code
:type day: ``cardinal``
:param hour: an hour
:type hour: ``cardinal``
:param minute: a minute
:type minute: ``cardinal``
:return: the number of seconds
:rtype: ``cardinal``
:raise: ``IllegalState`` -- ``year`` is greater than ``get_last_year_before_common_era()`` and less than ``get_first_year_in_common_era()`` , or ``month`` is greater than or equal to ``CalendarInfo.getNumMonthsForYear(year)`` ,
or ``day`` is greater than or equal to ``CalendarInfo.getDaysInMonth(year, month)`` , or ``hour`` is greater than or equal to ``get_num_hours_in_day(year, month, day)`` , or ``minute`` is greater than
or equal to ``get_num_minutes_for_hour(year, month, day, hour)``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
class CalendarUnit:
"""A description of a calendar unit."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_name(self):
"""Gets the full name of this unit.
:return: the name
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
name = property(fget=get_name)
@abc.abstractmethod
def get_abbrev3(self):
"""Gets a 3-letter abbreviation for this unit.
:return: the abbreviation
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
abbrev3 = property(fget=get_abbrev3)
@abc.abstractmethod
def get_abbrev2(self):
"""Gets a 2-letter abbreviation for this unit.
:return: the abbreviation
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
abbrev2 = property(fget=get_abbrev2)
@abc.abstractmethod
def get_initial(self):
"""Gets a single letter abbreviation for this unit.
:return: the initial
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
initial = property(fget=get_initial)
@abc.abstractmethod
def get_date_time_code(self):
"""Gets the number of this unit used in ``DateTime``.
:return: the code
:rtype: ``cardinal``
*compliance: mandatory -- This method must be implemented.*
"""
return # cardinal
date_time_code = property(fget=get_date_time_code)
@abc.abstractmethod
def get_description(self):
"""Gets a description of this unit.
:return: the description
:rtype: ``osid.locale.DisplayText``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.DisplayText
description = property(fget=get_description)
class Locale:
"""A locale is a collection of types.
``Locale`` defines a set of types that together define the
formatting, language, calendaring, and currency for a locale or
culture.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_language_type(self):
"""Gets the language ``Type``.
:return: the language type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
language_type = property(fget=get_language_type)
@abc.abstractmethod
def get_script_type(self):
"""Gets the script ``Type``.
:return: the script type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
script_type = property(fget=get_script_type)
@abc.abstractmethod
def get_calendar_type(self):
"""Gets the calendar ``Type``.
:return: the calendar type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
calendar_type = property(fget=get_calendar_type)
@abc.abstractmethod
def get_time_type(self):
"""Gets the time ``Type``.
:return: the time type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
time_type = property(fget=get_time_type)
@abc.abstractmethod
def get_currency_type(self):
"""Gets the currency ``Type``.
:return: the currency type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
currency_type = property(fget=get_currency_type)
@abc.abstractmethod
def get_unit_system_type(self):
"""Gets the unit system ``Type``.
:return: the unit system type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
unit_system_type = property(fget=get_unit_system_type)
@abc.abstractmethod
def get_numeric_format_type(self):
"""Gets the numeric format ``Type``.
:return: the numeric format type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
numeric_format_type = property(fget=get_numeric_format_type)
@abc.abstractmethod
def get_calendar_format_type(self):
"""Gets the calendar format ``Type``.
:return: the calendar format type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
calendar_format_type = property(fget=get_calendar_format_type)
@abc.abstractmethod
def get_time_format_type(self):
"""Gets the time format ``Type``.
:return: the time format type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
time_format_type = property(fget=get_time_format_type)
@abc.abstractmethod
def get_currency_format_type(self):
"""Gets the currency format ``Type``.
:return: the currency format type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
currency_format_type = property(fget=get_currency_format_type)
@abc.abstractmethod
def get_coordinate_format_type(self):
"""Gets the coordinate format ``Type``.
:return: the coordinate format type
:rtype: ``osid.type.Type``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.Type
coordinate_format_type = property(fget=get_coordinate_format_type)
class LocaleList:
"""Like all ``OsidLists,`` ``LocaleList`` provides a means for accessing ``Locale`` elements sequentially either one at a time or many at a time.
Examples: while (ll.hasNext()) { Locale locale = ll.getNextLocale();
}
or
while (ll.hasNext()) {
Locale[] locales = ll.getNextLocales(ll.available());
}
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_next_locale(self):
"""Gets the next ``Locale`` in this list.
:return: the next ``Locale`` in this list. The ``has_next()`` method should be used to test that a next ``Locale`` is available before calling this method.
:rtype: ``osid.locale.Locale``
:raise: ``IllegalState`` -- no more elements available in this list
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.Locale
next_locale = property(fget=get_next_locale)
@abc.abstractmethod
def get_next_locales(self, n):
"""Gets the next set of ``Locale`` elements in this list.
The specified amount must be less than or equal to the return
from ``available()``.
:param n: the number of ``Locale`` elements requested which must be less than or equal to ``available()``
:type n: ``cardinal``
:return: an array of ``Locale`` elements. The length of the array is less than or equal to the number specified.
:rtype: ``osid.locale.Locale``
:raise: ``IllegalState`` -- no more elements available in this list
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.locale.Locale
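# Python usage sketch for a concrete LocaleList (has_next() and available() come from
# the OsidList base interface, which is defined outside this module):
# while locale_list.has_next():
#     locale = locale_list.get_next_locale()
#     print(locale.get_language_type())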
| mitsei/dlkit | dlkit/abstract_osid/locale/objects.py | Python | mit | 32,414 |
from django.contrib import admin
from feedhoos.worker.models.entry import EntryModel
from feedhoos.finder.models.feed import FeedModel
from feedhoos.reader.models.bookmark import BookmarkModel
from feedhoos.folder.models.folder import FolderModel
class EntryModelAdmin(admin.ModelAdmin):
list_display = ('id', "feed_id", "updated", 'url', 'title')
admin.site.register(EntryModel, EntryModelAdmin)
class FeedModelAdmin(admin.ModelAdmin):
list_display = ('id', "title", "etag", "modified", "url", "link", "last_access")
admin.site.register(FeedModel, FeedModelAdmin)
class BookmarkModelAdmin(admin.ModelAdmin):
list_display = ('id', "feed_id", "last_updated", "rating", "folder_id")
admin.site.register(BookmarkModel, BookmarkModelAdmin)
class FolderModelAdmin(admin.ModelAdmin):
list_display = ('id', "title", "rating")
admin.site.register(FolderModel, FolderModelAdmin)
| 38elements/feedhoos | feedhoos/worker/admin.py | Python | mit | 894 |
########
"""
This code is open source under the MIT license.
Its purpose is to help create special motion graphics
with effector controls inspired by Cinema 4D.
Example usage:
https://www.youtube.com/watch?v=BYXmV7fObOc
Notes to self:
Create a similar, lower poly control handle and
perhaps auto enable x-ray/bounding.
Long term, might become an entire panel with tools
of all types of effectors, with sections like
convenience tools (different split faces/loose buttons)
next to the actual different effector types
e.g. transform effector (as it is)
time effector (..?) etc. Research it more.
another scripting ref:
http://wiki.blender.org/index.php/Dev:2.5/Py/Scripts/Cookbook/Code_snippets/Interface#A_popup_dialog
plans:
- Make checkboxes for fields to have effector affect (loc, rot, scale)
- Name the constraints consistent to effector name, e.g. effector.L.001 for easy removal
- add falloff types (and update-able in driver setting)
- custom driver equation field ('advanced' tweaking, changes drivers for all in that effector group)
- Empty vertex objects for location which AREN'T transformed, so that there is no limit to
how much the location can do (now limited to be between object and base bone)
- create list panel that shows effectors added, and with each selected can do things:
- add more effector objects
- select current objects (and rig)
- change falloff/equation
- remove selected from effector
- remove effector (drivers & rig)
- apply effector in position (removes rig)
Source code available on github:
https://github.com/TheDuckCow/Blender_Effectors
"""
########
bl_info = {
"name": "Blender Effectors",
"author": "Patrick W. Crawford",
"version": (1, 0, 3),
"blender": (2, 71, 0),
"location": "3D window toolshelf",
"category": "Object",
"description": "Effector special motion graphics",
"wiki_url": "https://github.com/TheDuckCow/Blender_Effectors"
}
import bpy
## just in case
from bpy.props import *
from bpy_extras.io_utils import ExportHelper
from bpy.types import Operator
from os.path import dirname, join
""" needed? """
"""
class SceneButtonsPanel():
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "scene"
@classmethod
def poll(cls, context):
rd = context.scene.render
return context.scene and (rd.engine in cls.COMPAT_ENGINES)
"""
""" original """
def createEffectorRig(bones,loc=None):
[bone_base,bone_control] = bones
if (loc==None):
loc = bpy.context.scene.cursor_location
bpy.ops.object.armature_add(location=loc)
rig = bpy.context.active_object
rig.name = "effector"
bpy.types.Object.one = bpy.props.FloatProperty(name="does this setting do anything at all?", description="one hundred million", default=1.000, min=0.000, max=1.000)
rig.one = 0.6
#bpy.ops.wm.properties_add(data_path="object")
"""
bpy.ops.object.mode_set(mode='EDIT')
control = rig.data.edit_bones.new('control')
#bpy.ops.armature.bone_primitive_add() #control
# eventually add property as additional factor
rig.data.bones[0].name = 'base'
rig.data.bones[0].show_wire = True
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='EDIT')
# SCENE REFRESH OR SOMETHING???
rig.data.bones[1].name = 'control'
control = obj.pose.bones[bones['base']]
#rig.data.bones[1].parent = rig.data.[bones['base']] #need other path to bone data
bpy.ops.object.mode_set(mode='OBJECT')
rig.pose.bones[0].custom_shape = bone_base
rig.pose.bones[1].custom_shape = bone_control
# turn off inherit rotation for control??
# property setup
#bpy.ops.wm.properties_edit(data_path='object', property='Effector Scale',
# value='1.0', min=0, max=100, description='Falloff scale of effector')
#scene property='Effector.001'
"""
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.armature.select_all(action='SELECT')
bpy.ops.armature.delete()
arm = rig.data
bones = {}
bone = arm.edit_bones.new('base')
bone.head[:] = 0.0000, 0.0000, 0.0000
bone.tail[:] = 0.0000, 0.0000, 1.0000
bone.roll = 0.0000
bone.use_connect = False
bone.show_wire = True
bones['base'] = bone.name
bone = arm.edit_bones.new('control')
bone.head[:] = 0.0000, 0.0000, 0.0000
bone.tail[:] = 0.0000, 1.0000, 0.0000
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['base']]
bones['control'] = bone.name
bpy.ops.object.mode_set(mode='OBJECT')
pbone = rig.pose.bones[bones['base']]
#pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = rig.pose.bones[bones['control']]
#pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
#pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True]
bpy.ops.object.mode_set(mode='EDIT')
for bone in arm.edit_bones:
bone.select = False
bone.select_head = False
bone.select_tail = False
for b in bones:
bone = arm.edit_bones[bones[b]]
bone.select = True
bone.select_head = True
bone.select_tail = True
arm.edit_bones.active = bone
arm.layers = [(x in [0]) for x in range(32)]
bpy.ops.object.mode_set(mode='OBJECT')
rig.pose.bones[0].custom_shape = bone_base
rig.pose.bones[1].custom_shape = bone_control
return rig
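# The returned rig is a two-bone armature: 'base' marks the falloff origin (its Z
# scale is read by the drivers as 'scale') and 'control' carries the offset that is
# copied onto affected objects by the constraints created in addEffectorObj().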
def createBoneShapes():
if (bpy.data.objects.get("effectorBone1") is None) or (bpy.data.objects.get("effectorBone2") is None):
bpy.ops.mesh.primitive_uv_sphere_add(segments=8, ring_count=8, enter_editmode=True)
bpy.ops.mesh.delete(type='ONLY_FACE')
bpy.ops.object.editmode_toggle()
effectorBone1 = bpy.context.active_object
effectorBone1.name = "effectorBone1"
bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=1, enter_editmode=False, size=0.5)
effectorBone2 = bpy.context.active_object
effectorBone2.name = "effectorBone2"
#move to last layer and hide
[False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True]
effectorBone1.hide = True
effectorBone2.hide = True
effectorBone1.hide_render = True
effectorBone2.hide_render = True
return [bpy.data.objects["effectorBone1"],bpy.data.objects["effectorBone2"]]
def addEffectorObj(objList, rig):
# store previous selections/active etc
prevActive = bpy.context.scene.objects.active
#default expression, change later with different falloff etc
default_expression = "1/(.000001+objDist)*scale"
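# Falloff sketch: each constraint's influence is driven by scale/objDist, so objects
# near the 'base' bone follow 'control' strongly while distant objects barely move;
# the .000001 term avoids division by zero when the distance is zero.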
#empty list versus obj list?
emptyList = []
# explicit state set
bpy.ops.object.mode_set(mode='OBJECT')
# iterate over all objects passed in
for obj in objList:
if obj.type=="EMPTY": continue
##############################################
# Add the empty intermediate object/parent
bpy.ops.object.empty_add(type='PLAIN_AXES', view_align=False, location=obj.location)
empty = bpy.context.active_object
empty.name = "effr.empty"
obj.select = True
preParent = obj.parent
bpy.ops.object.parent_set(type='OBJECT', keep_transform=True)
bpy.context.object.empty_draw_size = 0.1
if (preParent):
bpy.ops.object.select_all(action='DESELECT')
# need to keep transform!
preParent.select = True
empty.select = True
bpy.context.scene.objects.active = preParent
bpy.ops.object.parent_set(type='OBJECT', keep_transform=True)
#empty.parent = preParent
bpy.context.scene.objects.active = obj
preConts = len(obj.constraints) # starting number of constraints
###############################################
# LOCATION
bpy.ops.object.constraint_add(type='COPY_LOCATION')
obj.constraints[preConts].use_offset = True
obj.constraints[preConts].target_space = 'LOCAL'
obj.constraints[preConts].owner_space = 'LOCAL'
obj.constraints[preConts].target = rig
obj.constraints[preConts].subtarget = "control"
driverLoc = obj.constraints[preConts].driver_add("influence").driver
driverLoc.type = 'SCRIPTED'
# var for objDist two targets, 1st is "base" second is "distanceRef"
varL_dist = driverLoc.variables.new()
varL_dist.type = 'LOC_DIFF'
varL_dist.name = "objDist"
varL_dist.targets[0].id = rig
varL_dist.targets[0].bone_target = 'base'
varL_dist.targets[1].id = empty
varL_scale = driverLoc.variables.new()
varL_scale.type = 'TRANSFORMS'
varL_scale.name = 'scale'
varL_scale.targets[0].id = rig
varL_scale.targets[0].transform_type = 'SCALE_Z'
varL_scale.targets[0].bone_target = 'base'
driverLoc.expression = default_expression
###############################################
# ROTATION
bpy.ops.object.constraint_add(type='COPY_ROTATION')
preConts+=1
obj.constraints[preConts].target_space = 'LOCAL'
obj.constraints[preConts].owner_space = 'LOCAL'
obj.constraints[preConts].target = rig
obj.constraints[preConts].subtarget = "control"
driverRot = obj.constraints[preConts].driver_add("influence").driver
driverRot.type = 'SCRIPTED'
# var for objDist two targets, 1st is "base" second is "distanceRef"
varR_dist = driverRot.variables.new()
varR_dist.type = 'LOC_DIFF'
varR_dist.name = "objDist"
varR_dist.targets[0].id = rig
varR_dist.targets[0].bone_target = 'base'
varR_dist.targets[1].id = obj
varR_scale = driverRot.variables.new()
varR_scale.type = 'TRANSFORMS'
varR_scale.name = 'scale'
varR_scale.targets[0].id = rig
varR_scale.targets[0].transform_type = 'SCALE_Z'
varR_scale.targets[0].bone_target = 'base'
driverRot.expression = default_expression
###############################################
# SCALE
bpy.ops.object.constraint_add(type='COPY_SCALE')
preConts+=1
obj.constraints[preConts].target_space = 'LOCAL'
obj.constraints[preConts].owner_space = 'LOCAL'
obj.constraints[preConts].target = rig
obj.constraints[preConts].subtarget = "control"
driverScale = obj.constraints[preConts].driver_add("influence").driver
driverScale.type = 'SCRIPTED'
# var for objDist two targets, 1st is "base" second is "distanceRef"
varS_dist = driverScale.variables.new()
varS_dist.type = 'LOC_DIFF'
varS_dist.name = "objDist"
varS_dist.targets[0].id = rig
varS_dist.targets[0].bone_target = 'base'
varS_dist.targets[1].id = obj
varS_scale = driverScale.variables.new()
varS_scale.type = 'TRANSFORMS'
varS_scale.name = 'scale'
varS_scale.targets[0].id = rig
varS_scale.targets[0].transform_type = 'SCALE_Z'
varS_scale.targets[0].bone_target = 'base'
driverScale.expression = default_expression
########################################################################################
# Above for precursor functions
# Below for the class functions
########################################################################################
class addEffector(bpy.types.Operator):
"""Create the effector object and setup"""
bl_idname = "object.add_effector"
bl_label = "Add Effector"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
objList = bpy.context.selected_objects
[effectorBone1,effectorBone2] = createBoneShapes()
rig = createEffectorRig([effectorBone1,effectorBone2])
addEffectorObj(objList, rig)
bpy.context.scene.objects.active = rig
return {'FINISHED'}
class updateEffector(bpy.types.Operator):
bl_idname = "object.update_effector"
bl_label = "Update Effector"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
print("Update Effector: NOT CREATED YET!")
# use the popup window??
return {'FINISHED'}
class selectEmpties(bpy.types.Operator):
bl_idname = "object.select_empties"
bl_label = "Select Effector Empties"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
print("Selecting effector empties: NOT COMPLETELY CORRECT YET!")
# just selects all empties, lol.
bpy.ops.object.select_by_type(type='EMPTY')
return {'FINISHED'}
class separateFaces(bpy.types.Operator):
"""Separate all faces into new meshes"""
bl_idname = "object.separate_faces"
bl_label = "Separate Faces to Objects"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
# make sure it's currently in object mode for sanity
bpy.ops.object.mode_set(mode='OBJECT')
for obj in bpy.context.selected_objects:
bpy.context.scene.objects.active = obj
if obj.type != "MESH": continue
#set scale to 1
try:
bpy.ops.object.transform_apply(location=False, rotation=True, scale=True)
except:
print("couodn't transform")
print("working?")
#mark all edges sharp
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.mark_sharp()
bpy.ops.object.mode_set(mode='OBJECT')
#apply modifier to split faces
bpy.ops.object.modifier_add(type='EDGE_SPLIT')
obj.modifiers[-1].split_angle = 0
bpy.ops.object.modifier_apply(apply_as='DATA', modifier=obj.modifiers[-1].name)
#clear sharp
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.mark_sharp(clear=True)
bpy.ops.object.mode_set(mode='OBJECT')
#separate to meshes
bpy.ops.mesh.separate(type="LOOSE")
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')
return {'FINISHED'}
# SceneButtonsPanel
# ^^ above for attempt to make in scenes panel
class effectorPanel(bpy.types.Panel):
"""Effector Tools"""
bl_label = "Effector Tools"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = "Tools"
"""
bl_label = "Effector Tools"
bl_space_type = 'VIEW_3D'#"PROPERTIES" #or 'VIEW_3D' ?
bl_region_type = "WINDOW"
bl_context = "scene"
"""
def draw(self, context):
view = context.space_data
scene = context.scene
layout = self.layout
split = layout.split()
col = split.column(align=True)
col.operator("object.separate_faces", text="Separate Faces")
split = layout.split() # uncomment to make vertical
#col = split.column(align=True) # uncomment to make horizontal
col.operator("object.add_effector", text="Add Effector")
split = layout.split()
col.operator("wm.mouse_position", text="Update Effector alt")
col.operator("object.select_empties", text="Select Empties")
split = layout.split()
col = split.column(align=True)
#col = layout.column()
layout.label("Disable Recommended:")
col.prop(view, "show_relationship_lines")
# shameless copy from vertex group pa
# funcitons to implement:
# add new (change from current, where it creates just the armature
# and later need to assign objects to it
# need to figure out data structure for it! I think properties
# for each of the objects, either unique per group or over+1 unique per group
# Assign
# Remove
# Select
# Deselect
# select Effector Control
"""
ob = context.object
group = ob.vertex_groups.active
rows = 1
if group:
rows = 3
row = layout.row()
row.template_list("MESH_UL_vgroups", "", ob, "vertex_groups", ob.vertex_groups, "active_index", rows=rows)
col = row.column(align=True)
col.operator("object.vertex_group_add", icon='ZOOMIN', text="")
col.operator("object.vertex_group_remove", icon='ZOOMOUT', text="").all = False
if ob.vertex_groups and (ob.mode == 'OBJECT'):
row = layout.row()
sub = row.row(align=True)
sub.operator("object.vertex_group_assign", text="Assign")
sub.operator("object.vertex_group_remove_from", text="Remove")
row = layout.row()
sub = row.row(align=True)
sub.operator("object.vertex_group_select", text="Select")
sub.operator("object.vertex_group_deselect", text="Deselect")
"""
########################################################################################
# Above for the class functions
# Below for extra classes/registration stuff
########################################################################################
#### WIP popup
class WIPpopup(bpy.types.Operator):
bl_idname = "error.message"
bl_label = "WIP popup"
type = StringProperty()
message = StringProperty()
def execute(self, context):
self.report({'INFO'}, self.message)
print(self.message)
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_popup(self, width=350, height=40)
return self.execute(context)
def draw(self, context):
self.layout.label("This addon is a work in progress, feature not yet implemented")
row = self.layout.split(0.80)
row.label("")
row.operator("error.ok")
# shows in header when run
class notificationWIP(bpy.types.Operator):
bl_idname = "wm.mouse_position"
bl_label = "Mouse location"
def execute(self, context):
# rather then printing, use the report function,
# this way the message appears in the header,
self.report({'INFO'}, "Not Implemented")
return {'FINISHED'}
def invoke(self, context, event):
return self.execute(context)
# WIP OK button general purpose
class OkOperator(bpy.types.Operator):
bl_idname = "error.ok"
bl_label = "OK"
def execute(self, context):
#eventually another one for url lib
return {'FINISHED'}
# This allows you to right click on a button and link to the manual
# auto-generated code, not fully changed
def add_object_manual_map():
url_manual_prefix = "https://github.com/TheDuckCow"
url_manual_mapping = (
("bpy.ops.mesh.add_object", "Modeling/Objects"),
)
return url_manual_prefix, url_manual_mapping
def register():
bpy.utils.register_class(addEffector)
bpy.utils.register_class(updateEffector)
bpy.utils.register_class(effectorPanel)
bpy.utils.register_class(separateFaces)
bpy.utils.register_class(selectEmpties)
bpy.utils.register_class(WIPpopup)
bpy.utils.register_class(notificationWIP)
bpy.utils.register_class(OkOperator)
#bpy.utils.register_manual_map(add_object_manual_map)
def unregister():
bpy.utils.unregister_class(addEffector)
bpy.utils.unregister_class(updateEffector)
bpy.utils.unregister_class(effectorPanel)
bpy.utils.unregister_class(separateFaces)
bpy.utils.unregister_class(selectEmpties)
bpy.utils.unregister_class(WIPpopup)
bpy.utils.unregister_class(notificationWIP)
bpy.utils.unregister_class(OkOperator)
#bpy.utils.unregister_manual_map(add_object_manual_map)
if __name__ == "__main__":
register()
| kmeirlaen/Blender_Effectors | effector.py | Python | mit | 18,791 |
#! /usr/bin/env python
import flickr_api, pprint, os, sys,argparse,json,csv
flickr_api.set_keys(api_key = '52182e4e29c243689f8a8f45ee2939ae', api_secret = 'a6238b15052797dc')
def DownloadSource(SourceList, PhotosetTitle):
for url in SourceList:
os.system("wget --append-output=download.log --no-verbose --no-clobber -P '%s' -i %s" % (PhotosetTitle,'downloadstack'))
#print url
pass
def GetSource(PhotoList,SizeToDownload):
SourceList = []
f = open('downloadstack', 'wb')
for PhotoId in PhotoList:
r = flickr_api.method_call.call_api(
method = "flickr.photos.getSizes",
photo_id = PhotoId
)
for Size in r['sizes']['size']:
if Size['label'] == SizeToDownload:
SourceList.append(Size['source'])
f.write('%s \n' % Size['source'])
f.close()
return SourceList
def GetInfo(PhotoList,userid):
for PhotoId in PhotoList:
g = open('/home/chandrasg/Desktop/%s/%s_Info.json'%(userid,PhotoId),'wb')
g=convert(g)
h = open('/home/chandrasg/Desktop/%s/%s_Exif.json'%(userid,PhotoId),'wb')
h=convert(h)
#print g
q= flickr_api.method_call.call_api(
method = "flickr.photos.getInfo",
photo_id = PhotoId,
format='xml'
)
q=convert(q)
g.write(str(q))
g.close()
r=flickr_api.method_call.call_api(
method = "flickr.photos.getExif",
photo_id = PhotoId,
format='xml'
)
r=convert(r)
h.write(str(r))
h.close()
def GetFavorites(userid):
PhotoList=[]
r = flickr_api.method_call.call_api(
method = "flickr.favorites.getPublicList",
user_id = userid,
per_page=200
)
for r in r['photos']['photo']:
PhotoList.append(r['id'])
return PhotoList
def convert(input):
if isinstance(input, dict):
return {convert(key): convert(value) for key, value in input.iteritems()}
elif isinstance(input, list):
return [convert(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
def GetUserid(userid):
userid= "http://www.flickr.com/photos/"+userid+"/"
r = flickr_api.method_call.call_api(
method = "flickr.urls.lookupUser",
url=userid
)
return r['user']['id']
def GetUserfriends(userid):
h = open('/home/chandrasg/Desktop/%s_friends.json'%userid,'wb')
h=convert(h)
q= flickr_api.method_call.call_api(
method = "flickr.contacts.getPublicList",
user_id = userid,
format='xml'
)
q=convert(q)
    h.write(str(q))
    h.close()
with open('users.txt','rb') as infile:
reader=csv.reader(infile)
for user in reader:
userid=user[0]
if userid[0].isdigit():
print userid
SizeToDownload = 'Large'
PhotoList = GetFavorites(userid)
print "Found %s Photos..." % (len(PhotoList))
SourceList = GetSource(PhotoList,SizeToDownload)
print "Downloading %s Photos..." % (len(SourceList))
DownloadSource(SourceList,userid)
GetInfo(PhotoList,userid)
GetUserfriends(userid)
os.remove('./downloadstack')
else:
print userid
userid=GetUserid(userid)
userid=convert(userid)
print userid
SizeToDownload = 'Large'
PhotoList = GetFavorites(userid)
print "Found %s Photos..." % (len(PhotoList))
SourceList = GetSource(PhotoList,SizeToDownload)
print "Downloading %s Photos..." % (len(SourceList))
DownloadSource(SourceList,userid)
GetInfo(PhotoList,userid)
GetUserfriends(userid)
os.remove('./downloadstack')
| sharathchandra92/flickrapi_downloadfavorites | flickrdownload.py | Python | mit | 3,886 |
import sigopt.cli.commands.cluster
import sigopt.cli.commands.config
import sigopt.cli.commands.experiment
import sigopt.cli.commands.init
import sigopt.cli.commands.local
import sigopt.cli.commands.version
import sigopt.cli.commands.training_run
from .base import sigopt_cli
| sigopt/sigopt-python | sigopt/cli/commands/__init__.py | Python | mit | 277 |
import json
from sqlalchemy import create_engine, inspect, Table, Column
import pandas as pd
from ilxutils.args_reader import read_args
from ilxutils.scicrunch_client import scicrunch
import sys
import time
from ilxutils.interlex_sql import interlex_sql
import re
from collections import defaultdict
import requests as r
import subprocess as sb
from pathlib import Path as p
with open('/home/troy/elastic_migration/auth.ini', 'r') as uk:
vars = uk.read().split('\n')
username = vars[0].split('=')[-1].strip()
password = vars[1].split('=')[-1].strip()
args = read_args(
api_key=p.home() / 'keys/production_api_scicrunch_key.txt',
db_url=p.home() / 'keys/production_engine_scicrunch_key.txt',
production=True)
#args = read_args(api_key='../production_api_scicrunch_key.txt', db_url='../production_engine_scicrunch_key.txt', production=True)
sql = interlex_sql(db_url=args.db_url)
sci = scicrunch(
api_key=args.api_key, base_path=args.base_path, db_url=args.db_url)
def plink(ilxid):
return "http://interlex.scicrunch.io/scicrunch/term/%s" % ilxid
def blink(ilxid):
return "https://5f86098ac2b28a982cebf64e82db4ea2.us-west-2.aws.found.io:9243/interlex/term/%s" % ilxid
#8583532179
from random import choice
from string import ascii_uppercase
def get_random_words(lenghth=15, count=1):
return [
'troy_test_' + ''.join(choice(ascii_uppercase) for i in range(12))
for _ in range(count)
]
def create_ilx_format_from_words(words):
superclasses = [{
'id': '146',
}]
return [{
'term': w,
'type': 'term',
"definition": "test123",
"superclasses": superclasses
} for w in words]
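# Hedged illustration (added, not part of the original test): every generated term
# payload has the fixed shape below; only the 'term' value is random.
assert create_ilx_format_from_words(['example'])[0] == {
    'term': 'example',
    'type': 'term',
    'definition': 'test123',
    'superclasses': [{'id': '146'}],
}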
words = get_random_words()
test_terms = create_ilx_format_from_words(words)
#print(test_terms)
#print(test_terms)
"""
ilx_data = sci.addTerms(test_terms)#, _print=True)
ilx_to_label = sql.get_ilx_to_label()
for d in ilx_data:
if not ilx_to_label[d['ilx']] == d['label']:
sys.exit(blink(d['ilx']))
terms_data = []
for d in ilx_data:
terms_data.append({'id':d['id'], 'definition':'update_test'})
update_data = sci.updateTerms(terms_data)#, _print=True)
for d in update_data:
if d['definition'] != 'update_test':
sys.exit(blink[d['ilx']])
"""
sci.updateTerms([{'id': 15065, 'definition': 'troy_test_may_17_2018'}])
def ela_search(ilx):
return r.get(blink(ilx), auth=(username, password))
ilx_data = [{'ilx': 'ilx_0115063'}]
results = [
ela_search(d['ilx']).json()
if not ela_search(d['ilx']).raise_for_status() else sys.exit(d)
for d in ilx_data
]
#print(results)
#json.dump(results, open('../elastic_testing.json', 'w'), indent=4)
#python3 elastic_test.py 8.60s user 1.07s system 3% cpu 4:55.25 total for new elastic
#annotation_format = {'tid':'', 'annotation_tid':'', 'value':''}
#annotation_format['tid']=49552
#annotation_format['annotation_tid']=15077
#annotation_format['value']='testing value2'
#sci.addAnnotations([annotation_format])
| tgbugs/pyontutils | ilxutils/tests/elastic_test.py | Python | mit | 2,991 |
#!/bin/python
"""Fixes some common spelling errors. Might I add that this is one of the
reasons why having a website is useful?
"""
import math
import os
import sys
def levenshtein_ratio(s1, s2, cutoff=None):
max_len = max(len(s1), len(s2))
if cutoff is not None:
cutoff = int(math.ceil((1.0 - cutoff) * max_len) + .1)
return 1.0 - (
float(levenshtein(s1, s2, cutoff=cutoff))
/ max_len
)
def levenshtein(s1, s2, cutoff=None):
"""Compute the Levenshtein edit distance between two strings. If the
minimum distance will be greater than cutoff, then quit early and return
at least cutoff.
"""
if len(s1) < len(s2):
return levenshtein(s2, s1, cutoff)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
current_row_min = sys.maxint
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
min_all_three = min(
insertions,
deletions,
substitutions
)
current_row.append(min_all_three)
current_row_min = min(current_row_min, min_all_three)
if cutoff is not None and current_row_min > cutoff:
return current_row_min
previous_row = current_row
return previous_row[-1]
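# Hedged example (added, not part of the original script): two substitutions turn
# 'Histroic' into 'Historic', so the distance is 2 and the ratio is 1 - 2/8 = 0.75.
assert levenshtein('Histroic', 'Historic') == 2
assert levenshtein_ratio('Histroic', 'Historic') == 0.75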
def fix_misspellings(test=None):
if test is None:
test = False
words = (
'Anniversary',
'Bonus',
'Cemetery',
'Heritage',
'Historic',
'Historical',
'Monument',
'Memorial',
'National',
'Park',
'Preserve',
'Recreation',
'Recreational',
'Scenic',
)
ok_words = (
'Hermitage',
'History',
'Presence',
'Reception',
'Reservation',
'Renovation',
'Parks',
'Part',
)
for word in words:
command = r"grep -i -P -o '\b[{first_letter}][a-z]{{{length_minus_one},{length_plus_one}}}\b' master_list.csv | sort -u".format(
first_letter=word[0],
length_minus_one=(len(word[1:]) - 1),
length_plus_one=(len(word[1:]) + 1),
)
if test:
print('Running {command}'.format(command=command))
maybe_misspellings = os.popen(command).readlines()
# Trim newlines
maybe_misspellings = [m[:-1] for m in maybe_misspellings]
# Ignore the correct spelling
for j in (word, word.lower(), word.upper()):
if j in maybe_misspellings:
maybe_misspellings.remove(j)
# Ignore ok words
for i in ok_words:
for j in (i, i.lower(), i.upper()):
if j in maybe_misspellings:
maybe_misspellings.remove(j)
# 'Recreationa' => 'Recreational', not 'Recreation'
removals = []
for mm in maybe_misspellings:
if word == 'Historic' or word == 'Recreation':
if 'a' in mm[-3:].lower() or 'l' in mm[-3:].lower():
removals.append(mm)
for r in removals:
maybe_misspellings.remove(r)
# Misspellings must have most of the letters from the word
misspellings = [mm for mm in maybe_misspellings if levenshtein_ratio(mm[1:].lower(), word[1:].lower()) >= .65]
for misspelling in misspellings:
if misspelling == misspelling.upper():
replacement = word.upper()
elif misspelling == misspelling.lower():
replacement = word.lower()
else:
replacement = word
if test:
print(
'{word} found {times} times'.format(
word=misspelling,
times=os.popen(
r"grep -c -P '\b{word}\b' list.csv".format(
word=misspelling,
)
).readlines()[0][:-1],
)
)
print(
'Replacing {word} with {replacement}'.format(
word=misspelling,
replacement=replacement,
)
)
else:
os.system(
r"sed -i -r 's/\b{misspelling}\b/{replacement}/g' list.csv".format(
misspelling=misspelling,
replacement=replacement,
)
)
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: {program} [--test|--run]'.format(program=sys.argv[0]))
sys.exit(1)
elif sys.argv[1] == '--test':
fix_misspellings(test=True)
elif sys.argv[1] == '--run':
fix_misspellings(test=False)
        # Fix whitespace too
os.system(r"sed -i -r 's/\s+$//' list.csv")
else:
print('Usage: {program} [--test|--run]'.format(program=sys.argv[0]))
sys.exit(1)
| bskari/park-stamper | parks/scripts/initialize_db/fix_spelling.py | Python | mit | 5,292 |
"""
Tahmatassu Web Server
~~~~~~~~~~~~~~~~~~~~~
HTTP-status codes containing module
:copyright: (c) 2014 by Teemu Puukko.
:license: MIT, see LICENSE for more details.
"""
from werkzeug.wsgi import LimitedStream
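# Usage sketch (added comment; an assumption, not part of this module): wrap a WSGI
# callable so any unread request body is drained before the response is closed, e.g.
#
#     app.wsgi_app = StreamConsumingMiddleware(app.wsgi_app)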
class StreamConsumingMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
content_length = environ.get('CONTENT_LENGTH',0)
        content_length = 0 if content_length == '' else content_length
stream = LimitedStream(environ.get('wsgi.input'),
int(content_length))
environ['wsgi.input'] = stream
app_iter = self.app(environ, start_response)
try:
stream.exhaust()
for event in app_iter:
yield event
finally:
if hasattr(app_iter, 'close'):
app_iter.close() | puumuki/tahmatassu-api | tahmatassu-server/middlewares.py | Python | mit | 813 |
from setuptools import setup
readme = open('README.md').read()
setup(name='HDFserver',
version='0.1',
author='Yohannes Libanos',
license='MIT',
description='REST service for HDF5 data stores',
py_modules=['HDFserver'],
long_description=readme,) | yohannesHL/HDFserver | setup.py | Python | mit | 281 |
import copy
class ArtifactEmulator:
def __init__(self, random_str, ctx, base_url):
self._random_str = random_str
self._ctx = ctx
self._artifacts = {}
self._artifacts_by_id = {}
self._files = {}
self._base_url = base_url
self._portfolio_links = {}
def create(self, variables):
collection_name = variables["artifactCollectionNames"][0]
state = "PENDING"
aliases = []
latest = None
art_id = variables.get("digest", "")
# Find most recent artifact
versions = self._artifacts.get(collection_name)
if versions:
last_version = versions[-1]
latest = {"id": last_version["digest"], "versionIndex": len(versions) - 1}
art_seq = {"id": art_id, "latestArtifact": latest}
aliases.append(dict(artifactCollectionName=collection_name, alias="latest"))
base_url = self._base_url
direct_url = f"{base_url}/storage?file=wandb_manifest.json"
art_data = {
"id": art_id,
"digest": "abc123",
"state": state,
"labels": [],
"aliases": aliases,
"artifactSequence": art_seq,
"currentManifest": dict(file=dict(directUrl=direct_url)),
}
response = {"data": {"createArtifact": {"artifact": copy.deepcopy(art_data)}}}
# save in artifact emu object
art_seq["name"] = collection_name
art_data["artifactSequence"] = art_seq
art_data["state"] = "COMMITTED"
art_type = variables.get("artifactTypeName")
if art_type:
art_data["artifactType"] = {"id": 1, "name": art_type}
art_save = copy.deepcopy(art_data)
self._artifacts.setdefault(collection_name, []).append(art_save)
self._artifacts_by_id[art_id] = art_save
# save in context
self._ctx["artifacts_created"].setdefault(collection_name, {})
self._ctx["artifacts_created"][collection_name].setdefault("num", 0)
self._ctx["artifacts_created"][collection_name]["num"] += 1
if art_type:
self._ctx["artifacts_created"][collection_name]["type"] = art_type
return response
def link(self, variables):
pfolio_name = variables.get("artifactPortfolioName")
artifact_id = variables.get("artifactID") or variables.get("clientID")
if not pfolio_name or not artifact_id:
raise ValueError(
"query variables must contain artifactPortfolioName and either artifactID or clientID"
)
aliases = variables.get("aliases")
# We automatically create a portfolio for the user if we can't find the one given.
links = self._portfolio_links.setdefault(pfolio_name, [])
if not any(map(lambda x: x["id"] == artifact_id, links)):
art = {"id": artifact_id, "aliases": [a["alias"] for a in aliases]}
links.append(art)
self._ctx["portfolio_links"].setdefault(pfolio_name, {})
num = len(links)
self._ctx["portfolio_links"][pfolio_name]["num"] = num
response = {"data": {"linkArtifact": {"versionIndex": num - 1}}}
return response
def create_files(self, variables):
base_url = self._base_url
response = {
"data": {
"createArtifactFiles": {
"files": {
"edges": [
{
"node": {
"id": idx,
"name": af["name"],
"displayName": af["name"],
"uploadUrl": f"{base_url}/storage?file={af['name']}&id={af['artifactID']}",
"uploadHeaders": [],
"artifact": {"id": af["artifactID"]},
},
}
for idx, af in enumerate(variables["artifactFiles"])
],
},
},
},
}
return response
def query(self, variables, query=None):
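        # Added comment: artifacts are looked up either by name ("collection:version",
        # where version is "latest" or "vN") or directly by id; public-API queries are
        # detected from the query string and answered without the "project" wrapper.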
public_api_query_str = "query Artifact($id: ID!) {"
public_api_query_str2 = "query ArtifactWithCurrentManifest($id: ID!) {"
art_id = variables.get("id")
art_name = variables.get("name")
assert art_id or art_name
is_public_api_query = query and (
query.startswith(public_api_query_str)
or query.startswith(public_api_query_str2)
)
if art_name:
collection_name, version = art_name.split(":", 1)
artifact = None
artifacts = self._artifacts.get(collection_name)
if artifacts:
if version == "latest":
version_num = len(artifacts)
else:
assert version.startswith("v")
version_num = int(version[1:])
artifact = artifacts[version_num - 1]
# TODO: add alias info?
elif art_id:
artifact = self._artifacts_by_id[art_id]
if is_public_api_query:
response = {"data": {"artifact": artifact}}
else:
response = {"data": {"project": {"artifact": artifact}}}
return response
def file(self, entity, digest):
# TODO?
return "ARTIFACT %s" % digest, 200
def storage(self, request):
fname = request.args.get("file")
if request.method == "PUT":
data = request.get_data(as_text=True)
self._files.setdefault(fname, "")
# TODO: extend? instead of overwrite, possible to differentiate wandb_manifest.json artifactid?
self._files[fname] = data
data = ""
if request.method == "GET":
data = self._files[fname]
return data, 200
| wandb/client | tests/utils/artifact_emu.py | Python | mit | 5,987 |
import warnings
from collections import defaultdict
from queryset import QuerySet, QuerySetManager
from queryset import DoesNotExist, MultipleObjectsReturned
from queryset import DO_NOTHING
from mongoengine import signals
import sys
import pymongo
from bson import ObjectId
import operator
from functools import partial
from bson.dbref import DBRef
class NotRegistered(Exception):
pass
class InvalidDocumentError(Exception):
pass
class ValidationError(AssertionError):
"""Validation exception.
May represent an error validating a field or a
document containing fields with validation errors.
:ivar errors: A dictionary of errors for fields within this
document or list, or None if the error is for an
individual field.
"""
errors = {}
field_name = None
_message = None
def __init__(self, message="", **kwargs):
self.errors = kwargs.get('errors', {})
self.field_name = kwargs.get('field_name')
self.message = message
def __str__(self):
return self.message
def __repr__(self):
return '%s(%s,)' % (self.__class__.__name__, self.message)
def __getattribute__(self, name):
message = super(ValidationError, self).__getattribute__(name)
if name == 'message':
if self.field_name:
message = '%s' % message
if self.errors:
message = '%s(%s)' % (message, self._format_errors())
return message
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
def to_dict(self):
"""Returns a dictionary of all errors within a document
Keys are field names or list indices and values are the
validation error messages, or a nested dictionary of
errors for an embedded document or list.
"""
def build_dict(source):
errors_dict = {}
if not source:
return errors_dict
if isinstance(source, dict):
for field_name, error in source.iteritems():
errors_dict[field_name] = build_dict(error)
elif isinstance(source, ValidationError) and source.errors:
return build_dict(source.errors)
else:
return unicode(source)
return errors_dict
if not self.errors:
return {}
return build_dict(self.errors)
def _format_errors(self):
"""Returns a string listing all errors within a document"""
def generate_key(value, prefix=''):
if isinstance(value, list):
value = ' '.join([generate_key(k) for k in value])
if isinstance(value, dict):
value = ' '.join(
[generate_key(v, k) for k, v in value.iteritems()])
results = "%s.%s" % (prefix, value) if prefix else value
return results
error_dict = defaultdict(list)
for k, v in self.to_dict().iteritems():
error_dict[generate_key(v)].append(k)
return ' '.join(["%s: %s" % (k, v) for k, v in error_dict.iteritems()])
_document_registry = {}
def get_document(name):
doc = _document_registry.get(name, None)
if not doc:
# Possible old style names
end = ".%s" % name
possible_match = [k for k in _document_registry.keys() if k.endswith(end)]
if len(possible_match) == 1:
doc = _document_registry.get(possible_match.pop(), None)
if not doc:
raise NotRegistered("""
`%s` has not been registered in the document registry.
Importing the document class automatically registers it, has it
been imported?
""".strip() % name)
return doc
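# Illustrative note (added comment, not from the original source): importing a module
# that defines a Document subclass registers it under its class name, so
#     class BlogPost(Document): ...
# makes get_document('BlogPost') return that class; unknown names raise NotRegistered.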
class BaseField(object):
"""A base class for fields in a MongoDB document. Instances of this class
may be added to subclasses of `Document` to define a document's schema.
.. versionchanged:: 0.5 - added verbose and help text
"""
name = None
# Fields may have _types inserted into indexes by default
_index_with_types = True
_geo_index = False
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that MongoEngine implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
def __init__(self, db_field=None, name=None, required=False, default=None,
unique=False, unique_with=None, primary_key=False,
validation=None, choices=None, verbose_name=None, help_text=None):
self.db_field = (db_field or name) if not primary_key else '_id'
if name:
msg = "Fields' 'name' attribute deprecated in favour of 'db_field'"
warnings.warn(msg, DeprecationWarning)
self.name = None
self.required = required or primary_key
self.default = default
self.unique = bool(unique or unique_with)
self.unique_with = unique_with
self.primary_key = primary_key
self.validation = validation
self.choices = choices
self.verbose_name = verbose_name
self.help_text = help_text
# Adjust the appropriate creation counter, and save our local copy.
if self.db_field == '_id':
self.creation_counter = BaseField.auto_creation_counter
BaseField.auto_creation_counter -= 1
else:
self.creation_counter = BaseField.creation_counter
BaseField.creation_counter += 1
def __get__(self, instance, owner):
"""Descriptor for retrieving a value from a field in a document. Do
any necessary conversion between Python and MongoDB types.
"""
if instance is None:
# Document class being used rather than a document object
return self
# Get value from document instance if available, if not use default
value = instance._data.get(self.name)
if value is None:
value = self.default
# Allow callable default values
if callable(value):
value = value()
return value
def __set__(self, instance, value):
"""Descriptor for assigning a value to a field in a document.
"""
instance._data[self.name] = value
instance._mark_as_changed(self.name)
def error(self, message="", errors=None, field_name=None):
"""Raises a ValidationError.
"""
field_name = field_name if field_name else self.name
raise ValidationError(message, errors=errors, field_name=field_name)
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type.
"""
return value
def to_mongo(self, value):
"""Convert a Python type to a MongoDB-compatible type.
"""
return self.to_python(value)
def prepare_query_value(self, op, value):
"""Prepare a value that is being used in a query for PyMongo.
"""
return value
def validate(self, value):
"""Perform validation on a value.
"""
pass
def _validate(self, value):
from mongoengine import Document, EmbeddedDocument
# check choices
if self.choices:
is_cls = isinstance(value, (Document, EmbeddedDocument))
value_to_check = value.__class__ if is_cls else value
err_msg = 'an instance' if is_cls else 'one'
if isinstance(self.choices[0], (list, tuple)):
option_keys = [option_key for option_key, option_value in self.choices]
if value_to_check not in option_keys:
self.error('Value must be %s of %s' % (err_msg, unicode(option_keys)))
elif value_to_check not in self.choices:
self.error('Value must be %s of %s' % (err_msg, unicode(self.choices)))
# check validation argument
if self.validation is not None:
if callable(self.validation):
if not self.validation(value):
self.error('Value does not match custom validation method')
else:
raise ValueError('validation argument for "%s" must be a '
'callable.' % self.name)
self.validate(value)
class ComplexBaseField(BaseField):
"""Handles complex fields, such as lists / dictionaries.
Allows for nesting of embedded documents inside complex types.
Handles the lazy dereferencing of a queryset by lazily dereferencing all
items in a list / dict rather than one at a time.
.. versionadded:: 0.5
"""
field = None
_dereference = False
def __get__(self, instance, owner):
"""Descriptor to automatically dereference references.
"""
if instance is None:
# Document class being used rather than a document object
return self
from fields import GenericReferenceField, ReferenceField
dereference = self.field is None or isinstance(self.field,
(GenericReferenceField, ReferenceField))
if not self._dereference and instance._initialised and dereference:
from dereference import DeReference
self._dereference = DeReference() # Cached
instance._data[self.name] = self._dereference(
instance._data.get(self.name), max_depth=1, instance=instance,
name=self.name
)
value = super(ComplexBaseField, self).__get__(instance, owner)
# Convert lists / values so we can watch for any changes on them
if isinstance(value, (list, tuple)) and not isinstance(value, BaseList):
value = BaseList(value, instance, self.name)
instance._data[self.name] = value
elif isinstance(value, dict) and not isinstance(value, BaseDict):
value = BaseDict(value, instance, self.name)
instance._data[self.name] = value
if self._dereference and instance._initialised and \
isinstance(value, (BaseList, BaseDict)) and not value._dereferenced:
value = self._dereference(
value, max_depth=1, instance=instance, name=self.name
)
value._dereferenced = True
instance._data[self.name] = value
return value
def __set__(self, instance, value):
"""Descriptor for assigning a value to a field in a document.
"""
instance._data[self.name] = value
instance._mark_as_changed(self.name)
def to_python(self, value):
"""Convert a MongoDB-compatible type to a Python type.
"""
from mongoengine import Document
if isinstance(value, basestring):
return value
if hasattr(value, 'to_python'):
return value.to_python()
is_list = False
if not hasattr(value, 'items'):
try:
is_list = True
value = dict([(k, v) for k, v in enumerate(value)])
except TypeError: # Not iterable return the value
return value
if self.field:
value_dict = dict([(key, self.field.to_python(item)) for key, item in value.items()])
else:
value_dict = {}
for k, v in value.items():
if isinstance(v, Document):
# We need the id from the saved object to create the DBRef
if v.pk is None:
self.error('You can only reference documents once they'
' have been saved to the database')
collection = v._get_collection_name()
value_dict[k] = DBRef(collection, v.pk)
elif hasattr(v, 'to_python'):
value_dict[k] = v.to_python()
else:
value_dict[k] = self.to_python(v)
if is_list: # Convert back to a list
return [v for k, v in sorted(value_dict.items(), key=operator.itemgetter(0))]
return value_dict
def to_mongo(self, value):
"""Convert a Python type to a MongoDB-compatible type.
"""
from mongoengine import Document
if isinstance(value, basestring):
return value
if hasattr(value, 'to_mongo'):
return value.to_mongo()
is_list = False
if not hasattr(value, 'items'):
try:
is_list = True
value = dict([(k, v) for k, v in enumerate(value)])
except TypeError: # Not iterable return the value
return value
if self.field:
value_dict = dict([(key, self.field.to_mongo(item)) for key, item in value.items()])
else:
value_dict = {}
for k, v in value.items():
if isinstance(v, Document):
# We need the id from the saved object to create the DBRef
if v.pk is None:
self.error('You can only reference documents once they'
' have been saved to the database')
# If its a document that is not inheritable it won't have
# _types / _cls data so make it a generic reference allows
# us to dereference
meta = getattr(v, 'meta', getattr(v, '_meta', {}))
if meta and not meta.get('allow_inheritance', True) and not self.field:
from fields import GenericReferenceField
value_dict[k] = GenericReferenceField().to_mongo(v)
else:
collection = v._get_collection_name()
value_dict[k] = DBRef(collection, v.pk)
elif hasattr(v, 'to_mongo'):
value_dict[k] = v.to_mongo()
else:
value_dict[k] = self.to_mongo(v)
if is_list: # Convert back to a list
return [v for k, v in sorted(value_dict.items(), key=operator.itemgetter(0))]
return value_dict
def validate(self, value):
"""If field is provided ensure the value is valid.
"""
errors = {}
if self.field:
if hasattr(value, 'iteritems'):
sequence = value.iteritems()
else:
sequence = enumerate(value)
for k, v in sequence:
try:
self.field._validate(v)
except ValidationError, error:
errors[k] = error.errors or error
except (ValueError, AssertionError), error:
errors[k] = error
if errors:
field_class = self.field.__class__.__name__
self.error('Invalid %s item (%s)' % (field_class, value),
errors=errors)
# Don't allow empty values if required
if self.required and not value:
self.error('Field is required and cannot be empty')
def prepare_query_value(self, op, value):
return self.to_mongo(value)
def lookup_member(self, member_name):
if self.field:
return self.field.lookup_member(member_name)
return None
def _set_owner_document(self, owner_document):
if self.field:
self.field.owner_document = owner_document
self._owner_document = owner_document
def _get_owner_document(self, owner_document):
self._owner_document = owner_document
owner_document = property(_get_owner_document, _set_owner_document)
class ObjectIdField(BaseField):
"""An field wrapper around MongoDB's ObjectIds.
"""
def to_python(self, value):
return value
def to_mongo(self, value):
if not isinstance(value, ObjectId):
try:
return ObjectId(unicode(value))
except Exception, e:
# e.message attribute has been deprecated since Python 2.6
self.error(unicode(e))
return value
def prepare_query_value(self, op, value):
return self.to_mongo(value)
def validate(self, value):
try:
ObjectId(unicode(value))
except:
self.error('Invalid Object ID')
class DocumentMetaclass(type):
"""Metaclass for all documents.
"""
def __new__(cls, name, bases, attrs):
def _get_mixin_fields(base):
attrs = {}
attrs.update(dict([(k, v) for k, v in base.__dict__.items()
if issubclass(v.__class__, BaseField)]))
# Handle simple mixin's with meta
if hasattr(base, 'meta') and not isinstance(base, DocumentMetaclass):
meta = attrs.get('meta', {})
meta.update(base.meta)
attrs['meta'] = meta
for p_base in base.__bases__:
#optimize :-)
if p_base in (object, BaseDocument):
continue
attrs.update(_get_mixin_fields(p_base))
return attrs
metaclass = attrs.get('__metaclass__')
super_new = super(DocumentMetaclass, cls).__new__
if metaclass and issubclass(metaclass, DocumentMetaclass):
return super_new(cls, name, bases, attrs)
doc_fields = {}
class_name = [name]
superclasses = {}
simple_class = True
for base in bases:
# Include all fields present in superclasses
if hasattr(base, '_fields'):
doc_fields.update(base._fields)
# Get superclasses from superclass
superclasses[base._class_name] = base
superclasses.update(base._superclasses)
else: # Add any mixin fields
attrs.update(_get_mixin_fields(base))
if hasattr(base, '_meta') and not base._meta.get('abstract'):
# Ensure that the Document class may be subclassed -
# inheritance may be disabled to remove dependency on
# additional fields _cls and _types
class_name.append(base._class_name)
if not base._meta.get('allow_inheritance_defined', True):
warnings.warn(
"%s uses inheritance, the default for allow_inheritance "
"is changing to off by default. Please add it to the "
"document meta." % name,
FutureWarning
)
if base._meta.get('allow_inheritance', True) == False:
raise ValueError('Document %s may not be subclassed' %
base.__name__)
else:
simple_class = False
doc_class_name = '.'.join(reversed(class_name))
meta = attrs.get('_meta', {})
meta.update(attrs.get('meta', {}))
if 'allow_inheritance' not in meta:
meta['allow_inheritance'] = True
# Only simple classes - direct subclasses of Document - may set
# allow_inheritance to False
if not simple_class and not meta['allow_inheritance'] and not meta['abstract']:
raise ValueError('Only direct subclasses of Document may set '
'"allow_inheritance" to False')
attrs['_meta'] = meta
attrs['_class_name'] = doc_class_name
attrs['_superclasses'] = superclasses
# Add the document's fields to the _fields attribute
field_names = {}
for attr_name, attr_value in attrs.items():
if hasattr(attr_value, "__class__") and \
issubclass(attr_value.__class__, BaseField):
attr_value.name = attr_name
if not attr_value.db_field:
attr_value.db_field = attr_name
doc_fields[attr_name] = attr_value
field_names[attr_value.db_field] = field_names.get(attr_value.db_field, 0) + 1
duplicate_db_fields = [k for k, v in field_names.items() if v > 1]
if duplicate_db_fields:
raise InvalidDocumentError("Multiple db_fields defined for: %s " % ", ".join(duplicate_db_fields))
attrs['_fields'] = doc_fields
attrs['_db_field_map'] = dict([(k, v.db_field) for k, v in doc_fields.items() if k != v.db_field])
attrs['_reverse_db_field_map'] = dict([(v, k) for k, v in attrs['_db_field_map'].items()])
from mongoengine import Document, EmbeddedDocument, DictField
new_class = super_new(cls, name, bases, attrs)
for field in new_class._fields.values():
field.owner_document = new_class
delete_rule = getattr(field, 'reverse_delete_rule', DO_NOTHING)
f = field
if isinstance(f, ComplexBaseField) and hasattr(f, 'field'):
delete_rule = getattr(f.field, 'reverse_delete_rule', DO_NOTHING)
if isinstance(f, DictField) and delete_rule != DO_NOTHING:
raise InvalidDocumentError("Reverse delete rules are not supported for %s (field: %s)" % (field.__class__.__name__, field.name))
f = field.field
if delete_rule != DO_NOTHING:
if issubclass(new_class, EmbeddedDocument):
raise InvalidDocumentError("Reverse delete rules are not supported for EmbeddedDocuments (field: %s)" % field.name)
f.document_type.register_delete_rule(new_class, field.name, delete_rule)
if field.name and hasattr(Document, field.name) and EmbeddedDocument not in new_class.mro():
raise InvalidDocumentError("%s is a document method and not a valid field name" % field.name)
module = attrs.get('__module__')
base_excs = tuple(base.DoesNotExist for base in bases
if hasattr(base, 'DoesNotExist')) or (DoesNotExist,)
exc = subclass_exception('DoesNotExist', base_excs, module)
new_class.add_to_class('DoesNotExist', exc)
base_excs = tuple(base.MultipleObjectsReturned for base in bases
if hasattr(base, 'MultipleObjectsReturned'))
base_excs = base_excs or (MultipleObjectsReturned,)
exc = subclass_exception('MultipleObjectsReturned', base_excs, module)
new_class.add_to_class('MultipleObjectsReturned', exc)
global _document_registry
_document_registry[doc_class_name] = new_class
return new_class
def add_to_class(self, name, value):
setattr(self, name, value)
class TopLevelDocumentMetaclass(DocumentMetaclass):
"""Metaclass for top-level documents (i.e. documents that have their own
    collection in the database).
"""
def __new__(cls, name, bases, attrs):
super_new = super(TopLevelDocumentMetaclass, cls).__new__
# Classes defined in this package are abstract and should not have
# their own metadata with DB collection, etc.
# __metaclass__ is only set on the class with the __metaclass__
# attribute (i.e. it is not set on subclasses). This differentiates
# 'real' documents from the 'Document' class
#
# Also assume a class is abstract if it has abstract set to True in
# its meta dictionary. This allows custom Document superclasses.
if (attrs.get('__metaclass__') == TopLevelDocumentMetaclass or
('meta' in attrs and attrs['meta'].get('abstract', False))):
# Make sure no base class was non-abstract
non_abstract_bases = [b for b in bases
if hasattr(b, '_meta') and not b._meta.get('abstract', False)]
if non_abstract_bases:
raise ValueError("Abstract document cannot have non-abstract base")
return super_new(cls, name, bases, attrs)
collection = ''.join('_%s' % c if c.isupper() else c for c in name).strip('_').lower()
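        # Added comment: CamelCase class names become snake_case collection names,
        # e.g. 'BlogPost' -> 'blog_post'.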
id_field = None
abstract_base_indexes = []
base_indexes = []
base_meta = {}
# Subclassed documents inherit collection from superclass
for base in bases:
if hasattr(base, '_meta'):
if 'collection' in attrs.get('meta', {}) and not base._meta.get('abstract', False):
import warnings
msg = "Trying to set a collection on a subclass (%s)" % name
warnings.warn(msg, SyntaxWarning)
del(attrs['meta']['collection'])
if base._get_collection_name():
collection = base._get_collection_name()
# Propagate inherited values
                keys_to_propagate = (
'index_background', 'index_drop_dups', 'index_opts',
'allow_inheritance', 'queryset_class', 'db_alias',
)
                for key in keys_to_propagate:
if key in base._meta:
base_meta[key] = base._meta[key]
id_field = id_field or base._meta.get('id_field')
if base._meta.get('abstract', False):
abstract_base_indexes += base._meta.get('indexes', [])
else:
base_indexes += base._meta.get('indexes', [])
try:
base_meta['objects'] = base.__getattribute__(base, 'objects')
except TypeError:
pass
except AttributeError:
pass
# defaults
meta = {
'abstract': False,
'collection': collection,
'max_documents': None,
'max_size': None,
'ordering': [], # default ordering applied at runtime
'indexes': [], # indexes to be ensured at runtime
'id_field': id_field,
'index_background': False,
'index_drop_dups': False,
'index_opts': {},
'queryset_class': QuerySet,
'delete_rules': {},
'allow_inheritance': True
}
allow_inheritance_defined = ('allow_inheritance' in base_meta or
                                     'allow_inheritance' in attrs.get('meta', {}))
meta['allow_inheritance_defined'] = allow_inheritance_defined
meta.update(base_meta)
# Apply document-defined meta options
meta.update(attrs.get('meta', {}))
attrs['_meta'] = meta
# Set up collection manager, needs the class to have fields so use
# DocumentMetaclass before instantiating CollectionManager object
new_class = super_new(cls, name, bases, attrs)
collection = attrs['_meta'].get('collection', None)
if callable(collection):
new_class._meta['collection'] = collection(new_class)
# Provide a default queryset unless one has been manually provided
manager = attrs.get('objects', meta.get('objects', QuerySetManager()))
if hasattr(manager, 'queryset_class'):
meta['queryset_class'] = manager.queryset_class
new_class.objects = manager
        indices = list(meta['indexes']) + abstract_base_indexes
        user_indexes = [QuerySet._build_index_spec(new_class, spec)
                        for spec in indices] + base_indexes
new_class._meta['indexes'] = user_indexes
unique_indexes = cls._unique_with_indexes(new_class)
new_class._meta['unique_indexes'] = unique_indexes
for field_name, field in new_class._fields.items():
# Check for custom primary key
if field.primary_key:
current_pk = new_class._meta['id_field']
if current_pk and current_pk != field_name:
raise ValueError('Cannot override primary key field')
if not current_pk:
new_class._meta['id_field'] = field_name
# Make 'Document.id' an alias to the real primary key field
new_class.id = field
if not new_class._meta['id_field']:
new_class._meta['id_field'] = 'id'
new_class._fields['id'] = ObjectIdField(db_field='_id')
new_class.id = new_class._fields['id']
return new_class
@classmethod
def _unique_with_indexes(cls, new_class, namespace=""):
unique_indexes = []
for field_name, field in new_class._fields.items():
# Generate a list of indexes needed by uniqueness constraints
if field.unique:
field.required = True
unique_fields = [field.db_field]
# Add any unique_with fields to the back of the index spec
if field.unique_with:
if isinstance(field.unique_with, basestring):
field.unique_with = [field.unique_with]
# Convert unique_with field names to real field names
unique_with = []
for other_name in field.unique_with:
parts = other_name.split('.')
# Lookup real name
parts = QuerySet._lookup_field(new_class, parts)
name_parts = [part.db_field for part in parts]
unique_with.append('.'.join(name_parts))
# Unique field should be required
parts[-1].required = True
unique_fields += unique_with
# Add the new index to the list
index = [("%s%s" % (namespace, f), pymongo.ASCENDING) for f in unique_fields]
unique_indexes.append(index)
# Grab any embedded document field unique indexes
if field.__class__.__name__ == "EmbeddedDocumentField" and field.document_type != new_class:
field_namespace = "%s." % field_name
unique_indexes += cls._unique_with_indexes(field.document_type,
field_namespace)
return unique_indexes
class BaseDocument(object):
_dynamic = False
_created = True
_dynamic_lock = True
_initialised = False
def __init__(self, **values):
signals.pre_init.send(self.__class__, document=self, values=values)
self._data = {}
# Assign default values to instance
for attr_name, field in self._fields.items():
value = getattr(self, attr_name, None)
setattr(self, attr_name, value)
# Set passed values after initialisation
if self._dynamic:
self._dynamic_fields = {}
dynamic_data = {}
for key, value in values.items():
if key in self._fields or key == '_id':
setattr(self, key, value)
elif self._dynamic:
dynamic_data[key] = value
else:
for key, value in values.items():
key = self._reverse_db_field_map.get(key, key)
setattr(self, key, value)
# Set any get_fieldname_display methods
self.__set_field_display()
if self._dynamic:
self._dynamic_lock = False
for key, value in dynamic_data.items():
setattr(self, key, value)
# Flag initialised
self._initialised = True
signals.post_init.send(self.__class__, document=self)
def __setattr__(self, name, value):
# Handle dynamic data only if an initialised dynamic document
if self._dynamic and not self._dynamic_lock:
field = None
if not hasattr(self, name) and not name.startswith('_'):
from fields import DynamicField
field = DynamicField(db_field=name)
field.name = name
self._dynamic_fields[name] = field
if not name.startswith('_'):
value = self.__expand_dynamic_values(name, value)
# Handle marking data as changed
if name in self._dynamic_fields:
self._data[name] = value
if hasattr(self, '_changed_fields'):
self._mark_as_changed(name)
if not self._created and name in self._meta.get('shard_key', tuple()):
from queryset import OperationError
raise OperationError("Shard Keys are immutable. Tried to update %s" % name)
super(BaseDocument, self).__setattr__(name, value)
def __expand_dynamic_values(self, name, value):
"""expand any dynamic values to their correct types / values"""
if not isinstance(value, (dict, list, tuple)):
return value
is_list = False
if not hasattr(value, 'items'):
is_list = True
value = dict([(k, v) for k, v in enumerate(value)])
if not is_list and '_cls' in value:
cls = get_document(value['_cls'])
value = cls(**value)
value._dynamic = True
value._changed_fields = []
return value
data = {}
for k, v in value.items():
key = name if is_list else k
data[k] = self.__expand_dynamic_values(key, v)
if is_list: # Convert back to a list
data_items = sorted(data.items(), key=operator.itemgetter(0))
value = [v for k, v in data_items]
else:
value = data
# Convert lists / values so we can watch for any changes on them
if isinstance(value, (list, tuple)) and not isinstance(value, BaseList):
value = BaseList(value, self, name)
elif isinstance(value, dict) and not isinstance(value, BaseDict):
value = BaseDict(value, self, name)
return value
def validate(self):
"""Ensure that all fields' values are valid and that required fields
are present.
"""
# Get a list of tuples of field names and their current values
fields = [(field, getattr(self, name))
for name, field in self._fields.items()]
# Ensure that each field is matched to a valid value
errors = {}
for field, value in fields:
if value is not None:
try:
field._validate(value)
except ValidationError, error:
errors[field.name] = error.errors or error
except (ValueError, AttributeError, AssertionError), error:
errors[field.name] = error
elif field.required:
errors[field.name] = ValidationError('Field is required',
field_name=field.name)
if errors:
raise ValidationError('ValidationError', errors=errors)
def to_mongo(self):
"""Return data dictionary ready for use with MongoDB.
"""
data = {}
for field_name, field in self._fields.items():
value = getattr(self, field_name, None)
if value is not None:
data[field.db_field] = field.to_mongo(value)
# Only add _cls and _types if allow_inheritance is not False
if not (hasattr(self, '_meta') and
self._meta.get('allow_inheritance', True) == False):
data['_cls'] = self._class_name
data['_types'] = self._superclasses.keys() + [self._class_name]
if '_id' in data and data['_id'] is None:
del data['_id']
if not self._dynamic:
return data
for name, field in self._dynamic_fields.items():
data[name] = field.to_mongo(self._data.get(name, None))
return data
@classmethod
def _get_collection_name(cls):
"""Returns the collection name for this class.
"""
return cls._meta.get('collection', None)
@classmethod
def _from_son(cls, son):
"""Create an instance of a Document (subclass) from a PyMongo SON.
"""
# get the class name from the document, falling back to the given
# class if unavailable
class_name = son.get('_cls', cls._class_name)
data = dict(("%s" % key, value) for key, value in son.items())
if '_types' in data:
del data['_types']
if '_cls' in data:
del data['_cls']
# Return correct subclass for document type
if class_name != cls._class_name:
cls = get_document(class_name)
changed_fields = []
errors_dict = {}
for field_name, field in cls._fields.items():
if field.db_field in data:
value = data[field.db_field]
try:
data[field_name] = (value if value is None
else field.to_python(value))
if field_name != field.db_field:
del data[field.db_field]
except (AttributeError, ValueError), e:
errors_dict[field_name] = e
elif field.default:
default = field.default
if callable(default):
default = default()
if isinstance(default, BaseDocument):
changed_fields.append(field_name)
if errors_dict:
errors = "\n".join(["%s - %s" % (k, v) for k, v in errors_dict.items()])
raise InvalidDocumentError("""
Invalid data to create a `%s` instance.\n%s""".strip() % (cls._class_name, errors))
obj = cls(**data)
obj._changed_fields = changed_fields
obj._created = False
return obj
def _mark_as_changed(self, key):
"""Marks a key as explicitly changed by the user
"""
if not key:
return
key = self._db_field_map.get(key, key)
if hasattr(self, '_changed_fields') and key not in self._changed_fields:
self._changed_fields.append(key)
def _get_changed_fields(self, key='', inspected=None):
"""Returns a list of all fields that have explicitly been changed.
"""
from mongoengine import EmbeddedDocument, DynamicEmbeddedDocument
_changed_fields = []
_changed_fields += getattr(self, '_changed_fields', [])
inspected = inspected or set()
if hasattr(self, 'id'):
if self.id in inspected:
return _changed_fields
inspected.add(self.id)
field_list = self._fields.copy()
if self._dynamic:
field_list.update(self._dynamic_fields)
for field_name in field_list:
db_field_name = self._db_field_map.get(field_name, field_name)
key = '%s.' % db_field_name
field = self._data.get(field_name, None)
if hasattr(field, 'id'):
if field.id in inspected:
continue
inspected.add(field.id)
if isinstance(field, (EmbeddedDocument, DynamicEmbeddedDocument)) and db_field_name not in _changed_fields: # Grab all embedded fields that have been changed
_changed_fields += ["%s%s" % (key, k) for k in field._get_changed_fields(key, inspected) if k]
elif isinstance(field, (list, tuple, dict)) and db_field_name not in _changed_fields: # Loop list / dict fields as they contain documents
# Determine the iterator to use
if not hasattr(field, 'items'):
iterator = enumerate(field)
else:
iterator = field.iteritems()
for index, value in iterator:
if not hasattr(value, '_get_changed_fields'):
continue
list_key = "%s%s." % (key, index)
_changed_fields += ["%s%s" % (list_key, k) for k in value._get_changed_fields(list_key, inspected) if k]
return _changed_fields
def _delta(self):
"""Returns the delta (set, unset) of the changes for a document.
Gets any values that have been explicitly changed.
"""
# Handles cases where not loaded from_son but has _id
doc = self.to_mongo()
set_fields = self._get_changed_fields()
set_data = {}
unset_data = {}
parts = []
if hasattr(self, '_changed_fields'):
set_data = {}
# Fetch each set item from its path
for path in set_fields:
parts = path.split('.')
d = doc
new_path = []
for p in parts:
if isinstance(d, DBRef):
break
elif p.isdigit():
d = d[int(p)]
elif hasattr(d, 'get'):
d = d.get(p)
new_path.append(p)
path = '.'.join(new_path)
set_data[path] = d
else:
set_data = doc
if '_id' in set_data:
del(set_data['_id'])
# Determine if any changed items were actually unset.
for path, value in set_data.items():
if value or isinstance(value, bool):
continue
            # If we've set a value that isn't the default value, don't unset it.
default = None
if self._dynamic and len(parts) and parts[0] in self._dynamic_fields:
del(set_data[path])
unset_data[path] = 1
continue
elif path in self._fields:
default = self._fields[path].default
else: # Perform a full lookup for lists / embedded lookups
d = self
parts = path.split('.')
db_field_name = parts.pop()
for p in parts:
if p.isdigit():
d = d[int(p)]
elif hasattr(d, '__getattribute__') and not isinstance(d, dict):
real_path = d._reverse_db_field_map.get(p, p)
d = getattr(d, real_path)
else:
d = d.get(p)
if hasattr(d, '_fields'):
field_name = d._reverse_db_field_map.get(db_field_name,
db_field_name)
if field_name in d._fields:
default = d._fields.get(field_name).default
else:
default = None
if default is not None:
if callable(default):
default = default()
if default != value:
continue
del(set_data[path])
unset_data[path] = 1
return set_data, unset_data
@classmethod
def _geo_indices(cls, inspected=None):
inspected = inspected or []
geo_indices = []
inspected.append(cls)
from fields import EmbeddedDocumentField, GeoPointField
for field in cls._fields.values():
if not isinstance(field, (EmbeddedDocumentField, GeoPointField)):
continue
if hasattr(field, 'document_type'):
field_cls = field.document_type
if field_cls in inspected:
continue
if hasattr(field_cls, '_geo_indices'):
geo_indices += field_cls._geo_indices(inspected)
elif field._geo_index:
geo_indices.append(field)
return geo_indices
def __getstate__(self):
removals = ["get_%s_display" % k for k, v in self._fields.items() if v.choices]
for k in removals:
if hasattr(self, k):
delattr(self, k)
return self.__dict__
def __setstate__(self, __dict__):
self.__dict__ = __dict__
self.__set_field_display()
def __set_field_display(self):
for attr_name, field in self._fields.items():
if field.choices: # dynamically adds a way to get the display value for a field with choices
setattr(self, 'get_%s_display' % attr_name, partial(self.__get_field_display, field=field))
def __get_field_display(self, field):
"""Returns the display value for a choice field"""
value = getattr(self, field.name)
if field.choices and isinstance(field.choices[0], (list, tuple)):
return dict(field.choices).get(value, value)
return value
def __iter__(self):
return iter(self._fields)
def __getitem__(self, name):
"""Dictionary-style field access, return a field's value if present.
"""
try:
if name in self._fields:
return getattr(self, name)
except AttributeError:
pass
raise KeyError(name)
def __setitem__(self, name, value):
"""Dictionary-style field access, set a field's value.
"""
# Ensure that the field exists before settings its value
if name not in self._fields:
raise KeyError(name)
return setattr(self, name, value)
def __contains__(self, name):
try:
val = getattr(self, name)
return val is not None
except AttributeError:
return False
def __len__(self):
return len(self._data)
def __repr__(self):
try:
u = unicode(self).encode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return '<%s: %s>' % (self.__class__.__name__, u)
def __str__(self):
if hasattr(self, '__unicode__'):
return unicode(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
if isinstance(other, self.__class__) and hasattr(other, 'id'):
if self.id == other.id:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self.pk is None:
# For new object
return super(BaseDocument, self).__hash__()
else:
return hash(self.pk)
class BaseList(list):
"""A special list so we can watch any changes
"""
_dereferenced = False
_instance = None
_name = None
def __init__(self, list_items, instance, name):
self._instance = instance
self._name = name
return super(BaseList, self).__init__(list_items)
def __setitem__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__setitem__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__delitem__(*args, **kwargs)
def __getstate__(self):
self.observer = None
return self
def __setstate__(self, state):
self = state
return self
def append(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).append(*args, **kwargs)
def extend(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).extend(*args, **kwargs)
def insert(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).insert(*args, **kwargs)
def pop(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).pop(*args, **kwargs)
def remove(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).remove(*args, **kwargs)
def reverse(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).reverse(*args, **kwargs)
def sort(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).sort(*args, **kwargs)
def _mark_as_changed(self):
if hasattr(self._instance, '_mark_as_changed'):
self._instance._mark_as_changed(self._name)
class BaseDict(dict):
"""A special dict so we can watch any changes
"""
_dereferenced = False
_instance = None
_name = None
def __init__(self, dict_items, instance, name):
self._instance = instance
self._name = name
return super(BaseDict, self).__init__(dict_items)
def __setitem__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).__setitem__(*args, **kwargs)
def __delete__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).__delete__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).__delitem__(*args, **kwargs)
def __delattr__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).__delattr__(*args, **kwargs)
def __getstate__(self):
self.instance = None
self._dereferenced = False
return self
def __setstate__(self, state):
self = state
return self
def clear(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).clear(*args, **kwargs)
def pop(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).pop(*args, **kwargs)
def popitem(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).popitem(*args, **kwargs)
def update(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).update(*args, **kwargs)
def _mark_as_changed(self):
if hasattr(self._instance, '_mark_as_changed'):
self._instance._mark_as_changed(self._name)
if sys.version_info < (2, 5):
# Prior to Python 2.5, Exception was an old-style class
import types
def subclass_exception(name, parents, unused):
import types
return types.ClassType(name, parents, {})
else:
def subclass_exception(name, parents, module):
return type(name, parents, {'__module__': module})
| newvem/mongoengine | mongoengine/base.py | Python | mit | 50,511 |
import os
import sys
import numpy as np
import properties
svm = properties.svm_path
model = properties.svm_model_path
output = properties.svm_output_path
config = []
config.append("bsvm-train.exe")
config.append("bsvm-predict.exe")
t = " -t 1 "
c = " -c 1 "
m = " -m 1024 "
w0 = " -w0 0.0384 "
w1 = " -w1 1.136 "
w2 = " -w2 0.37 "
w3 = " -w3 0.33 "
w4 = " -w4 0.20 "
w5 = " -w5 0.0164 "
d = " -d 4 "
predictFile = properties.test_features_file_path
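# Added comment (assumption, based on libsvm-style options): -t selects the kernel,
# -c the cost, -m the cache size in MB, -d the polynomial degree, and each -wN
# rescales the C penalty for class N (down-weighting the more frequent classes here).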
def SVMTrain(feature):
print 'SVM training started...\n'
cmd = "\""+svm+config[0]+"\""+ t + c + m + w0 + w1 + w2 + w3 + w4 + w5 +d +feature+" "+model
print cmd
os.system(cmd)
print 'SVM training finished\n'
def SVMPredict():
cmd = "\""+svm+config[1]+"\""+" "+predictFile+" "+model+" "+output
print cmd
os.system(cmd)
if __name__=="__main__":
#SVMTrain(properties.feature_file_path)
    SVMPredict() | sureshbvn/nlpProject | SVM_A/svm.py | Python | mit | 891
import pandas as pd
import os
import subprocess as sub
import re
import sys
from Bio import SeqUtils
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
path = os.path.join(os.path.expanduser('~'),'GENOMES_BACTER_RELEASE69/genbank')
# ['DbxRefs','Description','FeaturesNum','GenomicID','GenomicLen','GenomicName','Keywords','NucsPresent','Organism_des',
# 'SourceDbxRefs','SourceOrganism','SourcePlasmid','SourceStrain','Taxonomy','BioProject','TaxonID','Organism_env',
# 'OptimumTemperature','TemperatureRange','OxygenReq','Habitat','Salinity','crit_NC','crit_WGS','crit_genlen',
# 'crit_features','crit_comp_genome','crit_plasmid']
env_dat = pd.read_csv(os.path.join(path,"env_catalog_compgenome.dat"))
#['GenomicID','cDNA','fid','pid','product','protein','status','table','ribosomal','CAI','TrOp']
gen_dat = pd.read_csv(os.path.join(path,"complete_CDS_CAI_DNA.dat"))
# PROTEOME LEVEL AMINO ACID FREQUENCIES ...
# "proteome_all.dat"
# # file with the organisms of interest
# dat_fname = os.path.join(bib2_scr_path,'catalog_with_accesion.dat')
# dat = pd.read_csv(dat_fname)
aacids = sorted(list('CMFILVWYAGTSNQDEHRKP'))
cost_vec_path = path
akashi = os.path.join(cost_vec_path,'akashi-cost.d')
argentina = os.path.join(cost_vec_path,'argentina-cost.d')
akashi_cost = pd.read_csv(akashi,header=None,sep=' ')
argentina_cost = pd.read_csv(argentina,header=None,sep=' ')
thermo_freq = pd.read_csv(os.path.join(path,'thermo.dat'),header=None,sep=' ')
akashi_cost.set_index(0,inplace=True)
argentina_cost.set_index(0,inplace=True)
thermo_freq.set_index(0,inplace=True)
akashi_cost.sort_index(inplace=True)
argentina_cost.sort_index(inplace=True)
thermo_freq.sort_index(inplace=True)
#################################################
# after we processed all organism's genomes, we would need to
# pull out proteins with top XXX percent CAI, to analyse their amino acid compositions ...
#################################################################
#################################################################
# (1) for each asm - open protein detail file, sort by CAI and analyse proteins with top 10% CAI ...
# (2) output analysis results to external file ...
#################################################################
#################################################################
# def get_aausage_proteome(seqrec):
# # seqrec = db[seqrec_id]
# features = seqrec.features
# proteome = []
# for feature in features:
# qualifiers = feature.qualifiers
# if (feature.type == 'CDS')and('translation' in qualifiers):
# proteome.append(qualifiers['translation'][0])
# #return the results ...
# proteome = ''.join(proteome)
# prot_len = float(len(proteome))
# aa_freq = tuple(proteome.count(aa)/prot_len for aa in aacids)
# #
# return (int(prot_len),) + aa_freq
# def analyse_genome(db,seqrec_id):
# seqrec = db[seqrec_id]
# pl_aa_freq = get_aausage_proteome(seqrec)
# gc = SeqUtils.GC(seqrec.seq)
# id = seqrec.id
# return (id,gc) + pl_aa_freq
# PERCENTILE = 0.1
# accounted_GC = []
# aafs = {}
# for aa in aacids:
# aafs[aa] = []
# genome_length = []
# proteome_length = []
# and for each assembley it goes ...
######################
# fname = os.path.join(path_CAI,'%s_genes.dat'%asm)
######################
#
#
gen_dat_org = gen_dat.groupby('GenomicID')
# genom_id = orgs.groups.keys() # env_dat['GenomicID'] ...
# gen_dat_grouped.get_group(idx)
#
# how to get quantile ...
# q75 = pid_cai['CAI'].quantile(q=0.75)
#
#
num_of_quantiles = 5
#
stat_dat = {'GenomicID':[],
'OptimumTemperature':[],
'TrOp':[]}
for i in range(num_of_quantiles):
stat_dat['q%d'%i] = []
stat_dat['R20_q%d'%i] = []
stat_dat['Akashi_q%d'%i] = []
#
#
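# Added aside (not in the original script): pd.qcut with labels=False assigns each CAI
# value its quantile index 0..num_of_quantiles-1, so five equally sized quintiles come
# back simply as the labels 0, 1, 2, 3, 4.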
for idx,topt in env_dat[['GenomicID','OptimumTemperature']].itertuples(index=False):
cds_cai_dat = gen_dat_org.get_group(idx)
# is it a translationally optimized organism ?
    trop_all, trop_any = cds_cai_dat['TrOp'].all(), cds_cai_dat['TrOp'].any()
    if trop_all == trop_any:
        trans_opt = trop_all
    else: # trop_any != trop_all
print "%s@T=%f: Something wrong is happening: TrOp flag is not same for all ..."%(idx,topt)
# THIS IS just a stupid precaution measure, in case we messed something upstream ...
# not that stupid after all, because NaN is behaving badly here ...
if cds_cai_dat['TrOp'].notnull().all():
#
# we can use this 'qcut' function from pandas to divide our proteins by the quantiles ...
category,bins = pd.qcut(cds_cai_dat['CAI'],q=num_of_quantiles,retbins=True,labels=False)
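        # 'category' holds one quantile label (0 .. num_of_quantiles-1) per CDS; 'bins' holds the CAI bin edges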
#
stat_dat['GenomicID'].append(idx)
stat_dat['OptimumTemperature'].append(topt)
stat_dat['TrOp'].append(trans_opt)
#
# then we could iterate over proteins/cDNAs in these categories ...
for cat in range(num_of_quantiles):
cds_cai_category = cds_cai_dat[category==cat]
total_length = cds_cai_category['protein'].str.len().sum()
IVYWREL = sum(cds_cai_category['protein'].str.count(aa).sum() for aa in list('IVYWREL'))
# IVYWREL = cds_cai_category['protein'].str.count('|'.join("IVYWREL")).sum() # tiny bit slower ...
f_IVYWREL = float(IVYWREL)/float(total_length)
# 20-vector for of amino acid composition ...
aa_freq_20 = np.true_divide([cds_cai_category['protein'].str.count(aa).sum() for aa in aacids],float(total_length))
# slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
_1,_2,R20,_4,_5 = stats.linregress(aa_freq_20, thermo_freq[1])
# Akashi ...
cost = np.dot(aa_freq_20,akashi_cost[1])
# appending ...
#
#
stat_dat['q%d'%cat].append(f_IVYWREL)
stat_dat['R20_q%d'%cat].append(R20)
stat_dat['Akashi_q%d'%cat].append(cost)
#
#
#
cai_stats_quant = pd.DataFrame(stat_dat)
#
cai_stats_quant_TrOp = cai_stats_quant[cai_stats_quant.TrOp]
cai_stats_quant_noTrOp = cai_stats_quant[~cai_stats_quant.TrOp]
plt.clf()
bins = np.linspace(-0.05,0.05,50)
plt.hist(list(cai_stats_quant_TrOp.q4 - cai_stats_quant_TrOp.q1),bins=bins,color='blue')
plt.hist(list(cai_stats_quant_noTrOp.q4 - cai_stats_quant_noTrOp.q1),bins=bins,color='red',alpha=0.8)
plt.show()
plt.clf()
bins = np.linspace(-0.15,0.15,50)
# bins=50
plt.hist(list(cai_stats_quant[cai_stats_quant.OptimumTemperature<=50].R20_q4 - cai_stats_quant[cai_stats_quant.OptimumTemperature<=50].R20_q1),bins=bins,color='black',cumulative=False)
plt.hist(list(cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature<=50].R20_q4 - cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature<=50].R20_q1),bins=bins,color='blue',cumulative=False)
plt.hist(list(cai_stats_quant_TrOp[cai_stats_quant_TrOp.OptimumTemperature<=50].R20_q4 - cai_stats_quant_TrOp[cai_stats_quant_TrOp.OptimumTemperature<=50].R20_q1),bins=bins,color='red',alpha=0.8,cumulative=False)
plt.xlabel('$R^{4}_{T} - R^{1}_{T}$')
# plt.hist(list(cai_stats_quant_TrOp.R20_q4 - cai_stats_quant_TrOp.R20_q1),bins=bins,color='blue')
# plt.hist(list(cai_stats_quant_noTrOp.R20_q4 - cai_stats_quant_noTrOp.R20_q1),bins=bins,color='red',alpha=0.8)
plt.show()
plt.clf()
plt.plot(cai_stats_quant.OptimumTemperature,cai_stats_quant.q1,'bo',alpha=0.8)
plt.plot(cai_stats_quant.OptimumTemperature,cai_stats_quant.q4,'ro',alpha=0.8)
plt.show()
# #
plt.clf()
plt.plot(cai_stats_quant.OptimumTemperature,cai_stats_quant.R20_q1,'bo',alpha=0.8)
plt.plot(cai_stats_quant.OptimumTemperature,cai_stats_quant.R20_q4,'ro',alpha=0.8)
plt.show()
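# NB: the GC plots below assume a 'GC' column on cai_stats_quant (e.g. genomic GC% merged in
# from the environmental catalog); it is not built above, so add it before running this block.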
plt.clf()
plt.plot(cai_stats_quant.GC,cai_stats_quant.R20_q1,'bo',alpha=0.8)
plt.plot(cai_stats_quant.GC,cai_stats_quant.R20_q4,'ro',alpha=0.8)
plt.show()
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.plot([i+1,]*cai_stats_quant.shape[0],cai_stats_quant[k1],alpha=0.7)
plt.xlim(0,6)
plt.clf()
for i in range(num_of_quantiles):
k1 = 'q%d'%i
k2 = 'R20_q%d'%i
k3 = 'Akashi_q%d'%i
#
plt.errorbar([i+1,],cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature>0][k2].mean(),yerr=cai_stats_quant_noTrOp[cai_stats_quant_noTrOp.OptimumTemperature>0][k2].std(),fmt='o')
plt.xlim(0,6)
plt.show()
# R20 grows on average,
#        |  meso  thermo
# -------+---------------
# TrOp   |   ++     ~+
# noTrOp |   +      ~
# Akashi is declining on average
#        |  meso  thermo
# -------+---------------
# TrOp   |   --     --
# noTrOp |   ~-     ~-
# IVYWREL is declining on average
#        |  meso  thermo
# -------+---------------
# TrOp   |   --     ~-
# noTrOp |   ~-     -
# After reshuffling given CAI, everything becomes flat and 0-centered with much narrower distributions
# #
# # # move on ...
# # # quantiles calculation ...
# # q20,q40,q60,q80 = cds_cai_dat['CAI'].quantile(q=[0.2,0.4,0.6,0.8])
# # #
# # q1_idx = (cds_cai_dat['CAI']<=q20)
# # q2_idx = (q20<cds_cai_dat['CAI'])&(cds_cai_dat['CAI']<=q40)
# # q3_idx = (q40<cds_cai_dat['CAI'])&(cds_cai_dat['CAI']<=q60)
# # q4_idx = (q60<cds_cai_dat['CAI'])&(cds_cai_dat['CAI']<=q80)
# # q5_idx = (q80<cds_cai_dat['CAI'])
# # # q3_idx = q40<cds_cai_dat['CAI']<=q60
# # # q4_idx = q60<cds_cai_dat['CAI']<=q80
# # # q5_idx = q80<cds_cai_dat['CAI']
# # ['q1', 'q2', 'q3', 'q4', 'q5']
# for IndexId,prot_det in dat[['IndexId','protein_details']].get_values():
# ###################
# # in the case of Bacteria, we know for sure that a single accession number refers to related
# if prot_det:
# # open a file with the analysed organismal proteins...
# protein_fname = os.path.join(path_CAI,'%s_genes.dat'%IndexId)
# # load the data ...
# protein_dat = pd.read_csv(protein_fname)
# # "prot_id,cai,gene_product,gene_seq,prot_seq" are the columns ...
# # we'll be taking proteins with top PERCENTILE% CAI in the list ...
# # protein total number is the first dimension of the table here ...
# number_of_proteins,_ = protein_dat.shape
# accounted_proteins = int(number_of_proteins*PERCENTILE)
# # top PERCENTILE proteins will be considered for analysis ...
# accounted_data = protein_dat.sort(columns='cai',ascending=False)[:accounted_proteins]
# # analyse that stuff ...
# cai_proteome = ''.join(accounted_data['prot_seq'])
# cai_proteome_len = float(len(cai_proteome))
# cai_genome = ''.join(accounted_data['gene_seq'])
# #
# accounted_GC.append(SeqUtils.GC(cai_genome))
# #
# for aa in aacids:
# aafs[aa].append(cai_proteome.count(aa)/cai_proteome_len)
# else:
# # no protein details exist, no corresponding file exist at all...
# accounted_GC.append(0.0)
# for aa in aacids:
# aafs[aa].append(0.0)
# dat['GC'] = accounted_GC
# for aa in aacids:
# dat[aa] = aafs[aa]
# dat.to_csv('cai5_bacter.dat',index=False)
| sergpolly/Thermal_adapt_scripts | composition_analysis_Thermo.py | Python | mit | 11,114 |
import pandas as pd
df_ab = pd.DataFrame({'a': ['a_1', 'a_2', 'a_3'], 'b': ['b_1', 'b_2', 'b_3']})
df_ac = pd.DataFrame({'a': ['a_1', 'a_2', 'a_4'], 'c': ['c_1', 'c_2', 'c_4']})
print(df_ab)
# a b
# 0 a_1 b_1
# 1 a_2 b_2
# 2 a_3 b_3
print(df_ac)
# a c
# 0 a_1 c_1
# 1 a_2 c_2
# 2 a_4 c_4
print(pd.merge(df_ab, df_ac))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(df_ab.merge(df_ac))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(pd.merge(df_ab, df_ac, on='a'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
df_ac_ = df_ac.rename(columns={'a': 'a_'})
print(df_ac_)
# a_ c
# 0 a_1 c_1
# 1 a_2 c_2
# 2 a_4 c_4
print(pd.merge(df_ab, df_ac_, left_on='a', right_on='a_'))
# a b a_ c
# 0 a_1 b_1 a_1 c_1
# 1 a_2 b_2 a_2 c_2
print(pd.merge(df_ab, df_ac_, left_on='a', right_on='a_').drop(columns='a_'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(pd.merge(df_ab, df_ac, on='a', how='inner'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(pd.merge(df_ab, df_ac, on='a', how='left'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
# 2 a_3 b_3 NaN
print(pd.merge(df_ab, df_ac, on='a', how='right'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
# 2 a_4 NaN c_4
print(pd.merge(df_ab, df_ac, on='a', how='outer'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
# 2 a_3 b_3 NaN
# 3 a_4 NaN c_4
print(pd.merge(df_ab, df_ac, on='a', how='inner', indicator=True))
# a b c _merge
# 0 a_1 b_1 c_1 both
# 1 a_2 b_2 c_2 both
print(pd.merge(df_ab, df_ac, on='a', how='outer', indicator=True))
# a b c _merge
# 0 a_1 b_1 c_1 both
# 1 a_2 b_2 c_2 both
# 2 a_3 b_3 NaN left_only
# 3 a_4 NaN c_4 right_only
print(pd.merge(df_ab, df_ac, on='a', how='outer', indicator='indicator'))
# a b c indicator
# 0 a_1 b_1 c_1 both
# 1 a_2 b_2 c_2 both
# 2 a_3 b_3 NaN left_only
# 3 a_4 NaN c_4 right_only
df_ac_b = df_ac.rename(columns={'c': 'b'})
print(df_ac_b)
# a b
# 0 a_1 c_1
# 1 a_2 c_2
# 2 a_4 c_4
print(pd.merge(df_ab, df_ac_b, on='a'))
# a b_x b_y
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
print(pd.merge(df_ab, df_ac_b, on='a', suffixes=['_left', '_right']))
# a b_left b_right
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
df_abx = df_ab.assign(x=['x_2', 'x_2', 'x_3'])
df_acx = df_ac.assign(x=['x_1', 'x_2', 'x_2'])
print(df_abx)
# a b x
# 0 a_1 b_1 x_2
# 1 a_2 b_2 x_2
# 2 a_3 b_3 x_3
print(df_acx)
# a c x
# 0 a_1 c_1 x_1
# 1 a_2 c_2 x_2
# 2 a_4 c_4 x_2
print(pd.merge(df_abx, df_acx))
# a b x c
# 0 a_2 b_2 x_2 c_2
print(pd.merge(df_abx, df_acx, on=['a', 'x']))
# a b x c
# 0 a_2 b_2 x_2 c_2
print(pd.merge(df_abx, df_acx, on='a'))
# a b x_x c x_y
# 0 a_1 b_1 x_2 c_1 x_1
# 1 a_2 b_2 x_2 c_2 x_2
df_acx_ = df_acx.rename(columns={'x': 'x_'})
print(df_acx_)
# a c x_
# 0 a_1 c_1 x_1
# 1 a_2 c_2 x_2
# 2 a_4 c_4 x_2
print(pd.merge(df_abx, df_acx_, left_on=['a', 'x'], right_on=['a', 'x_']))
# a b x c x_
# 0 a_2 b_2 x_2 c_2 x_2
print(pd.merge(df_abx, df_acx, on=['a', 'x'], how='inner'))
# a b x c
# 0 a_2 b_2 x_2 c_2
print(pd.merge(df_abx, df_acx, on=['a', 'x'], how='left'))
# a b x c
# 0 a_1 b_1 x_2 NaN
# 1 a_2 b_2 x_2 c_2
# 2 a_3 b_3 x_3 NaN
print(pd.merge(df_abx, df_acx, on=['a', 'x'], how='right'))
# a b x c
# 0 a_2 b_2 x_2 c_2
# 1 a_1 NaN x_1 c_1
# 2 a_4 NaN x_2 c_4
print(pd.merge(df_abx, df_acx, on=['a', 'x'], how='outer'))
# a b x c
# 0 a_1 b_1 x_2 NaN
# 1 a_2 b_2 x_2 c_2
# 2 a_3 b_3 x_3 NaN
# 3 a_1 NaN x_1 c_1
# 4 a_4 NaN x_2 c_4
print(pd.merge(df_abx, df_acx, on=['a', 'x'], how='outer', sort=True))
# a b x c
# 0 a_1 NaN x_1 c_1
# 1 a_1 b_1 x_2 NaN
# 2 a_2 b_2 x_2 c_2
# 3 a_3 b_3 x_3 NaN
# 4 a_4 NaN x_2 c_4
df_ac_i = df_ac.set_index('a')
print(df_ac_i)
# c
# a
# a_1 c_1
# a_2 c_2
# a_4 c_4
print(pd.merge(df_ab, df_ac_i, left_on='a', right_index=True))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
df_ab_i = df_ab.set_index('a')
print(df_ab_i)
# b
# a
# a_1 b_1
# a_2 b_2
# a_3 b_3
print(pd.merge(df_ab_i, df_ac_i, left_index=True, right_index=True))
# b c
# a
# a_1 b_1 c_1
# a_2 b_2 c_2
print(df_ab_i)
# b
# a
# a_1 b_1
# a_2 b_2
# a_3 b_3
print(df_ac_i)
# c
# a
# a_1 c_1
# a_2 c_2
# a_4 c_4
print(df_ab_i.join(df_ac_i))
# b c
# a
# a_1 b_1 c_1
# a_2 b_2 c_2
# a_3 b_3 NaN
print(df_ab_i.join(df_ac_i, how='inner'))
# b c
# a
# a_1 b_1 c_1
# a_2 b_2 c_2
print(df_ab)
# a b
# 0 a_1 b_1
# 1 a_2 b_2
# 2 a_3 b_3
print(df_ab.join(df_ac_i, on='a'))
# a b c
# 0 a_1 b_1 c_1
# 1 a_2 b_2 c_2
# 2 a_3 b_3 NaN
df_ad_i = pd.DataFrame({'a': ['a_1', 'a_4', 'a_5'], 'd': ['d_1', 'd_4', 'd_5']}).set_index('a')
print(df_ad_i)
# d
# a
# a_1 d_1
# a_4 d_4
# a_5 d_5
print(df_ab_i.join([df_ac_i, df_ad_i]))
# b c d
# a
# a_1 b_1 c_1 d_1
# a_2 b_2 c_2 NaN
# a_3 b_3 NaN NaN
print(df_ac_i.join([df_ad_i, df_ab_i]))
# c d b
# a
# a_1 c_1 d_1 b_1
# a_2 c_2 NaN b_2
# a_4 c_4 d_4 NaN
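# Newer pandas (1.2+) also offers how='cross' for a cartesian product; sketched here only,
# since availability depends on the installed version:
# print(pd.merge(df_ab, df_ac, how='cross'))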
| nkmk/python-snippets | notebook/pandas_merge_join.py | Python | mit | 5,590 |
'''
Created on Apr 28, 2011
@author: Bartosz Alchimowicz
'''
############################
# Collection #
############################
class ScreenSpec(object):
def __init__(self):
self.children = []
def append(self, screen):
assert isinstance(screen, Screen)
self.children.append(screen)
return self
############################
# Header #
############################
class Screen(object):
def __init__(self, name, children = None):
assert isinstance(name, basestring)
self.name = name
self.children = []
if children:
assert isinstance(children, list)
for child in children:
self.append(child)
def append(self, child):
assert isinstance(child, Component)
self.children.append(child)
return self
############################
# Component #
############################
class Component(object):
def __init__(self, identifier, name):
assert identifier is not None
self.identifier = identifier
self.name = name
class ComoundComponent(Component):
def __init__(self, identifier, name):
Component.__init__(self, identifier, name)
self.children = []
def append(self, child):
assert isinstance(child, Component)
self.children.append(child)
return self
class StaticValueContainer(object):
def __init__(self):
self._static_values = None
def _set_static_values(self, values):
raise Exception("Please overwrite")
def _get_static_values(self):
return self._static_values
class ComoundValuesContainer(object):
def __init__(self):
        self._grid = None # 2d list
############################
# Basic Components #
############################
class Entity(Component):
def __init__(self, identifier):
Component.__init__(self, identifier, None)
class Button(Component, StaticValueContainer):
def __init__(self, identifier):
Component.__init__(self, identifier, "BUTTON")
StaticValueContainer.__init__(self)
def _set_static_values(self, values):
if len(values) != 1:
raise Exception("Wrong number of static values")
self._static_values = values
static_values = property(StaticValueContainer._get_static_values, _set_static_values)
class Link(Component, StaticValueContainer):
def __init__(self, identifier):
Component.__init__(self, identifier, "LINK")
StaticValueContainer.__init__(self)
def _set_static_values(self, values):
if len(values) != 1:
raise Exception("Wrong number of static values")
self._static_values = values
static_values = property(StaticValueContainer._get_static_values, _set_static_values)
class Image(Component):
def __init__(self, identifier):
Component.__init__(self, identifier, "IMAGE")
class StaticText(Component, StaticValueContainer):
def __init__(self, identifier):
Component.__init__(self, identifier, "STATIC_TEXT")
StaticValueContainer.__init__(self)
def _set_static_values(self, values):
if len(values) != 1:
raise Exception("Wrong number of static values")
self._static_values = values
static_values = property(StaticValueContainer._get_static_values, _set_static_values)
class DynamicText(Component):
def __init__(self, identifier):
Component.__init__(self, identifier, "DYNAMIC_TEXT")
class EditBox(Component, StaticValueContainer):
def __init__(self, identifier):
Component.__init__(self, identifier, "EDIT_BOX")
StaticValueContainer.__init__(self)
def _set_static_values(self, values):
if len(values) != 1:
raise Exception("Wrong number of static values")
self._static_values = values
static_values = property(StaticValueContainer._get_static_values, _set_static_values)
class CheckBox(Component):
def __init__(self, identifier):
Component.__init__(self, identifier, "CHECK_BOX")
class RadioButton(Component):
def __init__(self, identifier):
Component.__init__(self, identifier, "RADIO_BUTTON")
class TextArea(Component, StaticValueContainer):
def __init__(self, identifier):
Component.__init__(self, identifier, "TEXT_AREA")
StaticValueContainer.__init__(self)
def _set_static_values(self, values):
if len(values) == 0:
raise Exception("Wrong number of static values")
self._static_values = values
static_values = property(StaticValueContainer._get_static_values, _set_static_values)
class Password(Component):
def __init__(self, identifier):
Component.__init__(self, identifier, "PASSWORD")
class Custom(Component):
def __init__(self, identifier):
        assert False, "Modify screenspec.parser to add custom components"
############################
# Semi-Compound Components #
############################
class ComboBox(ComoundComponent, StaticValueContainer):
def __init__(self, identifier):
ComoundComponent.__init__(self, identifier, "COMBO_BOX")
StaticValueContainer.__init__(self)
def _set_static_values(self, values):
if len(values) == 0:
raise Exception("Wrong number of static values")
self._static_values = values
static_values = property(StaticValueContainer._get_static_values, _set_static_values)
class ListBox(ComoundComponent, StaticValueContainer):
def __init__(self, identifier):
ComoundComponent.__init__(self, identifier, "LIST_BOX")
StaticValueContainer.__init__(self)
def _set_static_values(self, values):
if len(values) == 0:
raise Exception("Wrong number of static values")
self._static_values = values
static_values = property(StaticValueContainer._get_static_values, _set_static_values)
class RadioButtons(ComoundComponent, StaticValueContainer):
def __init__(self, identifier):
ComoundComponent.__init__(self, identifier, "RADIO_BUTTONS")
StaticValueContainer.__init__(self)
def _set_static_values(self, values):
if len(values) == 0:
raise Exception("Wrong number of static values")
self._static_values = values
static_values = property(StaticValueContainer._get_static_values, _set_static_values)
class CheckBoxes(ComoundComponent, StaticValueContainer):
def __init__(self, identifier):
ComoundComponent.__init__(self, identifier, "CHECK_BOXES")
StaticValueContainer.__init__(self)
def _set_static_values(self, values):
if len(values) == 0:
raise Exception("Wrong number of static values")
self._static_values = values
static_values = property(StaticValueContainer._get_static_values, _set_static_values)
############################
# Compound Components #
############################
class Simple(ComoundComponent, ComoundValuesContainer):
def __init__(self, identifier):
ComoundComponent.__init__(self, identifier, "SIMPLE")
ComoundValuesContainer.__init__(self)
class List(ComoundComponent, ComoundValuesContainer):
def __init__(self, identifier):
ComoundComponent.__init__(self, identifier, "LIST")
ComoundValuesContainer.__init__(self)
class Table(ComoundComponent, ComoundValuesContainer):
def __init__(self, identifier):
ComoundComponent.__init__(self, identifier, "TABLE")
ComoundValuesContainer.__init__(self)
############################
# StaticValues #
############################
class StaticValue(object):
def __init__(self, value, selected = False):
assert value != None
self.value = value
self.selected = selected
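# Minimal usage sketch (not part of the original module); the identifiers below are made up.
if __name__ == '__main__':
    spec = ScreenSpec()
    login = Screen('Login')
    login.append(EditBox('username'))
    login.append(Password('password'))
    login.append(Button('submit'))
    spec.append(login)
    print [child.name for child in login.children]  # ['EDIT_BOX', 'PASSWORD', 'BUTTON']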
| perfidia/screensketch | src/screensketch/screenspec/model.py | Python | mit | 7,161 |
try:
__version__ = __import__('pkg_resources').require('booleanOperations')[0].version
except Exception:
__version__ = 'unknown'
| moyogo/booleanoperations | Lib/booleanOperations/version.py | Python | mit | 137 |
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _as_mat(x):
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
def _ij_ik_il_to_jkl(a, b, c):
ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk
return chainer.functions.matmul(_as_mat(ab).T, c).reshape(
a.shape[1], b.shape[1], c.shape[1])
def _ij_ik_jkl_to_il(a, b, c):
ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk
c = c.reshape(-1, c.shape[-1]) # [jk]l
return chainer.functions.matmul(_as_mat(ab), c)
def _ij_il_jkl_to_ik(a, b, c):
return _ij_ik_jkl_to_il(a, b, chainer.functions.swapaxes(c, 1, 2))
def _ik_il_jkl_to_ij(a, b, c):
return _ij_ik_jkl_to_il(a, b, chainer.functions.rollaxis(c, 0, c.ndim))
class BilinearFunction(function_node.FunctionNode):
def check_type_forward(self, in_types):
n_in = type_check.eval(in_types.size())
if n_in != 3 and n_in != 6:
raise type_check.InvalidType(
'{0} or {1}'.format(
in_types.size() == 3, in_types.size() == 6),
'{0} == {1}'.format(in_types.size(), n_in))
e1_type, e2_type, W_type = in_types[:3]
type_check_prod = type_check.make_variable(numpy.prod, 'prod')
type_check.expect(
e1_type.dtype == numpy.float32,
e1_type.ndim >= 2,
e2_type.dtype == numpy.float32,
e2_type.ndim >= 2,
e1_type.shape[0] == e2_type.shape[0],
W_type.dtype == numpy.float32,
W_type.ndim == 3,
type_check_prod(e1_type.shape[1:]) == W_type.shape[0],
type_check_prod(e2_type.shape[1:]) == W_type.shape[1],
)
if n_in == 6:
out_size = W_type.shape[2]
V1_type, V2_type, b_type = in_types[3:]
type_check.expect(
V1_type.dtype == numpy.float32,
V1_type.ndim == 2,
V1_type.shape[0] == W_type.shape[0],
V1_type.shape[1] == out_size,
V2_type.dtype == numpy.float32,
V2_type.ndim == 2,
V2_type.shape[0] == W_type.shape[1],
V2_type.shape[1] == out_size,
b_type.dtype == numpy.float32,
b_type.ndim == 1,
b_type.shape[0] == out_size,
)
def forward(self, inputs):
self.retain_inputs(tuple(range(len(inputs))))
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W = inputs[2]
xp = cuda.get_array_module(*inputs)
# optimize: y = xp.einsum('ij,ik,jkl->il', e1, e2, W)
y = xp.tensordot(xp.einsum('ij,ik->ijk', e1, e2), W, axes=2)
if len(inputs) == 6:
V1, V2, b = inputs[3:]
y += e1.dot(V1)
y += e2.dot(V2)
y += b
return y,
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
e1, e2, W = inputs[:3]
gy, = grad_outputs
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
return BilinearFunctionGrad().apply((e1, e2, W, V1, V2, gy))
return BilinearFunctionGrad().apply((e1, e2, W, gy))
class BilinearFunctionGrad(function_node.FunctionNode):
def forward(self, inputs):
self.retain_inputs(tuple(range(len(inputs))))
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W, gy = inputs[2], inputs[-1]
xp = cuda.get_array_module(*inputs)
# optimize: gW = xp.einsum('ij,ik,il->jkl', e1, e2, gy)
gW = xp.einsum('ij,ik->jki', e1, e2).dot(gy)
gy_W = xp.tensordot(gy, W, axes=(1, 2)) # 'il,jkl->ijk'
# optimize: ge1 = xp.einsum('ik,jkl,il->ij', e2, W, gy)
ge1 = xp.einsum('ik,ijk->ij', e2, gy_W)
# optimize: ge2 = xp.einsum('ij,jkl,il->ik', e1, W, gy)
ge2 = xp.einsum('ij,ijk->ik', e1, gy_W)
ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
gV1 = e1.T.dot(gy)
gV2 = e2.T.dot(gy)
gb = gy.sum(0)
ge1 += gy.dot(V1.T)
ge2 += gy.dot(V2.T)
ret += gV1, gV2, gb
return ret
def backward(self, indexes, grad_outputs):
inputs = self.get_retained_inputs()
e1 = _as_mat(inputs[0])
e2 = _as_mat(inputs[1])
W, gy = inputs[2], inputs[-1]
gge1 = _as_mat(grad_outputs[0])
gge2 = _as_mat(grad_outputs[1])
ggW = grad_outputs[2]
dge1_de2 = _ij_il_jkl_to_ik(gge1, gy, W)
dge1_dW = _ij_ik_il_to_jkl(gge1, e2, gy)
dge1_dgy = _ij_ik_jkl_to_il(gge1, e2, W)
dge2_de1 = _ik_il_jkl_to_ij(gge2, gy, W)
dge2_dW = _ij_ik_il_to_jkl(e1, gge2, gy)
dge2_dgy = _ij_ik_jkl_to_il(e1, gge2, W)
dgW_de1 = _ik_il_jkl_to_ij(e2, gy, ggW)
dgW_de2 = _ij_il_jkl_to_ik(e1, gy, ggW)
dgW_dgy = _ij_ik_jkl_to_il(e1, e2, ggW)
ge1 = dgW_de1 + dge2_de1
ge2 = dgW_de2 + dge1_de2
gW = dge1_dW + dge2_dW
ggy = dgW_dgy + dge1_dgy + dge2_dgy
if len(inputs) == 6:
V1, V2 = inputs[3], inputs[4]
ggV1, ggV2, ggb = grad_outputs[3:]
gV1 = chainer.functions.matmul(gge1, gy, transa=True)
gV2 = chainer.functions.matmul(gge2, gy, transa=True)
ge1 += chainer.functions.matmul(gy, ggV1, transb=True)
ge2 += chainer.functions.matmul(gy, ggV2, transb=True)
ggy += chainer.functions.matmul(gge1, V1)
ggy += chainer.functions.matmul(gge2, V2)
ggy += chainer.functions.matmul(e1, ggV1)
ggy += chainer.functions.matmul(e2, ggV2)
ggy += chainer.functions.broadcast_to(ggb, ggy.shape)
ge1 = ge1.reshape(inputs[0].shape)
ge2 = ge2.reshape(inputs[1].shape)
if len(inputs) == 6:
return ge1, ge2, gW, gV1, gV2, ggy
return ge1, ge2, gW, ggy
def bilinear(e1, e2, W, V1=None, V2=None, b=None):
"""Applies a bilinear function based on given parameters.
This is a building block of Neural Tensor Network (see the reference paper
below). It takes two input variables and one or four parameters, and
outputs one variable.
To be precise, denote six input arrays mathematically by
:math:`e^1\\in \\mathbb{R}^{I\\cdot J}`,
:math:`e^2\\in \\mathbb{R}^{I\\cdot K}`,
:math:`W\\in \\mathbb{R}^{J \\cdot K \\cdot L}`,
:math:`V^1\\in \\mathbb{R}^{J \\cdot L}`,
:math:`V^2\\in \\mathbb{R}^{K \\cdot L}`, and
:math:`b\\in \\mathbb{R}^{L}`,
where :math:`I` is mini-batch size.
In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear
parameters.
The output of forward propagation is calculated as
.. math::
y_{il} = \\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\
\\sum_{j} e^1_{ij} V^1_{jl} + \\sum_{k} e^2_{ik} V^2_{kl} + b_{l}.
Note that V1, V2, b are optional. If these are not given, then this
function omits the last three terms in the above equation.
.. note::
This function accepts an input variable ``e1`` or ``e2`` of a non-matrix
array. In this case, the leading dimension is treated as the batch
dimension, and the other dimensions are reduced to one dimension.
.. note::
In the original paper, :math:`J` and :math:`K`
must be equal and the author denotes :math:`[V^1 V^2]`
(concatenation of matrices) by :math:`V`.
Args:
e1 (~chainer.Variable): Left input variable.
e2 (~chainer.Variable): Right input variable.
W (~chainer.Variable): Quadratic weight variable.
V1 (~chainer.Variable): Left coefficient variable.
V2 (~chainer.Variable): Right coefficient variable.
b (~chainer.Variable): Bias variable.
Returns:
~chainer.Variable: Output variable.
See:
`Reasoning With Neural Tensor Networks for Knowledge Base Completion
<https://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-
networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013].
"""
flags = [V1 is None, V2 is None, b is None]
if any(flags):
if not all(flags):
raise ValueError('All coefficients and bias for bilinear() must '
'be None, if at least one of them is None.')
return BilinearFunction().apply((e1, e2, W))[0]
return BilinearFunction().apply((e1, e2, W, V1, V2, b))[0]
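# Illustrative usage sketch (not part of the original Chainer source); the shapes follow the
# docstring above and the values are arbitrary.
if __name__ == '__main__':
    e1 = numpy.random.rand(8, 3).astype(numpy.float32)    # (I, J)
    e2 = numpy.random.rand(8, 4).astype(numpy.float32)    # (I, K)
    W = numpy.random.rand(3, 4, 5).astype(numpy.float32)  # (J, K, L)
    y = bilinear(e1, e2, W)
    print(y.shape)  # -> (8, 5)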
| rezoo/chainer | chainer/functions/connection/bilinear.py | Python | mit | 8,648 |
__author__ = 'Emil E Nielsen'
start = 1
end = 17
for i in range (8):
for x in range(start, end):
print chr(x),
if x >= 128:
break
print "\n"
    start = end
end = start + 16 | EENielsen/school-python | ascii-tabel.py | Python | mit | 222 |
# A simple DOC file parser based on pyole
import os
import struct
import logging
import datetime
from pyole import *
class FIBBase(OLEBase):
wIdent = 0
nFib = 0
unused = 0
lid = 0
pnNext = 0
Flags1 = 0
fDot = 0
fGlsy = 0
fComplex = 0
fHasPic = 0
cQuickSaves = 0
fEncrypted = 0
fWhichTblStm = 0
fReadOnlyRecommended = 0
fWriteReservation = 0
fExtChar = 0
fLoadOverride = 0
fFarEast = 0
fObfuscated = 0
nFibBack = 0
lKey = 0
envr = 0
Flag2 = 0
fMac = 0
fEmptySpecial = 0
fLoadOverridePage = 0
reserved1 = 0
reserved2 = 0
fSpare0 = 0
reserved3 = 0
reserved4 = 0
reserved5 = 0
reserved6 = 0
def __init__(self, data):
self.wIdent = 0
self.nFib = 0
self.unused = 0
self.pnNext = 0
self.Flags1 = 0
self.fDot = 0
self.fGlsy = 0
self.fComplex = 0
self.fHasPic = 0
self.cQuickSaves = 0
self.fEncrypted = 0
self.fWhichTblStm = 0
self.fReadOnlyRecommended = 0
self.fWriteReservation = 0
self.fExtChar = 0
self.fLoadOverride = 0
self.fFarEast = 0
self.fObfuscated = 0
self.nFibBack = 0
self.lKey = 0
self.envr = 0
self.Flag2 = 0
self.fMac = 0
self.fEmptySpecial = 0
self.fLoadOverridePage = 0
self.reserved1 = 0
self.reserved2 = 0
self.fSpare0 = 0
self.reserved3 = 0
self.reserved4 = 0
self.reserved5 = 0
self.reserved6 = 0
self.wIdent = struct.unpack('<H', data[0x00:0x02])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.wIdent: ' + str(hex(self.wIdent)))
if self.wIdent != 0xA5EC:
self._raise_exception('DOC.FIB.FIBBase.wIdent has an abnormal value.')
self.nFib = struct.unpack('<H', data[0x02:0x04])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.nFib: ' + str(hex(self.nFib)))
if self.nFib != 0x00C1:
self._raise_exception('DOC.FIB.FIBBase.nFib has an abnormal value.')
self.unused = struct.unpack('<H', data[0x04:0x06])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.unused: ' + str(hex(self.unused)))
#if self.unused != 0:
# self.ole_logger.warning('DOC.FIB.FIBBase.unused is not zero.')
self.lid = struct.unpack('<H', data[0x06:0x08])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.lid: ' + str(hex(self.lid)))
self.pnNext = struct.unpack('<H', data[0x08:0x0A])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.pnNext: ' + str(hex(self.pnNext)))
if self.pnNext != 0:
self.ole_logger.warning('DOC.FIB.FIBBase.pnNext is not zero.')
self.Flags1 = struct.unpack('<H', data[0x0A:0x0C])[0]
self.fDot = self.Flags1 & 0x0001
self.ole_logger.debug('DOC.FIB.FIBBase.fDot: ' + str(self.fDot))
self.fGlsy = (self.Flags1 & 0x0002) >> 1
self.ole_logger.debug('DOC.FIB.FIBBase.fGlsy: ' + str(self.fGlsy))
self.fComplex = (self.Flags1 & 0x0004) >> 2
self.ole_logger.debug('DOC.FIB.FIBBase.fComplex: ' + str(self.fComplex))
self.fHasPic = (self.Flags1 & 0x0008) >> 3
self.ole_logger.debug('DOC.FIB.FIBBase.fHasPic: ' + str(self.fHasPic))
self.cQuickSaves = (self.Flags1 & 0x00F0) >> 4
self.ole_logger.debug('DOC.FIB.FIBBase.cQuickSaves: ' + str(self.cQuickSaves))
self.fEncrypted = (self.Flags1 & 0x0100) >> 8
self.ole_logger.debug('DOC.FIB.FIBBase.fEncrypted: ' + str(self.fEncrypted))
if self.fEncrypted == 1:
self.ole_logger.warning('File is encrypted.')
self.fWhichTblStm = (self.Flags1 & 0x0200) >> 9
self.ole_logger.debug('DOC.FIB.FIBBase.fWhichTblStm: ' + str(self.fWhichTblStm))
self.fReadOnlyRecommended = (self.Flags1 & 0x0400) >> 10
self.ole_logger.debug('DOC.FIB.FIBBase.fReadOnlyRecommended: ' + str(self.fReadOnlyRecommended))
self.fWriteReservation = (self.Flags1 & 0x0800) >> 11
self.ole_logger.debug('DOC.FIB.FIBBase.fWriteReservation: ' + str(self.fWriteReservation))
self.fExtChar = (self.Flags1 & 0x1000) >> 12
self.ole_logger.debug('DOC.FIB.FIBBase.fExtChar: ' + str(self.fExtChar))
if (self.Flags1 & 0x1000) >> 12 != 1:
self._raise_exception('DOC.FIB.FIBBase.fExtChar has an abnormal value.')
self.fLoadOverride = (self.Flags1 & 0x2000) >> 13
self.ole_logger.debug('DOC.FIB.FIBBase.fLoadOverride: ' + str(self.fLoadOverride))
self.fFarEast = (self.Flags1 & 0x4000) >> 14
self.ole_logger.debug('DOC.FIB.FIBBase.fFarEast: ' + str(self.fFarEast))
if self.fFarEast == 1:
self.ole_logger.warning('The installation language of the application that created the document was an East Asian language.')
self.fObfuscated = (self.Flags1 & 0x8000) >> 15
self.ole_logger.debug('DOC.FIB.FIBBase.fObfuscated: ' + str(self.fObfuscated))
if self.fObfuscated == 1:
if self.fEncrypted == 1:
self.ole_logger.warning('File is obfuscated by using XOR obfuscation.')
self.nFibBack = struct.unpack('<H', data[0x0C:0x0E])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.nFibBack: ' + str(hex(self.nFibBack)))
if self.nFibBack != 0x00BF and self.nFibBack != 0x00C1:
self._raise_exception('DOC.FIB.FIBBase.nFibBack has an abnormal value.')
self.lKey = struct.unpack('<I', data[0x0E:0x12])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.lKey: ' + str(hex(self.lKey)))
if self.fEncrypted == 1:
if self.fObfuscated == 1:
self.ole_logger.info('The XOR obfuscation key is: ' + str(hex(self.lKey)))
else:
if self.lKey != 0:
self._raise_exception('DOC.FIB.FIBBase.lKey has an abnormal value.')
self.envr = ord(data[0x12])
self.ole_logger.debug('DOC.FIB.FIBBase.envr: ' + str(hex(self.envr)))
if self.envr != 0:
self._raise_exception('DOC.FIB.FIBBase.envr has an abnormal value.')
self.Flag2 = ord(data[0x13])
self.fMac = self.Flag2 & 0x01
self.ole_logger.debug('DOC.FIB.FIBBase.fMac: ' + str(hex(self.fMac)))
if self.fMac != 0:
self._raise_exception('DOC.FIB.FIBBase.fMac has an abnormal value.')
self.fEmptySpecial = (self.Flag2 & 0x02) >> 1
self.ole_logger.debug('DOC.FIB.FIBBase.fEmptySpecial: ' + str(hex(self.fEmptySpecial)))
if self.fEmptySpecial != 0:
self.ole_logger.warning('DOC.FIB.FIBBase.fEmptySpecial is not zero.')
self.fLoadOverridePage = (self.Flag2 & 0x04) >> 2
self.ole_logger.debug('DOC.FIB.FIBBase.fLoadOverridePage: ' + str(hex(self.fLoadOverridePage)))
self.reserved1 = (self.Flag2 & 0x08) >> 3
self.ole_logger.debug('DOC.FIB.FIBBase.reserved1: ' + str(hex(self.reserved1)))
self.reserved2 = (self.Flag2 & 0x10) >> 4
self.ole_logger.debug('DOC.FIB.FIBBase.reserved2: ' + str(hex(self.reserved2)))
self.fSpare0 = (self.Flag2 & 0xE0) >> 5
self.ole_logger.debug('DOC.FIB.FIBBase.fSpare0: ' + str(hex(self.fSpare0)))
self.reserved3 = struct.unpack('<H', data[0x14:0x16])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.reserved3: ' + str(hex(self.reserved3)))
self.reserved4 = struct.unpack('<H', data[0x16:0x18])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.reserved4: ' + str(hex(self.reserved4)))
self.reserved5 = struct.unpack('<I', data[0x18:0x1C])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.reserved5: ' + str(hex(self.reserved5)))
self.reserved6 = struct.unpack('<I', data[0x1C:0x20])[0]
self.ole_logger.debug('DOC.FIB.FIBBase.reserved6: ' + str(hex(self.reserved6)))
class FibRgFcLcb(OLEBase):
fcSttbfAssoc = 0
lcbSttbfAssoc = 0
fcSttbfRMark = 0
lcbSttbfRMark = 0
fcSttbSavedBy = 0
lcbSttbSavedBy = 0
dwLowDateTime = 0
dwHighDateTime = 0
def __init__(self, data):
self.fcSttbfAssoc = 0
self.lcbSttbfAssoc = 0
self.fcSttbfRMark = 0
self.lcbSttbfRMark = 0
self.fcSttbSavedBy = 0
self.lcbSttbSavedBy = 0
self.dwLowDateTime = 0
self.dwHighDateTime = 0
self.fcSttbfAssoc = struct.unpack('<I', data[0x100:0x104])[0]
self.ole_logger.debug('DOC.FIB.FibRgFcLcb.fcSttbfAssoc: ' + str(hex(self.fcSttbfAssoc)))
self.lcbSttbfAssoc = struct.unpack('<I', data[0x104:0x108])[0]
self.ole_logger.debug('DOC.FIB.FibRgFcLcb.lcbSttbfAssoc: ' + str(hex(self.lcbSttbfAssoc)))
self.fcSttbfRMark = struct.unpack('<I', data[0x198:0x19C])[0]
self.ole_logger.debug('DOC.FIB.FibRgFcLcb.fcSttbfRMark: ' + str(hex(self.fcSttbfRMark)))
self.lcbSttbfRMark = struct.unpack('<I', data[0x19C:0x1A0])[0]
self.ole_logger.debug('DOC.FIB.FibRgFcLcb.lcbSttbfRMark: ' + str(hex(self.lcbSttbfRMark)))
self.fcSttbSavedBy = struct.unpack('<I', data[0x238:0x23C])[0]
self.ole_logger.debug('DOC.FIB.FibRgFcLcb.fcSttbSavedBy: ' + str(hex(self.fcSttbSavedBy)))
self.lcbSttbSavedBy = struct.unpack('<I', data[0x23C:0x240])[0]
self.ole_logger.debug('DOC.FIB.FibRgFcLcb.lcbSttbSavedBy: ' + str(hex(self.lcbSttbSavedBy)))
self.dwLowDateTime = struct.unpack('<I', data[0x2B8:0x2BC])[0]
self.ole_logger.debug('DOC.FIB.FibRgFcLcb.dwLowDateTime: ' + str(hex(self.dwLowDateTime)))
self.dwHighDateTime = struct.unpack('<I', data[0x2BC:0x2C0])[0]
self.ole_logger.debug('DOC.FIB.FibRgFcLcb.dwHighDateTime: ' + str(hex(self.dwHighDateTime)))
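        # Note (sketch, not in the original parser): dwHighDateTime/dwLowDateTime form a Windows
        # FILETIME (100-ns ticks since 1601-01-01 UTC); one possible conversion to a datetime:
        #   filetime = (self.dwHighDateTime << 32) | self.dwLowDateTime
        #   created = datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=filetime / 10.0)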
class FIB(OLEBase):
FIBBase = None
csw = 0
fibRgW = ''
cslw = 0
fibRgLw = ''
cbRgFcLcb = 0
fibRgFcLcbBlob = ''
cswNew = 0
def __init__(self, data):
self.FIBBase = None
self.csw = 0
self.fibRgW = ''
self.cslw = 0
self.fibRgLw = ''
self.cbRgFcLcb = 0
self.fibRgFcLcbBlob = ''
self.cswNew = 0
self.ole_logger.debug('######## FIB ########')
self.FIBBase = FIBBase(data[0:0x20])
self.csw = struct.unpack('<H', data[0x20:0x22])[0]
self.ole_logger.debug('DOC.FIB.csw: ' + str(hex(self.csw)))
if self.csw != 0x000E:
self._raise_exception('DOC.FIB.csw has an abnormal value.')
self.fibRgW = data[0x22:0x3E]
self.cslw = struct.unpack('<H', data[0x3E:0x40])[0]
self.ole_logger.debug('DOC.FIB.cslw: ' + str(hex(self.cslw)))
if self.cslw != 0x0016:
self._raise_exception('DOC.FIB.cslw has an abnormal value.')
self.fibRgLw = data[0x40:0x98]
self.cbRgFcLcb = struct.unpack('<H', data[0x98:0x9A])[0]
self.ole_logger.debug('DOC.FIB.cbRgFcLcb: ' + str(hex(self.cbRgFcLcb)))
'''
if self.FIBBase.nFib == 0x00C1 and self.cbRgFcLcb != 0x005D:
self._raise_exception('DOC.FIB.cbRgFcLcb has an abnormal value.')
if self.FIBBase.nFib == 0x00D9 and self.cbRgFcLcb != 0x006C:
self._raise_exception('DOC.FIB.cbRgFcLcb has an abnormal value.')
if self.FIBBase.nFib == 0x0101 and self.cbRgFcLcb != 0x0088:
self._raise_exception('DOC.FIB.cbRgFcLcb has an abnormal value.')
if self.FIBBase.nFib == 0x010C and self.cbRgFcLcb != 0x00A4:
self._raise_exception('DOC.FIB.cbRgFcLcb has an abnormal value.')
if self.FIBBase.nFib == 0x0112 and self.cbRgFcLcb != 0x00B7:
self._raise_exception('DOC.FIB.cbRgFcLcb has an abnormal value.')
'''
self.fibRgFcLcbBlob = FibRgFcLcb(data[0x9A:0x9A+self.cbRgFcLcb*8])
self.cswNew = struct.unpack('<H', data[0x9A+self.cbRgFcLcb*8:0x9A+self.cbRgFcLcb*8+0x02])[0]
self.ole_logger.debug('DOC.FIB.cswNew: ' + str(hex(self.cswNew)))
class DOCFile(OLEBase):
OLE = None
FIB = None
SummaryInfo = None
DocumentSummaryInfo = None
def __init__(self, filename):
self.OLE = None
self.FIB = None
self.SummaryInfo = None
self.DocumentSummaryInfo = None
if os.path.isfile(filename) == False:
self._raise_exception('Invalid file: ' + filename)
self.OLE = OLEFile(filename)
self.ole_logger.debug('***** Parse Word Document *****')
self.FIB = FIB(self.OLE.find_object_by_name('WordDocument'))
def show_rmark_authors(self):
if self.FIB.fibRgFcLcbBlob.fcSttbfRMark != 0:
table_stream = ''
if self.FIB.FIBBase.fWhichTblStm == 1:
table_stream = self.OLE.find_object_by_name('1Table')
            elif self.FIB.FIBBase.fWhichTblStm == 0:
table_stream = self.OLE.find_object_by_name('0Table')
else:
print 'DOC.FIB.FIBBase.fWhichTblStm has an abnormal value.'
return
if len(table_stream) > 0:
#print table_stream
offset = self.FIB.fibRgFcLcbBlob.fcSttbfRMark
length = self.FIB.fibRgFcLcbBlob.lcbSttbfRMark
SttbfRMark = table_stream[offset:offset+length]
fExtend = struct.unpack('<H', SttbfRMark[0x00:0x02])[0]
if fExtend != 0xFFFF:
print 'fExtend has an abnormal value.'
return
cbExtra = struct.unpack('<H', SttbfRMark[0x04:0x06])[0]
if cbExtra != 0:
print 'cbExtra has an abnormal value.'
return
cData = struct.unpack('<H', SttbfRMark[0x02:0x04])[0]
offset = 0
for i in range(0, cData):
cchData = struct.unpack('<H', SttbfRMark[0x06+offset:0x08+offset])[0]
Data = SttbfRMark[0x06+offset+0x02:0x08+offset+cchData*2]
print Data.decode('utf-16')
offset = offset + 0x02 + cchData*2
else:
print 'Failed to read the Table Stream.'
else:
print 'No revision marks or comments author information.'
if __name__ == '__main__':
init_logging(True)
try:
docfile = DOCFile('oletest.doc')
docfile.show_rmark_authors()
except Exception as e:
print e
| z3r0zh0u/pyole | pydoc.py | Python | mit | 14,460 |
from setuptools import setup
from os.path import join as join_path
from os import walk
def files_in(package, directory):
paths = []
for root, dirs, files in walk(join_path(package, directory)):
for file in files:
paths.append(join_path(root, file)[(len(package) + 1):])
return paths
additional_files = []
additional_files.extend(files_in('datdash', 'skeleton'))
additional_files.extend(files_in('datdash', 'javascript'))
setup(
name='DatDash',
version='0.1alpha',
packages=['datdash'],
package_data={'datdash': additional_files},
license='MIT',
long_description=open('README.md').read(),
scripts=['bin/datdash'],
install_requires=[
'Flask',
'CoffeeScript',
'requests',
'pyScss',
'docopt',
]
)
| LuRsT/datdash | setup.py | Python | mit | 825 |
import json
import os
from collections import OrderedDict
from difflib import unified_diff
import pytest
from dash.development._py_components_generation import generate_class
from dash.development.component_generator import reserved_words
from . import _dir, expected_table_component_doc
@pytest.fixture
def component_class(load_test_metadata_json):
return generate_class(
typename="Table",
props=load_test_metadata_json["props"],
description=load_test_metadata_json["description"],
namespace="TableComponents",
)
@pytest.fixture
def component_written_class():
path = os.path.join(_dir, "metadata_required_test.json")
with open(path) as data_file:
json_string = data_file.read()
required_data = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(
json_string
)
return generate_class(
typename="TableRequired",
props=required_data["props"],
description=required_data["description"],
namespace="TableComponents",
)
def test_to_plotly_json(component_class):
c = component_class()
assert c.to_plotly_json() == {
"namespace": "TableComponents",
"type": "Table",
"props": {"children": None},
}
c = component_class(id="my-id")
assert c.to_plotly_json() == {
"namespace": "TableComponents",
"type": "Table",
"props": {"children": None, "id": "my-id"},
}
c = component_class(id="my-id", optionalArray=None)
assert c.to_plotly_json() == {
"namespace": "TableComponents",
"type": "Table",
"props": {"children": None, "id": "my-id", "optionalArray": None},
}
def test_arguments_become_attributes(component_class):
kwargs = {"id": "my-id", "children": "text children", "optionalArray": [[1, 2, 3]]}
component_instance = component_class(**kwargs)
for k, v in list(kwargs.items()):
assert getattr(component_instance, k) == v
def test_repr_single_default_argument(component_class):
c1 = component_class("text children")
c2 = component_class(children="text children")
assert repr(c1) == "Table('text children')"
assert repr(c2) == "Table('text children')"
def test_repr_single_non_default_argument(component_class):
c = component_class(id="my-id")
assert repr(c) == "Table(id='my-id')"
def test_repr_multiple_arguments(component_class):
# Note how the order in which keyword arguments are supplied is
# not always equal to the order in the repr of the component
c = component_class(id="my id", optionalArray=[1, 2, 3])
assert repr(c) == "Table(id='my id', optionalArray=[1, 2, 3])"
def test_repr_nested_arguments(component_class):
c1 = component_class(id="1")
c2 = component_class(id="2", children=c1)
c3 = component_class(children=c2)
assert repr(c3) == "Table(Table(children=Table(id='1'), id='2'))"
def test_repr_with_wildcards(component_class):
c = component_class(id="1", **{"data-one": "one", "aria-two": "two"})
data_first = "Table(id='1', data-one='one', aria-two='two')"
aria_first = "Table(id='1', aria-two='two', data-one='one')"
repr_string = repr(c)
assert repr_string == data_first or repr_string == aria_first
def test_docstring(component_class):
assert not list(
unified_diff(expected_table_component_doc, component_class.__doc__.splitlines())
)
def test_no_events(component_class):
assert not hasattr(component_class, "available_events")
def test_required_props(component_written_class):
with pytest.raises(Exception):
component_written_class()
component_written_class(id="test")
with pytest.raises(Exception):
component_written_class(id="test", lahlah="test")
with pytest.raises(Exception):
component_written_class(children="test")
def test_attrs_match_forbidden_props(component_class):
assert "_.*" in reserved_words, "props cannot have leading underscores"
# props are not added as attrs unless explicitly provided
# except for children, which is always set if it's a prop at all.
expected_attrs = set(reserved_words + ["children"]) - {"_.*"}
c = component_class()
base_attrs = set(dir(c))
extra_attrs = set(a for a in base_attrs if a[0] != "_")
assert (
extra_attrs == expected_attrs
), "component has only underscored and reserved word attrs"
# setting props causes them to show up as attrs
c2 = component_class("children", id="c2", optionalArray=[1])
prop_attrs = set(dir(c2))
assert base_attrs - prop_attrs == set([]), "no attrs were removed"
assert prop_attrs - base_attrs == {
"id",
"optionalArray",
}, "explicit props were added as attrs"
| plotly/dash | tests/unit/development/test_generate_class.py | Python | mit | 4,761 |
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Policy model (Order Based)
- Contains the parent policy model, to evaluate the best actions given a state
"""
from collections import OrderedDict
import logging
import math
from diplomacy_research.models.policy.base_policy_model import GREEDY_DECODER, TRAINING_DECODER, StatsKey, \
OrderProbTokenLogProbs, BasePolicyModel, load_args as load_parent_args
from diplomacy_research.models.state_space import ix_to_order, get_order_tokens, EOS_ID, EOS_TOKEN, PAD_TOKEN, \
POWER_VOCABULARY_IX_TO_KEY, POWER_VOCABULARY_LIST, NB_SUPPLY_CENTERS, STANDARD_TOPO_LOCS, TOKENS_PER_ORDER
# Constants
LOGGER = logging.getLogger(__name__)
def load_args():
""" Load possible arguments
:return: A list of tuple (arg_type, arg_name, arg_value, arg_desc)
"""
return load_parent_args()
class OrderBasedPolicyModel(BasePolicyModel):
""" Policy Model """
def __init__(self, dataset, hparams):
""" Initialization
:param dataset: The dataset that is used to iterate over the data.
:param hparams: A dictionary of hyper parameters with their values
:type dataset: diplomacy_research.models.datasets.supervised_dataset.SupervisedDataset
:type dataset: diplomacy_research.models.datasets.queue_dataset.QueueDataset
"""
from diplomacy_research.utils.tensorflow import tf
hps = lambda hparam_name: self.hparams[hparam_name]
BasePolicyModel.__init__(self, dataset, hparams)
# Learning rate
if not hasattr(self, 'learning_rate') or self.learning_rate is None:
with tf.device(self.cluster_config.worker_device if self.cluster_config else None):
self.learning_rate = tf.Variable(float(hps('learning_rate')), trainable=False, dtype=tf.float32)
# Optimizer
if not hasattr(self, 'optimizer') or self.optimizer is None:
self.optimizer = self.make_optimizer(self.learning_rate)
# Build ops
self.build_policy()
# Decay ops
if not hasattr(self, 'decay_learning_rate') or self.decay_learning_rate is None:
self.decay_learning_rate = self.learning_rate.assign(self.placeholders['learning_rate'])
@property
def _nb_evaluation_loops(self):
""" Contains the number of different evaluation tags we want to compute
            This also represents the number of loops we should do over the validation set
            Some models want to calculate different statistics and require multiple passes to do that
A value of 1 indicates to only run in the main validation loop
A value > 1 indicates to run additional loops only for this model.
"""
return 2
@property
def _evaluation_tags(self):
""" List of evaluation tags (1 list of evaluation tag for each evaluation loop)
e.g. [['Acc_1', 'Acc_5', 'Acc_Tokens'], ['Gr_1', 'Gr_5', 'Gr_Tokens']]
"""
return [['[TF]X-Ent', '[TF]Perplexity', '[TF]Acc_1', '[TF]Acc_1_NoHold', '[TF]Acc_Tokens', '[TF]Acc_Player'],
['[Gr]Acc_1', '[Gr]Acc_1_NoHold', '[Gr]Acc_Tokens', '[Gr]Acc_Player']]
@property
def _early_stopping_tags(self):
""" List of tags to use to detect early stopping
The tags are a tuple of 1) 'min' or 'max' and 2) the tag's name
e.g. [('max', '[Gr]Acc_1'), ('min', '[TF]Perplexity')]
"""
return [('min', '[TF]Perplexity'), ('max', '[Gr]Acc_1')]
@property
def _placeholders(self):
""" Return a dictionary of all placeholders needed by the model """
from diplomacy_research.utils.tensorflow import tf, get_placeholder, get_placeholder_with_default
# Note: 'decoder_type' needs to have a batch_dim to be compatible with TF Serving
# but will be reduced to a scalar with tf.reduce_max
return {
'decoder_type': get_placeholder('decoder_type', shape=[None], dtype=tf.uint8),
'learning_rate': get_placeholder_with_default('learning_rate', 1e-4, shape=(), dtype=tf.float32),
'dropout_rate': get_placeholder_with_default('dropout_rate', 0., shape=(), dtype=tf.float32),
'is_training': get_placeholder_with_default('is_training', False, shape=(), dtype=tf.bool),
'stop_gradient_all': get_placeholder_with_default('stop_gradient_all', False, shape=(), dtype=tf.bool)
}
def _build_policy_initial(self):
""" Builds the policy model (initial step) """
raise NotImplementedError()
@staticmethod
def _get_optimizer(learning_rate):
""" Returns the optimizer to use for this model """
from diplomacy_research.utils.tensorflow import tf
LOGGER.info('Using tf.contrib.opt.LazyAdamOptimizer as the optimizer.')
return tf.contrib.opt.LazyAdamOptimizer(learning_rate=learning_rate)
def _get_session_args(self, decode=False, eval_loop_ix=None):
""" Returns a dict of kwargs to feed to session.run
Expected format: {fetches, feed_dict=None}
"""
hps = lambda hparam_name: self.hparams[hparam_name]
# Detecting if we are doing validation
in_validation, our_validation = False, False
if eval_loop_ix is not None:
in_validation = True
our_validation = eval_loop_ix in self.my_eval_loop_ixs
# --------- Fetches ---------------
train_fetches = {'optimizer_op': self.outputs['optimizer_op'],
'policy_loss': self.outputs['policy_loss']}
eval_fetches = {'policy_loss': self.outputs['policy_loss'],
'argmax_tokens': self.outputs['argmax_tokens'],
'log_probs': self.outputs['log_probs'],
'targets': self.outputs['targets'],
'current_power': self.features['current_power'],
'current_season': self.features['current_season'],
'in_retreat_phase': self.outputs['in_retreat_phase'],
'request_id': self.features['request_id']}
# --------- Feed dict --------------
# Building feed dict
feed_dict = {self.placeholders['decoder_type']: [TRAINING_DECODER], # Batch size of 1
self.placeholders['is_training']: True,
self.placeholders['stop_gradient_all']: False}
# Dropout disabled during debug (batch), validation, or decoding (stats)
if self.hparams['debug_batch'] or in_validation or decode:
feed_dict.update({self.placeholders['dropout_rate']: 0.})
else:
feed_dict.update({self.placeholders['dropout_rate']: hps('dropout_rate')})
# --------- Validation Loop --------------
# Validation Loop - Running one of our validation loops
if our_validation:
decoder_type = {0: TRAINING_DECODER, 1: GREEDY_DECODER}[self.my_eval_loop_ixs.index(eval_loop_ix)]
feed_dict[self.placeholders['decoder_type']] = [decoder_type] # Batch size of 1
feed_dict[self.placeholders['is_training']] = False
return {'fetches': eval_fetches, 'feed_dict': feed_dict}
# Validation Loop - Running someone else validation loop
if in_validation:
return {'feed_dict': feed_dict}
# --------- Training Loop --------------
# Training Loop - We want to decode the specific batch to display stats
if decode:
decoder_type = TRAINING_DECODER
feed_dict[self.placeholders['decoder_type']] = [decoder_type] # Batch size of 1
feed_dict[self.placeholders['is_training']] = False
return {'fetches': eval_fetches, 'feed_dict': feed_dict}
# Training Loop - Training the model
return {'fetches': train_fetches, 'feed_dict': feed_dict}
@staticmethod
def _decode(**fetches):
""" Performs decoding on the output (order_based model)
:param fetches: A dictionary of fetches from the model.
Keys can include:
- selected_tokens / argmax_tokens: [Required] The tokens from the model (Tensor [batch, decoder_length])
- log_probs: [Required] The log probs from the model (Tensor [batch, decoder_length])
- policy_loss: The policy loss for the batch.
- targets: The targets from the model (Tensor [batch, length]). Required for evaluation.
- current_power: The current_power from the model (Tensor [batch,]). Required for evaluation.
- current_season: The current_season from the model (Tensor [batch,]). Required for evaluation.
- in_retreat_phase: Boolean that indicates dislodged units are on the map. ([b,]). Required for evaluation.
- request_id: The unique request id for each item in the batch.
:return: A dictionary of decoded results, including
- 1) decoded_orders:
A list of dictionary (one per batch) where each dict has location as key and a
OrderProbTokenLogProbs tuple as value (i.e. an order, its prob, and the token log probs)
e.g. [{'PAR': (order, prob, log_probs),'MAR': (order, prob, log_probs)},
{'PAR': (order, prob, log_probs),'MAR': (order, prob, log_probs)}]
- 2) various other keys for evaluation
"""
# Missing the required fetches, returning an empty decoded results
if ('selected_tokens' not in fetches and 'argmax_tokens' not in fetches) or 'log_probs' not in fetches:
return {}
# tokens: [batch, dec_len]
# log_probs: [batch, dec_len]
# policy_loss: ()
# targets: [batch, dec_len]
# current_power: [batch]
# current_season: [batch]
# in_retreat_phase: [batch]
# request_ids: [batch]
tokens = fetches.get('selected_tokens', fetches.get('argmax_tokens'))
log_probs = fetches['log_probs']
policy_loss = fetches.get('policy_loss', None)
targets = fetches.get('targets', None)
current_power = fetches.get('current_power', None)
current_season = fetches.get('current_season', None)
in_retreat_phase = fetches.get('in_retreat_phase', None)
request_ids = fetches.get('request_id', None)
# Decoding orders
results = []
result_tokens = []
nb_batches = tokens.shape[0]
for batch_ix in range(nb_batches):
batch_results = OrderedDict()
batch_results_tokens = OrderedDict()
batch_tokens = tokens[batch_ix]
batch_log_probs = log_probs[batch_ix]
nb_waive = 0
# We didn't try to predict orders - Skipping
if not len(batch_tokens) or batch_tokens[0] == [0]: # pylint: disable=len-as-condition
results += [batch_results]
result_tokens += [batch_results_tokens]
continue
for token_ix, token in enumerate(batch_tokens):
if token <= EOS_ID:
continue
order = ix_to_order(token)
# WAIVE orders
if order == 'WAIVE':
loc = 'WAIVE_{}'.format(nb_waive)
nb_waive += 1
# Use normal location and skip if already stored
else:
loc = order.split()[1]
if loc in batch_results:
continue
loc = loc[:3]
# Storing order
batch_results[loc] = OrderProbTokenLogProbs(order=order,
probability=1.,
log_probs=[batch_log_probs[token_ix]])
batch_results_tokens[loc] = [token]
# Done with batch
results += [batch_results]
result_tokens += [batch_results_tokens]
# Returning
return {'decoded_orders': results,
'policy_loss': policy_loss,
'targets': targets,
'tokens': result_tokens,
'current_power': current_power,
'current_season': current_season,
'in_retreat_phase': in_retreat_phase,
'request_id': request_ids,
'log_probs': log_probs}
def _evaluate(self, decoded_results, feed_dict, eval_loop_ix, incl_detailed):
""" Calculates the accuracy of the model
:param decoded_results: The decoded results (output of _decode() function)
:param feed_dict: The feed dictionary that was given to session.run()
:param eval_loop_ix: The current evaluation loop index (-1 for training)
:param incl_detailed: is true if training is over, more statistics can be computed
:return: A tuple consisting of:
1) An ordered dictionary with result_name as key and (weight, value) as value (Regular results)
2) An ordered dictionary with result_name as key and a list of result values (Detailed results)
"""
# Detecting if it's our evaluation or not
if eval_loop_ix == -1:
eval_loop_ix = 0
else:
our_validation = eval_loop_ix in self.my_eval_loop_ixs
if not our_validation:
return OrderedDict(), OrderedDict()
eval_loop_ix = self.my_eval_loop_ixs.index(eval_loop_ix)
# Evaluating
policy_loss = decoded_results['policy_loss'] # Avg X-Ent per unit-order
perplexity = math.exp(policy_loss) if policy_loss <= 100 else float('inf')
targets = decoded_results['targets']
batch_size = targets.shape[0]
nb_locs_per_target = targets.shape[1]
decoded_orders = decoded_results['decoded_orders']
# Logging an error if perplexity is inf
if perplexity == float('inf'):
for request_id, log_probs in zip(decoded_results['request_id'], decoded_results['log_probs']):
if sum(log_probs) <= -100:
LOGGER.error('Request %s has log probs that causes a -inf perplexity.', request_id)
# Accuracy
acc_1_num, denom = 0., 0.
acc_1_no_hold_num, denom_no_hold = 0., 0.
nb_tokens_match, nb_tokens_total = 0., 0.
acc_player_num, denom_player = 0., 0.
# Decoding batch by batch, loc by loc
for batch_ix in range(batch_size):
player_order_mismatch = False
nb_waive = 0
# We didn't learn a policy - Skipping
if not len(targets[batch_ix]) or targets[batch_ix][0] == 0: # pylint: disable=len-as-condition
continue
for loc_ix in range(nb_locs_per_target):
decoded_target = targets[batch_ix][loc_ix]
decoded_target_order = ix_to_order(decoded_target) if decoded_target > EOS_ID else ''
if not decoded_target_order:
break
nb_tokens_total += TOKENS_PER_ORDER
if decoded_target_order == 'WAIVE':
loc = 'WAIVE_{}'.format(nb_waive)
is_hold_order = False
nb_waive += 1
else:
loc = decoded_target_order.split()[1][:3]
is_hold_order = len(decoded_target_order.split()) <= 2 or decoded_target_order.split()[2] == 'H'
# Computing Acc 1
denom += 1.
if not is_hold_order:
denom_no_hold += 1.
# Checking if the target is in the decoded results
if loc in decoded_orders[batch_ix] and decoded_orders[batch_ix][loc].order == decoded_target_order:
acc_1_num += 1.
if not is_hold_order:
acc_1_no_hold_num += 1.
else:
player_order_mismatch = True
# Computing Acc Tokens
tokenized_targets = get_order_tokens(decoded_target_order) + [EOS_TOKEN]
tokenized_targets += [PAD_TOKEN] * (TOKENS_PER_ORDER - len(tokenized_targets))
tokenized_results = [-1] * TOKENS_PER_ORDER
if loc in decoded_orders[batch_ix]:
tokenized_results = get_order_tokens(decoded_orders[batch_ix][loc].order) + [EOS_TOKEN]
tokenized_results += [PAD_TOKEN] * (TOKENS_PER_ORDER - len(tokenized_results))
nb_tokens_match += sum([1. for i in range(TOKENS_PER_ORDER)
if tokenized_targets[i] == tokenized_results[i]])
# Compute accuracy for this phase
if not player_order_mismatch:
acc_player_num += 1
denom_player += 1
# No orders at all
if not denom:
acc_1 = 1.
acc_1_no_hold = 1.
acc_tokens = 1.
acc_player = 1.
else:
acc_1 = acc_1_num / (denom + 1e-12)
acc_1_no_hold = acc_1_no_hold_num / (denom_no_hold + 1e-12)
acc_tokens = nb_tokens_match / (nb_tokens_total + 1e-12)
acc_player = acc_player_num / (denom_player + 1e-12)
# Computing detailed statistics
detailed_results = OrderedDict()
if incl_detailed:
detailed_results = self._get_detailed_results(decoded_results, feed_dict, eval_loop_ix)
# Validating decoder type
decoder_type = [value for tensor, value in feed_dict.items() if 'decoder_type' in tensor.name]
decoder_type = '' if not decoder_type else decoder_type[0][0]
# 0 - Teacher Forcing results
if eval_loop_ix == 0:
assert decoder_type == TRAINING_DECODER
return OrderedDict({'[TF]X-Ent': (denom, policy_loss),
'[TF]Perplexity': (denom, perplexity),
'[TF]Acc_1': (denom, 100. * acc_1),
'[TF]Acc_1_NoHold': (denom_no_hold, 100. * acc_1_no_hold),
'[TF]Acc_Tokens': (nb_tokens_total, 100. * acc_tokens),
'[TF]Acc_Player': (denom_player, 100. * acc_player)}), detailed_results
# 1 - Greedy Results
if eval_loop_ix == 1:
assert decoder_type == GREEDY_DECODER
return OrderedDict({'[Gr]Acc_1': (denom, 100. * acc_1),
'[Gr]Acc_1_NoHold': (denom_no_hold, 100. * acc_1_no_hold),
'[Gr]Acc_Tokens': (nb_tokens_total, 100. * acc_tokens),
'[Gr]Acc_Player': (denom_player, 100. * acc_player)}), detailed_results
# Otherwise, invalid evaluation_loop_ix
raise RuntimeError('Invalid evaluation_loop_ix - Got "%s"' % eval_loop_ix)
@staticmethod
def _get_detailed_results(decoded_results, feed_dict, evaluation_loop_ix):
""" Computes detailed accuracy statistics for the batch
:param decoded_results: The decoded results (output of _decode() function)
:param feed_dict: The feed dictionary that was given to session.run()
        :param evaluation_loop_ix: The current evaluation loop index
:return: An ordered dictionary with result_name as key and a list of result values (Detailed results)
"""
del feed_dict # Unused args
targets = decoded_results['targets']
log_probs = decoded_results['log_probs']
request_ids = decoded_results['request_id']
batch_size = targets.shape[0]
nb_locs_per_target = targets.shape[1]
decoded_orders = decoded_results['decoded_orders']
# Extracting from additional info
for field_name in ['current_power', 'current_season', 'in_retreat_phase']:
if field_name not in decoded_results:
LOGGER.warning('The field "%s" is missing. Cannot compute stats', field_name)
return OrderedDict()
current_power_name = [POWER_VOCABULARY_IX_TO_KEY[current_power]
for current_power in decoded_results['current_power']]
current_season_name = ['SFW'[current_season] for current_season in decoded_results['current_season']]
in_retreat_phase = decoded_results['in_retreat_phase']
# Prefix
prefix = '[TF]' if evaluation_loop_ix == 0 else '[Gr]'
# Building results dict
results = OrderedDict()
results[prefix + 'Accuracy'] = []
results[prefix + 'LogProbsDetails'] = [{}] # {request_id: (log_probs, mismatch)}
for power_name in POWER_VOCABULARY_LIST:
results[prefix + power_name] = []
for order_type in ['H', '-', '- VIA', 'S', 'C', 'R', 'B', 'D', 'WAIVE']:
results[prefix + 'Order %s' % order_type] = []
for season in 'SFW': # Spring, Fall, Winter
results[prefix + 'Season %s' % season] = []
for phase in 'MRA': # Movement, Retreats, Adjustments
results[prefix + 'Phase %s' % phase] = []
for position in range(-1, NB_SUPPLY_CENTERS): # Position -1 is used for Adjustment phases
results[prefix + 'Position %d' % position] = []
for order_loc in sorted(STANDARD_TOPO_LOCS): # Order location
results[prefix + 'Loc %s' % order_loc] = []
# Computing accuracy
for batch_ix in range(batch_size):
request_id = request_ids[batch_ix]
player_orders_mismatch = False
nb_waive = 0
# We didn't learn a policy - Skipping
if not len(targets[batch_ix]) or targets[batch_ix][0] == 0: # pylint: disable=len-as-condition
continue
for loc_ix in range(nb_locs_per_target):
decoded_target = targets[batch_ix][loc_ix]
decoded_target_order = ix_to_order(decoded_target) if decoded_target > EOS_ID else ''
if not decoded_target_order:
break
if decoded_target_order == 'WAIVE':
loc = 'WAIVE_{}'.format(nb_waive)
order_type = 'WAIVE'
nb_waive += 1
else:
loc = decoded_target_order.split()[1][:3]
order_type = decoded_target_order.split()[2] if len(decoded_target_order.split()) > 2 else 'H'
if order_type == '-' and decoded_target_order.split()[-1] == 'VIA':
order_type = '- VIA'
# Determining categories
power_name = current_power_name[batch_ix]
season = current_season_name[batch_ix]
if in_retreat_phase[batch_ix]:
phase = 'R'
order_type = 'R' if order_type in ['-', '- VIA'] else order_type
else:
phase = {'H': 'M', '-': 'M', '- VIA': 'M', 'S': 'M', 'C': 'M',
'R': 'R',
'D': 'A', 'B': 'A', 'WAIVE': 'A'}[order_type]
# Use -1 as position for A phase
position = -1 if phase == 'A' else loc_ix
stats_key = StatsKey(prefix, power_name, order_type, season, phase, position)
# Computing accuracies
success = int(loc in decoded_orders[batch_ix]
and decoded_orders[batch_ix][loc].order == decoded_target_order)
if not success:
player_orders_mismatch = True
results[prefix + 'Accuracy'] += [success]
results[prefix + power_name] += [success]
results[prefix + 'Order %s' % order_type] += [success]
results[prefix + 'Season %s' % season] += [success]
results[prefix + 'Phase %s' % phase] += [success]
results[prefix + 'Position %d' % position] += [success]
if order_type != 'WAIVE':
results[prefix + 'Loc %s' % loc] += [success]
results[stats_key] = results.get(stats_key, []) + [success]
# Storing (log_probs, mismatch)
results[prefix + 'LogProbsDetails'][0][request_id] = (log_probs[batch_ix].sum(),
int(player_orders_mismatch))
# Returning results
return results
@staticmethod
def _post_process_results(detailed_results):
""" Perform post-processing on the detailed results
        :param detailed_results: A dictionary which contains detailed evaluation statistics
:return: A dictionary with the post-processed statistics.
"""
# Adding [Gr]SearchFailure (== 1. iff. logprob(label) > logprob(greedy) and greedy != label)
# Adding [TF]Acc_Player and [Gr]Acc_Player
# Removing LogProbsDetails
# Make sure the detailed results have the correct key (i.e. they have not yet been post-processed)
for prefix in ['[TF]', '[Gr]']:
assert prefix + 'LogProbsDetails' in detailed_results
# Building a dictionary {request_id: (log_probs, mismatch)}
tf_items, gr_items = {}, {}
for tf_item in detailed_results['[TF]LogProbsDetails']:
tf_items.update(tf_item)
for gr_item in detailed_results['[Gr]LogProbsDetails']:
gr_items.update(gr_item)
# Making sure we have processed the same number of TF items and Gr items
tf_nb_items = len(tf_items)
gr_nb_items = len(gr_items)
if tf_nb_items != gr_nb_items:
LOGGER.warning('Got a different number of items between [TF] (%d items) and [Gr] (%d items)',
tf_nb_items, gr_nb_items)
# Computing search failure and mismatch
search_failure, gr_acc_player, tf_acc_player = [], [], []
for request_id in tf_items:
if request_id not in gr_items:
LOGGER.warning('Item %s was computed using [TF], but is missing for [Gr]. Skipping.', request_id)
continue
tf_logprobs, tf_mismatch = tf_items[request_id]
gr_logprobs, gr_mismatch = gr_items[request_id]
# Computing stats
if gr_mismatch:
search_failure += [int(tf_logprobs > gr_logprobs)]
tf_acc_player += [int(not tf_mismatch)]
gr_acc_player += [int(not gr_mismatch)]
# Removing extra keys and adding new keys
detailed_results['[Gr]SearchFailure'] = search_failure
detailed_results['[TF]Acc_Player'] = tf_acc_player
detailed_results['[Gr]Acc_Player'] = gr_acc_player
del detailed_results['[TF]LogProbsDetails']
del detailed_results['[Gr]LogProbsDetails']
# Returning post-processed results
return detailed_results
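# A minimal numeric sketch of the SearchFailure rule computed above in
# _post_process_results (the log-prob values are hypothetical, for illustration only):
#
#     tf_logprobs, tf_mismatch = -2.1, 0     # teacher forcing: orders match the labels
#     gr_logprobs, gr_mismatch = -3.4, 1     # greedy decode: lower score, orders mismatch
#     search_failure = int(tf_logprobs > gr_logprobs)    # -> 1
#
# i.e. the model assigns a higher log probability to the gold orders than to its own
# greedy decode, so the mismatch is attributed to the decoding search rather than the model.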
| diplomacy/research | diplomacy_research/models/policy/order_based/model.py | Python | mit | 28,290 |
"""
Django settings for sippa project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import environ
# three folders back (/a/b/c/ - 3 = /)
root = environ.Path(__file__) - 3
# set default values and casting
env = environ.Env(DEBUG=(bool, False),)
# reading .env file
environ.Env.read_env()
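# A minimal example .env file (illustrative values only; these are the variables read
# further down in this module):
#
#     DEBUG=True
#     SECRET_KEY=replace-with-a-long-random-string
#     AWS_ACCESS_KEY_ID=AKIAXXXXXXXXXXXXXXXX
#     AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx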
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core.apps.CoreConfig',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sippa.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sippa.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Fortaleza'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
AWS_AUTO_CREATE_BUCKET = True
AWS_STORAGE_BUCKET_NAME = 'sippa_files'
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
AWS_S3_FILE_OVERWRITE = False
AWS_QUERYSTRING_AUTH = False
AWS_S3_SECURE_URLS = False
BASE_AWS_URL = 'https://%s' % AWS_S3_CUSTOM_DOMAIN
STATICFILES_LOCATION = 'static'
STATIC_URL = '%s/%s/' % (BASE_AWS_URL, STATICFILES_LOCATION)
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = '%s/%s/' % (BASE_AWS_URL, MEDIAFILES_LOCATION)
# For custom admin
ADMIN_SITE_HEADER = 'SIPPA'
CORS_URLS_REGEX = r'^.*$'
CORS_ALLOW_METHODS = (
'DELETE',
'GET',
'OPTIONS',
'PATCH',
'POST',
'PUT',
)
| luissiqueira/sippa-no-api | sippa/settings.py | Python | mit | 4,223 |
# encoding: utf-8
from views import dogs, breeds, users
urls = [
('/dogs/', dogs.DogAPI.as_view('dog_api')),
('/user_dogs/', dogs.UserDogsAPI.as_view('user_dogs_api')),
('/breeds/', breeds.BreedAPI.as_view('breed_api')),
('/breed_dogs/', breeds.BreedDogsAPI.as_view('breed_dogs_api')),
('/users/', users.UserAPI.as_view('user_api')),
('/login/', users.LoginAPI.as_view('login_api')),
('/logout/', users.LogoutAPI.as_view('logout_api'))
]
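# The views appear to be Flask-style MethodView classes (note the .as_view(name) calls).
# A sketch of how this list is presumably consumed when the application is created
# (the `app` object is an assumption, it is not defined in this module):
#
#     for rule, view in urls:
#         app.add_url_rule(rule, view_func=view)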
| gabrielecker/DogAdoption-Backend | project/urls.py | Python | mit | 467 |
import json
from unittest import TestCase
from mock import Mock, patch, create_autospec
class CommandTestCase(TestCase):
"""A TestCase for unit testing bot requests and responses.
To use it:
* provide your bot in *bot_class* (and optionally a config).
* use self.send_message inside your test cases.
It returns what your command returns.
"""
bot_class = None
config = {}
def setUp(self):
self.bot = self.bot_class('slack_token', self.config.copy())
self.bot.name = str(self.bot_class)
self.bot.my_mention = None # we'll just send test messages by name, not mention.
# This patch is introspected to find command responses (and also prevents interaction with slack).
self.bot._handle_command_response = create_autospec(self.bot._handle_command_response)
self.ws = Mock()
self.slack_patcher = patch.object(self.bot, 'slack', autospec=True)
self.slack_mock = self.slack_patcher.start()
def tearDown(self):
self.slack_patcher.stop()
def send_message(self, command, message_delimiter=':', **event):
"""Return the bot's response to a given command.
        :param command: the message to the bot.
            Do not include the bot's name, just the part after the delimiter.
        :param message_delimiter: the separator between the bot's name and the
            command text (a colon by default).
        :param event: kwargs that will override the event sent to the bot.
            Useful when your bot expects messages from a certain user or channel.
"""
_event = {
'type': 'message',
'text': "%s%s%s" % (self.bot.name, message_delimiter, command),
'channel': None,
}
self.assertTrue(_event['text'].startswith("%s%s" % (self.bot.name, message_delimiter)))
_event.update(event)
self.bot._on_message(self.ws, json.dumps(_event))
args, _ = self.bot._handle_command_response.call_args
return args[0]
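# A minimal usage sketch (PingBot and its 'ping' command are hypothetical, not part of
# slouch; they only illustrate how CommandTestCase is meant to be subclassed):
#
#     class PingBotTestCase(CommandTestCase):
#         bot_class = PingBot
#         config = {}
#
#         def test_ping(self):
#             response = self.send_message('ping')
#             self.assertEqual(response, 'pong')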
| venmo/slouch | slouch/testing.py | Python | mit | 1,912 |
import datetime
import requests
import numpy as np
import pandas as pd
import sys
# August 3, 2015: Updated the getNipa() method to accomodate possible differences in data availability for series in tables.
# Cleaned up and organized the code substantially.
class initialize:
def __init__(self,apiKey=None):
''' Saves the API key.'''
self.apiKey = apiKey
# 1. Methods for getting information about the available datasets, parameters, and parameter values.
def getDataSetList(self):
'''Method returns a list of describing the datasets available through the BEA API. No arguments'''
r = requests.get('http://www.bea.gov/api/data?&UserID='+self.apiKey+'&method=GETDATASETLIST&ResultFormat=JSON&')
rJson = r.json()
lines='Datasets available through the BEA API:\n\n'
n=1
dataSetList = []
for element in rJson['BEAAPI']['Results']['Dataset']:
if np.mod(n,5)==0:
lines = lines+str(n).ljust(4,' ')+element['DatasetName'].ljust(20,' ') +': '+element['DatasetDescription']+'\n\n'
dataSetList.append(element['DatasetName'])
else:
lines = lines+str(n).ljust(4,' ')+element['DatasetName'].ljust(20,' ') +': '+element['DatasetDescription']+'\n'
dataSetList.append(element['DatasetName'])
n+=1
print(lines)
self.dataSets = lines
self.dataSetList = dataSetList
def getParameterList(self,dataSetName):
'''Method returns a list of the parameters for a given dataset. Argument: one of the dataset names returned by getDataSetList().'''
r = requests.get('http://www.bea.gov/api/data?&UserID='+self.apiKey+'&method=GETPARAMETERLIST&datasetname='+dataSetName+'&ResultFormat=JSON&')
rJson = r.json()
lines = 'Parameters for the '+dataSetName+' dataset.\n\n'
strWidth = 25
descrWidth = 50
parameterList = []
def splitString(origString, maxLength):
splitLines = []
line = ''
for word in origString.split(' '):
if len(line)+1+len(word)<maxLength:
line = line+word+' '
else:
splitLines.append(line)
line = word+' '
if len(line) != 0:
splitLines.append(line)
return splitLines
for element in rJson['BEAAPI']['Results']['Parameter']:
elementKeys = list(element.keys())
lines = lines+'Parameter name'.ljust(strWidth,' ') +' '+element['ParameterName']+'\n'
split = splitString(element['ParameterDescription'],descrWidth)
for n,line in enumerate(split):
if n ==0:
lines = lines+'Description'.ljust(strWidth,' ') + ' '+line+'\n'
else:
lines = lines+' '.ljust(strWidth,' ') + ' '+line+'\n'
parameterList.append(element['ParameterName'])
if element['ParameterIsRequiredFlag']==0:
lines = lines+'Required?'.ljust(strWidth,' ') + ' No'+'\n'
else:
lines = lines+'Required?'.ljust(strWidth,' ') + ' Yes'+'\n'
if 'AllValue' in elementKeys:
if element['AllValue']=='':
lines = lines+'\"All\" Value'.ljust(strWidth,' ') + ' N/A'+'\n'
else:
lines = lines+'\"All\" Value'.ljust(strWidth,' ') +' '+element['AllValue']+'\n'
# if element['MultipleAcceptedFlag']==0:
# lines = lines+'Multiple (list) accepted?'.ljust(strWidth,' ') + ' No'+'\n'
# else:
# lines = lines+'Multiple (list) accepted?'.ljust(strWidth,' ') + ' Yes'+'\n'
lines = lines+'Data type'.ljust(strWidth,' ') + ' '+element['ParameterDataType']+'\n'
if 'ParameterDefaultValue' in elementKeys:
if element['ParameterDefaultValue']=='':
lines = lines+'Default value'.ljust(strWidth,' ') + ' N/A'+'\n\n\n'
else:
lines = lines+'Default value'.ljust(strWidth,' ') + ' '+element['ParameterDefaultValue']+'\n\n\n'
else:
lines = lines+'\n\n'
print(lines)
self.parameters = lines
self.parameterList = parameterList
def getParameterValues(self,dataSetName, parameterName):
'''Method returns a list of the values accepted for a given parameter of a dataset.
Arguments: one of the dataset names returned by getDataSetList() and a parameter returned by getParameterList().'''
r = requests.get('http://bea.gov/api/data?&UserID='+self.apiKey+'&method=GetParameterValues&datasetname='+dataSetName+'&ParameterName='+parameterName+'&')
rJson = r.json()
lines='Values accepted for '+parameterName+' in dataset '+dataSetName+':\n\n'
if dataSetName.lower() == 'nipa' and parameterName.lower() == 'showmillions' and 'ParamValue' not in rJson['BEAAPI']['Results'].keys():
lines+= 'ShowMillions'.ljust(20,' ')+': N\n'
lines+= 'Description'.ljust(20,' ')+': Units in billions of USD (default)\n\n'
lines+= 'ShowMillions'.ljust(20,' ')+': Y\n'
lines+= 'Description'.ljust(20,' ')+': Units in millions of USD\n\n'
else:
descrWidth = 50
def splitString(origString, maxLength):
splitLines = []
line = ''
for word in origString.split(' '):
if len(line)+1+len(word)<maxLength:
line = line+word+' '
else:
splitLines.append(line)
line = word+' '
if len(line) != 0:
splitLines.append(line)
return splitLines
columnNames = []
for n,element in enumerate(rJson['BEAAPI']['Results']['ParamValue']):
for key in element.keys():
if key not in columnNames:
columnNames.append(key)
            data = np.zeros([n+1,len(columnNames)])
data[:] = np.nan
tempFrame = pd.DataFrame(data,columns = columnNames)
for n,element in enumerate(rJson['BEAAPI']['Results']['ParamValue']):
for key,value in element.items():
tempFrame.loc[n,key] = element[key]
# Sort tempFrame if the parameter falls into one of a few special categories
if dataSetName.lower() == 'nipa':
if parameterName.lower() =='tableid':
                    tempFrame = tempFrame.sort(columns = ['TableID'])
elif parameterName.lower() =='year':
tempFrame = tempFrame[['TableID','FirstAnnualYear','LastAnnualYear','FirstQuarterlyYear','LastQuarterlyYear','FirstMonthlyYear','LastMonthlyYear']]
                    tempFrame = tempFrame.sort(columns = ['TableID'])
elif dataSetName.lower() == 'fixedassets':
if parameterName.lower() =='tableid':
                    tempFrame = tempFrame.sort(columns = ['TableID'])
elif parameterName.lower() =='year':
tempFrame = tempFrame[['TableID','FirstAnnualYear','LastAnnualYear']]
                    tempFrame = tempFrame.sort(columns = ['TableID'])
elif dataSetName.lower() == 'gdpbyindustry':
if parameterName.lower() =='tableid':
                    tempFrame = tempFrame.sort(columns = ['Key'])
for i in tempFrame.index:
for c in tempFrame.columns:
split = splitString(tempFrame.loc[i,c],descrWidth)
for n, words in enumerate(split):
if n==0:
try:
lines+=c.ljust(20,' ')+': '+str(int(words))+'\n'
except:
lines+=c.ljust(20,' ')+': '+str(words)+'\n'
else:
try:
lines+=''.ljust(20,' ')+' '+str(words)+'\n'
except:
lines+=''.ljust(20,' ')+' '+str(words)+'\n'
lines+='\n'
print(lines)
self.parameterValues = lines
    # 2. Methods for retrieving data.
# 2.1 Regional Data (statistics by state, county, and MSA)
def getRegionalData(self,KeyCode=None,GeoFips='STATE',Year='ALL'):
'''Retrieve state and regional data.
Name Type Required? Multiple values? "All" Value Default
KeyCode int yes no N/A
GeoFips str no yes 'STATE' or 'COUNTY' or 'MSA' STATE
Year int no yes "ALL" ALL
'''
# if type(KeyCode)==list:
# KeyCode = ','.join(KeyCode)
# if type(Year)==list:
# Year = [str(y) for y in Year]
# Year = ','.join(Year)
# if type(GeoFips)==list:
# GeoFips = ','.join(GeoFips)
uri = 'http://bea.gov/api/data/?UserID='+self.apiKey+'&method=GetData&datasetname=RegionalData&KeyCode='+str(KeyCode)+'&Year='+str(Year)+'&GeoFips='+str(GeoFips)+'&ResultFormat=JSON&'
r = requests.get(uri)
rJson = r.json()
dataDict = {}
# dates = []
# YearList = []
# name =''
columnNames = []
dates = []
try:
for element in rJson['BEAAPI']['Results']['Data']:
if element['GeoName'] not in columnNames:
columnNames.append(element['GeoName'])
date = convertDate(element['TimePeriod'],'A')
if date not in dates:
dates.append(date)
data = np.zeros([len(dates),len(columnNames)])
data[:] = np.nan
frame = pd.DataFrame(data,columns = columnNames, index = dates)
for element in rJson['BEAAPI']['Results']['Data']:
date = convertDate(element['TimePeriod'],'A')
if 'DataValue' in element.keys():
frame.loc[date,element['GeoName']] = float(element['DataValue'].replace(',',''))
frame = frame.sort_index()
note = rJson['BEAAPI']['Results']['PublicTable']+' - '+rJson['BEAAPI']['Results']['Statistic']+' - '+rJson['BEAAPI']['Results']['UnitOfMeasure']
return {'note':note,'data':frame}
except:
print('Invalid input.',sys.exc_info()[0])
# 2.2 NIPA (National Income and Product Accounts)
def getNipa(self,TableID=None,Frequency='A',Year='X',ShowMillions='N'):
'''Retrieve data from a NIPA table.
Name Type Required? "All" Value Default
TableID int yes N/A None
Frequency(A/Q) str yes N/A None
Year int yes "X" "X"
ShowMillions str no N/A 'N'
'''
if Frequency=='M':
            print('Error: monthly Frequency is not available for NIPA tables.')
uri = 'http://bea.gov/api/data/?UserID='+self.apiKey+'&method=GetData&datasetname=NIPA&TableID='+str(TableID)+'&Frequency='+Frequency+'&Year='+str(Year)+'&ShowMillions='+ShowMillions+'&ResultFormat=JSON&'
r = requests.get(uri)
rJson = r.json()
columnNames = []
dates = []
try:
for element in rJson['BEAAPI']['Results']['Data']:
if element['LineDescription'] not in columnNames:
columnNames.append(element['LineDescription'])
date = convertDate(element['TimePeriod'],Frequency)
if date not in dates:
dates.append(date)
data = np.zeros([len(dates),len(columnNames)])
data[:] = np.nan
frame = pd.DataFrame(data,columns = columnNames, index = dates)
for element in rJson['BEAAPI']['Results']['Data']:
date = convertDate(element['TimePeriod'],Frequency)
frame.loc[date,element['LineDescription']] = float(element['DataValue'].replace(',',''))
frame = frame.sort_index()
note = rJson['BEAAPI']['Results']['Notes'][0]['NoteText']
return {'note':note,'data':frame}
except:
print('Error: invalid input.')
# # 3.3 NIUnderlyingDetail (National Income and Product Accounts)
# def getNIUnderlyingDetail(self,TableID,Frequency='A',Year='X'):
# if type(Year)==list:
# Year = [str(y) for y in Year]
# Year = ','.join(Year)
# uri = 'http://bea.gov/api/data/?UserID='+apiKey+'&method=GetData&datasetname=NIUnderlyingDetail&TableID='+str(TableID)+'&Year='+str(Year)+'&Frequency='+str(Frequency)+'&ResultFormat=JSON&'
# r = requests.get(uri)
# rJson = r.json()
# columnNames = []
# dates = []
# try:
# 3.4 Fixed Assets
def getFixedAssets(self,TableID=None,Year='X'):
uri = 'http://bea.gov/api/data/?UserID='+self.apiKey+'&method=GetData&datasetname=FixedAssets&TableID='+str(TableID)+'&Year='+str(Year)+'&ResultFormat=JSON&'
r = requests.get(uri)
rJson = r.json()
columnNames = []
dates = []
try:
for element in rJson['BEAAPI']['Results']['Data']:
if element['LineDescription'] not in columnNames:
columnNames.append(element['LineDescription'])
date = convertDate(element['TimePeriod'],'A')
if date not in dates:
dates.append(date)
data = np.zeros([len(dates),len(columnNames)])
data[:] = np.nan
frame = pd.DataFrame(data,columns = columnNames, index = dates)
for element in rJson['BEAAPI']['Results']['Data']:
date = convertDate(element['TimePeriod'],'A')
frame.loc[date,element['LineDescription']] = float(element['DataValue'].replace(',',''))
frame = frame.sort_index()
note = rJson['BEAAPI']['Results']['Notes'][0]['NoteText']
return {'note':note,'data':frame}
except:
print('Error: invalid input.')
# 3.5
# def getMne(self,DirectionOfInvestment=None,OwnershipLevel=None,NonbankAffiliatesOnly=None,Classification=None,Country='all',Industry='all',Year='all',State='all',SeriesID=0):
# 3.6 Gross domestic product by industry
def getGdpByIndustry(self,TableID =None, Industry='ALL',Frequency='A',Year = 'ALL'):
uri = 'http://bea.gov/api/data/?UserID='+self.apiKey+'&method=GetData&datasetname=GDPbyIndustry&TableID='+str(TableID)+'&Industry='+str(Industry)+'&Frequency='+str(Frequency)+'&Year='+str(Year)+'&ResultFormat=JSON&'
r = requests.get(uri)
rJson = r.json()
columnNames = []
dates = []
try:
for element in rJson['BEAAPI']['Results']['Data']:
if element['IndustrYDescription'] not in columnNames:
columnNames.append(element['IndustrYDescription'])
date = convertDate(element['Year'],Frequency)
if date not in dates:
dates.append(date)
data = np.zeros([len(dates),len(columnNames)])
data[:] = np.nan
frame = pd.DataFrame(data,columns = columnNames, index = dates)
for element in rJson['BEAAPI']['Results']['Data']:
date = convertDate(element['Year'],Frequency)
frame.loc[date,element['IndustrYDescription']] = float(element['DataValue'].replace(',',''))
frame = frame.sort_index()
note = rJson['BEAAPI']['Results']['Notes'][0]['NoteText']
return {'note':note,'data':frame}
except:
print('Error: invalid input.')
# 3.7 ITA: International transactions
def getIta(self,Indicator=None,AreaOrCountry='ALL',Frequency='A',Year='ALL'):
if Indicator=='ALL' and 'ALL' in AreaOrCountry:
print('Warning: You may not select \'ALL\' for both Indicator and AreaOrCountry')
else:
            uri = 'http://bea.gov/api/data/?UserID='+self.apiKey+'&method=GetData&datasetname=ita&Indicator='+str(Indicator)+'&AreaOrCountry='+str(AreaOrCountry)+'&Frequency='+str(Frequency)+'&Year='+str(Year)+'&ResultFormat=JSON&'
r = requests.get(uri)
rJson = r.json()
columnNames = []
dates = []
try:
if AreaOrCountry.lower() == 'all':
columnNames = []
dates = []
for element in rJson['BEAAPI']['Results']['Data']:
if element['AreaOrCountry'] not in columnNames:
columnNames.append(element['AreaOrCountry'])
date = convertDate(element['Year'],Frequency)
if date not in dates:
dates.append(date)
data = np.zeros([len(dates),len(columnNames)])
data[:] = np.nan
frame = pd.DataFrame(data,columns = columnNames, index = dates)
for element in rJson['BEAAPI']['Results']['Data']:
date = convertDate(element['Year'],Frequency)
if len(element['DataValue'].replace(',',''))>0:
frame.loc[date,element['AreaOrCountry']] = float(element['DataValue'].replace(',',''))
else:
frame.loc[date,element['AreaOrCountry']] = np.nan
else:
columnNames = []
dates = []
for element in rJson['BEAAPI']['Results']['Data']:
if element['Indicator'] not in columnNames:
columnNames.append(element['Indicator'])
date = convertDate(element['Year'],Frequency)
if date not in dates:
dates.append(date)
data = np.zeros([len(dates),len(columnNames)])
data[:] = np.nan
frame = pd.DataFrame(data,columns = columnNames, index = dates)
for element in rJson['BEAAPI']['Results']['Data']:
date = convertDate(element['Year'],Frequency)
if len(element['DataValue'].replace(',',''))>0:
frame.loc[date,element['Indicator']] = float(element['DataValue'].replace(',',''))
else:
frame.loc[date,element['Indicator']] = np.nan
frame = frame.sort_index()
units = rJson['BEAAPI']['Results']['Data'][0]['CL_UNIT']
mult = rJson['BEAAPI']['Results']['Data'][0]['UNIT_MULT']
if int(mult) == 3:
units = 'Thousands of '+units
elif int(mult) == 6:
units = 'Millions of '+units
elif int(mult) == 9:
units = 'Billions of '+units
if Frequency.lower() == 'q':
Notes = rJson['BEAAPI']['Results']['Notes']
for note in Notes:
if note['NoteRef'] == 'Q':
noteQ = note['NoteText']
units = units + ', '+ noteQ
return {'note':units,'data':frame}
except:
print(rJson['BEAAPI']['Error']['ErrorDetail']['Description'])
# 3.8 IIP: International investment position
def getIip(self,TypeOfInvestment=None,Component=None,Frequency='A',Year='ALL'):
uri = 'http://bea.gov/api/data/?UserID='+self.apiKey+'&method=GetData&datasetname=IIP&TypeOfInvestment='+str(TypeOfInvestment)+'&Component='+str(Component)+'&Year='+str(Year)+'&Frequency='+str(Frequency)+'&ResultFormat=JSON&'
r = requests.get(uri)
rJson = r.json()
columnNames = []
dates = []
try:
for element in rJson['BEAAPI']['Data']:
if element['TimeSeriesDescription'] not in columnNames:
columnNames.append(element['TimeSeriesDescription'])
date = convertDate(element['TimePeriod'],Frequency)
if date not in dates:
dates.append(date)
data = np.zeros([len(dates),len(columnNames)])
data[:] = np.nan
frame = pd.DataFrame(data,columns = columnNames, index = dates)
for element in rJson['BEAAPI']['Data']:
date = convertDate(element['TimePeriod'],Frequency)
if len(element['DataValue'].replace(',','')) ==0:
frame.loc[date,element['TimeSeriesDescription']] = np.nan
else:
frame.loc[date,element['TimeSeriesDescription']] = float(element['DataValue'].replace(',',''))
frame = frame.sort_index()
units = rJson['BEAAPI']['Data'][0]['CL_UNIT']
mult = rJson['BEAAPI']['Data'][0]['UNIT_MULT']
if int(mult) == 3:
units = 'Thousands of '+units
elif int(mult) == 6:
units = 'Millions of '+units
elif int(mult) == 9:
units = 'Billions of '+units
            return {'note':units,'data':frame}
except:
print('Error: invalid input.')
# 3.9 Regional Income: detailed regional income and employment data sets.
def getRegionalIncome(self,TableName=None,LineCode=None,GeoFips=None,Year ='ALL'):
'''GeoFips can equal STATE
COUNTY
MSA
MIC
PORT
DIV
CSA'''
uri = 'http://bea.gov/api/data/?UserID='+self.apiKey+'&method=GetData&datasetname=RegionalIncome&TableName='+str(TableName)+'&LineCode='+str(LineCode)+'&Year='+str(Year)+'&GeoFips='+str(GeoFips)+'&ResultFormat=JSON&'
r = requests.get(uri)
rJson = r.json()
columnNames = []
dates = []
Frequency = 'A'
try:
for element in rJson['BEAAPI']['Results']['Data']:
if element['GeoName'] not in columnNames:
columnNames.append(element['GeoName'])
date = convertDate(element['TimePeriod'],Frequency)
if date not in dates:
dates.append(date)
data = np.zeros([len(dates),len(columnNames)])
data[:] = np.nan
frame = pd.DataFrame(data,columns = columnNames, index = dates)
for element in rJson['BEAAPI']['Results']['Data']:
date = convertDate(element['TimePeriod'],Frequency)
if len(element['DataValue'].replace(',','')) ==0:
frame.loc[date,element['GeoName']] = np.nan
else:
frame.loc[date,element['GeoName']] = float(element['DataValue'].replace(',',''))
frame = frame.sort_index()
units = rJson['BEAAPI']['Results']['UnitOfMeasure']
            return {'note':units,'data':frame}
except:
print('Error: invalid input.')
# 3.10 Regional product: detailed state and MSA product data sets
def getRegionalProduct(self,Component=None,IndustryId=1,GeoFips='State',Year ='ALL'):
'''GeoFips can equal either STATE or MSA'''
uri = 'http://bea.gov/api/data/?UserID='+self.apiKey+'&method=GetData&datasetname=regionalProduct&Component='+str(Component)+'&IndustryId='+str(IndustryId)+'&Year='+str(Year)+'&GeoFips='+str(GeoFips)+'&ResultFormat=JSON&'
r = requests.get(uri)
rJson = r.json()
columnNames = []
dates = []
Frequency = 'A'
try:
for element in rJson['BEAAPI']['Results']['Data']:
if element['GeoName'] not in columnNames:
columnNames.append(element['GeoName'])
date = convertDate(element['TimePeriod'],Frequency)
if date not in dates:
dates.append(date)
data = np.zeros([len(dates),len(columnNames)])
data[:] = np.nan
frame = pd.DataFrame(data,columns = columnNames, index = dates)
for element in rJson['BEAAPI']['Results']['Data']:
date = convertDate(element['TimePeriod'],Frequency)
if len(element['DataValue'].replace(',','')) ==0:
frame.loc[date,element['GeoName']] = np.nan
else:
frame.loc[date,element['GeoName']] = float(element['DataValue'].replace(',',''))
frame = frame.sort_index()
note = rJson['BEAAPI']['Results']['Data'][0]['CL_UNIT']
            return {'note':note,'data':frame}
except:
print('Error: invalid input.')
# Auxiliary function.
def convertDate(dateString,Frequency):
'''Function for converting the date strings from BEA with quarter indicators into datetime format'''
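    # Examples (illustrative): convertDate('2014Q3', 'Q') -> datetime.datetime(2014, 7, 1)
    #                          convertDate('2005', 'A')   -> datetime.datetime(2005, 1, 1)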
if Frequency=='A':
month='01'
elif Frequency=='Q':
if dateString[-1]=='1':
month='01'
elif dateString[-1]=='2':
month='04'
elif dateString[-1]=='3':
month='07'
else:
month='10'
return datetime.datetime.strptime(dateString[0:4]+'-'+month+'-01','%Y-%m-%d') | letsgoexploring/beapy-package | beapy.py | Python | mit | 25,953 |
# vim:ts=4:sts=4:sw=4:expandtab
"""The public API of satori.events.
"""
import hashlib
import random
import traceback
from satori.objects import Object
from satori.events.misc import Namespace
from .dispatcher import Dispatcher
from .protocol import KeepAlive, Disconnect, Attach, Detach, Map, Unmap, Send, Receive
__all__ = (
'Event',
'MappingId', 'QueueId',
'Manager',
)
class Event(Namespace):
"""Describes an event.
"""
pass
class MappingId(str): # pylint: disable-msg=R0904
"""A (globally-unique) identifier of a mapping.
"""
def __new__(cls, value=None):
if value is None:
value = hashlib.md5(str(random.getrandbits(512))).hexdigest()
return str.__new__(cls, value)
class QueueId(str): # pylint: disable-msg=R0904
"""A (globally-unique) identifier of an event queue.
"""
pass
class Manager(Object):
"""Abstract. Manages Clients within a single process.
"""
def __init__(self):
self.dispatcher = Dispatcher()
def run(self):
"""Execute this Manager's event loop.
"""
handlers = {
KeepAlive: self._handleKeepAlive,
Attach: self._handleAttach,
Detach: self._handleDetach,
Map: self._handleMap,
Unmap: self._handleUnmap,
Send: self._handleSend,
Receive: self._handleReceive,
Disconnect: self._handleDisconnect,
}
while True:
client = self.scheduler.next()
if client is None:
break
try:
command = client.recvCommand()
except StopIteration:
command = Disconnect()
except Exception:
print 'Exception in client, removing from queue'
traceback.print_exc()
self.scheduler.remove(client)
else:
handlers[command.__class__](command, client)
def _handleKeepAlive(self, command, sender):
raise NotImplementedError()
def _handleDisconnect(self, command, sender):
raise NotImplementedError()
def _handleAttach(self, command, sender):
raise NotImplementedError()
def _handleDetach(self, command, sender):
raise NotImplementedError()
def _handleMap(self, command, sender):
raise NotImplementedError()
def _handleUnmap(self, command, sender):
raise NotImplementedError()
def _handleSend(self, command, sender):
raise NotImplementedError()
def _handleReceive(self, command, sender):
raise NotImplementedError()
| zielmicha/satori | satori.events/satori/events/api.py | Python | mit | 2,752 |
# Copyright (C) 2002, Thomas Hamelryck ([email protected])
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from types import StringType
from Bio.Alphabet import ProteinAlphabet
from Bio.Seq import Seq
from Bio.SCOP.Raf import to_one_letter_code
from Bio.PDB.PDBExceptions import PDBException
from Bio.PDB.Residue import Residue, DisorderedResidue
from Vector import calc_dihedral, calc_angle
__doc__="""
Polypeptide related classes (construction and representation).
Example:
>>> ppb=PPBuilder()
>>> for pp in ppb.build_peptides(structure):
>>>     print pp.get_sequence()
"""
standard_aa_names=["ALA", "CYS", "ASP", "GLU", "PHE", "GLY", "HIS", "ILE", "LYS",
"LEU", "MET", "ASN", "PRO", "GLN", "ARG", "SER", "THR", "VAL",
"TRP", "TYR"]
aa1="ACDEFGHIKLMNPQRSTVWY"
aa3=standard_aa_names
d1_to_index={}
dindex_to_1={}
d3_to_index={}
dindex_to_3={}
# Create some lookup tables
for i in range(0, 20):
n1=aa1[i]
n3=aa3[i]
d1_to_index[n1]=i
dindex_to_1[i]=n1
d3_to_index[n3]=i
dindex_to_3[i]=n3
def index_to_one(index):
"""
Index to corresponding one letter amino acid name.
For example: 0 to A.
"""
return dindex_to_1[index]
def one_to_index(s):
"""
One letter code to index.
For example: A to 0.
"""
return d1_to_index[s]
def index_to_three(i):
"""
Index to corresponding three letter amino acid name.
For example: 0 to ALA.
"""
return dindex_to_3[i]
def three_to_index(s):
"""
Three letter code to index.
For example: ALA to 0.
"""
return d3_to_index[s]
def three_to_one(s):
"""
Three letter code to one letter code.
For example: ALA to A.
"""
i=d3_to_index[s]
return dindex_to_1[i]
def one_to_three(s):
"""
One letter code to three letter code.
For example: A to ALA.
"""
i=d1_to_index[s]
return dindex_to_3[i]
def is_aa(residue, standard=0):
"""
Return 1 if residue object/string is an amino acid.
@param residue: a L{Residue} object OR a three letter amino acid code
@type residue: L{Residue} or string
@param standard: flag to check for the 20 AA (default false)
@type standard: boolean
"""
if not type(residue)==StringType:
residue=residue.get_resname()
residue=residue.upper()
if standard:
return d3_to_index.has_key(residue)
else:
return to_one_letter_code.has_key(residue)
class Polypeptide(list):
"""
A polypeptide is simply a list of L{Residue} objects.
"""
def get_ca_list(self):
"""
@return: the list of C-alpha atoms
@rtype: [L{Atom}, L{Atom}, ...]
"""
ca_list=[]
for res in self:
ca=res["CA"]
ca_list.append(ca)
return ca_list
def get_phi_psi_list(self):
"""
Return the list of phi/psi dihedral angles
"""
ppl=[]
lng=len(self)
for i in range(0, lng):
res=self[i]
try:
n=res['N'].get_vector()
ca=res['CA'].get_vector()
c=res['C'].get_vector()
except:
# Some atoms are missing
# Phi/Psi cannot be calculated for this residue
ppl.append((None, None))
res.xtra["PHI"]=None
res.xtra["PSI"]=None
continue
# Phi
if i>0:
rp=self[i-1]
try:
cp=rp['C'].get_vector()
phi=calc_dihedral(cp, n, ca, c)
except:
phi=None
else:
# No phi for residue 0!
phi=None
# Psi
if i<(lng-1):
rn=self[i+1]
try:
nn=rn['N'].get_vector()
psi=calc_dihedral(n, ca, c, nn)
except:
psi=None
else:
# No psi for last residue!
psi=None
ppl.append((phi, psi))
# Add Phi/Psi to xtra dict of residue
res.xtra["PHI"]=phi
res.xtra["PSI"]=psi
return ppl
def get_tau_list(self):
"""
        Return the list of tau torsion angles for all 4 consecutive
Calpha atoms.
"""
ca_list=self.get_ca_list()
tau_list=[]
for i in range(0, len(ca_list)-3):
atom_list=[ca_list[i], ca_list[i+1], ca_list[i+2], ca_list[i+3]]
vector_list=map(lambda a: a.get_vector(), atom_list)
v1, v2, v3, v4=vector_list
tau=calc_dihedral(v1, v2, v3, v4)
tau_list.append(tau)
# Put tau in xtra dict of residue
res=ca_list[i+2].get_parent()
res.xtra["TAU"]=tau
return tau_list
def get_theta_list(self):
"""
Return list of theta angles for all 3 consecutive
Calpha atoms.
"""
theta_list=[]
ca_list=self.get_ca_list()
for i in range(0, len(ca_list)-2):
atom_list=[ca_list[i], ca_list[i+1], ca_list[i+2]]
vector_list=map(lambda a: a.get_vector(), atom_list)
v1, v2, v3=vector_list
theta=calc_angle(v1, v2, v3)
theta_list.append(theta)
            # Put theta in xtra dict of residue
res=ca_list[i+1].get_parent()
res.xtra["THETA"]=theta
return theta_list
def get_sequence(self):
"""
Return the AA sequence.
@return: polypeptide sequence
@rtype: L{Seq}
"""
s=""
for res in self:
resname=res.get_resname()
if to_one_letter_code.has_key(resname):
resname=to_one_letter_code[resname]
else:
resname='X'
s=s+resname
seq=Seq(s, ProteinAlphabet)
return seq
def __repr__(self):
"""
Return <Polypeptide start=START end=END>, where START
and END are sequence identifiers of the outer residues.
"""
start=self[0].get_id()[1]
end=self[-1].get_id()[1]
s="<Polypeptide start=%s end=%s>" % (start, end)
return s
class _PPBuilder:
"""
Base class to extract polypeptides.
It checks if two consecutive residues in a chain
are connected. The connectivity test is implemented by a
subclass.
"""
def __init__(self, radius):
"""
@param radius: distance
@type radius: float
"""
self.radius=radius
def _accept(self, residue):
"Check if the residue is an amino acid."
if is_aa(residue):
return 1
else:
if "CA" in residue.child_dict:
#It has an alpha carbon...
#We probably need to update the hard coded list of
#non-standard residues, see function is_aa for details.
import warnings
warnings.warn("Assuming residue %s is an unknown modified "
"amino acid" % residue.get_resname())
return 1
# not a standard AA so skip
return 0
def build_peptides(self, entity, aa_only=1):
"""
Build and return a list of Polypeptide objects.
@param entity: polypeptides are searched for in this object
@type entity: L{Structure}, L{Model} or L{Chain}
@param aa_only: if 1, the residue needs to be a standard AA
@type aa_only: int
"""
is_connected=self._is_connected
accept=self._accept
level=entity.get_level()
        # Decide which entity we are dealing with
if level=="S":
model=entity[0]
chain_list=model.get_list()
elif level=="M":
chain_list=entity.get_list()
elif level=="C":
chain_list=[entity]
else:
raise PDBException("Entity should be Structure, Model or Chain.")
pp_list=[]
for chain in chain_list:
chain_it=iter(chain)
prev=chain_it.next()
pp=None
for next in chain_it:
if aa_only and not accept(prev):
prev=next
continue
if is_connected(prev, next):
if pp is None:
pp=Polypeptide()
pp.append(prev)
pp_list.append(pp)
pp.append(next)
else:
pp=None
prev=next
return pp_list
class CaPPBuilder(_PPBuilder):
"""
Use CA--CA distance to find polypeptides.
"""
def __init__(self, radius=4.3):
_PPBuilder.__init__(self, radius)
def _is_connected(self, prev, next):
for r in [prev, next]:
if not r.has_id("CA"):
return 0
n=next["CA"]
p=prev["CA"]
# Unpack disordered
if n.is_disordered():
nlist=n.disordered_get_list()
else:
nlist=[n]
if p.is_disordered():
plist=p.disordered_get_list()
else:
plist=[p]
for nn in nlist:
for pp in plist:
if (nn-pp)<self.radius:
return 1
return 0
class PPBuilder(_PPBuilder):
"""
Use C--N distance to find polypeptides.
"""
def __init__(self, radius=1.8):
_PPBuilder.__init__(self, radius)
def _is_connected(self, prev, next):
if not prev.has_id("C"):
return 0
if not next.has_id("N"):
return 0
test_dist=self._test_dist
c=prev["C"]
n=next["N"]
# Test all disordered atom positions!
if c.is_disordered():
clist=c.disordered_get_list()
else:
clist=[c]
if n.is_disordered():
nlist=n.disordered_get_list()
else:
nlist=[n]
for nn in nlist:
for cc in clist:
# To form a peptide bond, N and C must be
# within radius and have the same altloc
# identifier or one altloc blank
n_altloc=nn.get_altloc()
c_altloc=cc.get_altloc()
if n_altloc==c_altloc or n_altloc==" " or c_altloc==" ":
if test_dist(nn, cc):
# Select the disordered atoms that
# are indeed bonded
if c.is_disordered():
c.disordered_select(c_altloc)
if n.is_disordered():
n.disordered_select(n_altloc)
return 1
return 0
def _test_dist(self, c, n):
"Return 1 if distance between atoms<radius"
if (c-n)<self.radius:
return 1
else:
return 0
if __name__=="__main__":
import sys
from Bio.PDB.PDBParser import PDBParser
p=PDBParser(PERMISSIVE=1)
s=p.get_structure("scr", sys.argv[1])
ppb=PPBuilder()
print "C-N"
for pp in ppb.build_peptides(s):
print pp.get_sequence()
for pp in ppb.build_peptides(s[0]):
print pp.get_sequence()
for pp in ppb.build_peptides(s[0]["A"]):
print pp.get_sequence()
for pp in ppb.build_peptides(s):
for phi, psi in pp.get_phi_psi_list():
print phi, psi
ppb=CaPPBuilder()
print "CA-CA"
for pp in ppb.build_peptides(s):
print pp.get_sequence()
for pp in ppb.build_peptides(s[0]):
print pp.get_sequence()
for pp in ppb.build_peptides(s[0]["A"]):
print pp.get_sequence()
| NirBenTalLab/proorigami-cde-package | cde-root/usr/lib64/python2.4/site-packages/Bio/PDB/Polypeptide.py | Python | mit | 11,950 |
# This is an example of adding a custom plugin to Projeny
# If you uncomment this and then initialize a new project (e.g. "prj -p MyProject -bf")
# Then after that completes there should be a new file at UnityProjects/MyProject/MyP-win/MyExampleFile.txt
#import mtm.ioc.Container as Container
#from mtm.ioc.Inject import Inject
#class CustomProjectInitHandler:
#_varMgr = Inject('VarManager')
#def onProjectInit(self, projectName, platform):
#outputPath = self._varMgr.expand('[ProjectPlatformRoot]/MyExampleFile.txt')
#with open(outputPath, 'w') as f:
#f.write("This is a sample of configuring the generated project directory")
#Container.bind('ProjectInitHandlers').toSingle(CustomProjectInitHandler)
| modesttree/Projeny | Source/prj/plugins/ExamplePlugin.py | Python | mit | 745 |
####
#
# This module allows for easy integration with Slack (https://slack.com/). The
# messages are sent via JSON over plaintext, so don't use them for transmitting
# anything sensitive.
#
####
## Imports
import json
import urllib2
####
#
# API Description
#
# These are all of the Slack API fields supported in this module.
supported_fields = [
# The content of the message. This can be plaintext or Markdown text.
'text',
    # The name of the bot posting the webhook integration message.
'username',
# The link to an image for the bot's avatar.
'icon_url',
# An emoji to use as the bot's image. Overrides 'icon_url'.
'icon_emoji',
# Where to post the message.
'channel',
# Whether to allow Markdown formatting in the 'text' field.
'mrkdwn',
# A list of attachments.
'attachments'
]
# These fields are supported as 'attachments' subfields.
supported_attachments_subfields = [
# The title of the attachment.
'title',
# The pretext.
'pretext',
# The actual text.
'text',
# Where to allow Markdown. Valid options are: ["pretext", "text", "fields"].
'mrkdwn_in'
]
class IncomingWebhooksSender(object):
"""
The IncomingWebhooksSender is an object to facilitate using a bot to post
to the Slack team communication platform.
Slack defines an API of available calls for "incoming webhooks" here:
https://api.slack.com/incoming-webhooks
This implementation is meant to be fully-featured, but also provides high-
level methods that abstract away most of the configuration to make use in
scripts easier. (Plus it's easier to read and document.)
"""
def __init__(self, integration_url, bot_name=None, icon_url=None, icon_emoji=None, channel=None, markdown=None):
"""
Creates a IncomingWebhooksSender object to send messages to a given
Slack team.
:param integration_url: The incoming webhook integration URL. This must
be supplied at creation (or else the bot is
useless).
:param bot_name: The name the bot will use when posting.
:param icon_url: A URL to use as the bot's icon.
:param icon_emoji: A colon emoji to use as the bot's icon. This
overrides 'icon_url'.
:param channel: The default channel for this bot to post to.
:param markdown: Whether to allow markdown (defaults to True if
not specified).
"""
self.url = integration_url
self.username = bot_name
self.icon_url = icon_url
self.icon_emoji = icon_emoji
self.channel = channel
self.mrkdwn = markdown
# Check if the channel has a '#' or '@' at the beginning. If not,
# throw an error.
if not self.username and self.username is not None:
raise ValueError("Null username specified.")
if not self.channel and self.channel is not None:
raise ValueError("Null channel specified.")
if (channel is not None and not self.channel.startswith('#')
and not self.channel.startswith('@')):
raise ValueError(
"Invalid channel. Need a '#' for channels or '@' for direct " +
"messages."
)
############################################################################
# Public methods.
def send_message(self, message):
"""
Sends a message to the default channel for this webhook (which is
determined by the URL passed in during object construction).
:param message: Message text you want to send.
"""
data = {'text': str(message)}
self.__prep_and_send_data(data)
def success(self, message=None):
"""
Sends a check mark with a message (if desired).
:param message: An optional string to include.
"""
send_message = ":white_check_mark:"
if message:
send_message += " " + str(message)
data = {'text': str(send_message)}
self.__prep_and_send_data(data)
def warning(self, message=None):
"""
Sends a yellow warning sign with a message (if desired).
:param message: An optional string to include.
"""
send_message = ":warning:"
if message:
send_message += " " + str(message)
data = {'text': str(send_message)}
self.__prep_and_send_data(data)
warn = warning
def error(self, message=None):
"""
Sends a red circle with a message (if desired).
:param message: An optional string to include.
"""
send_message = ":red_circle:"
if message:
send_message += " " + str(message)
data = {'text': str(send_message)}
self.__prep_and_send_data(data)
critical = error
def send_message_to_channel(self, message, channel):
"""
Sends a message to a specific channel.
Use '#' for channels and private groups, and '@' for direct messages.
For example:
#general
#my-private-group
@someguy
:param message: Message text you want to send.
:param channel: The channel to which you want to send the data.
"""
data = {
'text': str(message),
'channel': channel
}
self.__prep_and_send_data(data)
def send_dictionary(self, dictionary):
"""
Takes any dictionary and sends it through. It will be verified first, so
the dictionary must only use the available fields in the Slack API.
Note that with this method, you can send any message with any name to
any channel, et cetera.
:param dictionary: A dictionary of values you want to send.
"""
self.__prep_and_send_data(dictionary)
############################################################################
# Private methods.
def __prep_and_send_data(self, data):
"""
Takes a dictionary and prepares it for transmission, then sends it.
:param data: A map of Slack API fields to desired values.
:type data: dict
"""
data = self.__update_data(data)
self.__send_json(self.__prep_json_from_data(data))
def __update_data(self, data):
"""
Automatically updates the contents of the 'data' object with any fields
that are set in the object but weren't specified in the data. This makes
method calls simpler.
This method will also verify the data in the dictionary.
:param data: A map of Slack API fields to desired values.
:type data: dict
:returns: A copy of the `data` dictionary, but with extra values if they
were specified in the object constructor.
"""
# Duplicate the data to make this method non-destructive.
return_data = dict(data)
# Iterate over each of the supported fields.
for field in supported_fields:
# Let's see if we have a value defined for that attribute.
# Note that this requires the object's attributes to have the same
# name as the Slack API fields.
try:
value = getattr(self, field)
except AttributeError:
# Didn't have it, but let's not throw an error. Just continue.
continue
# If the field isn't already in the data, add it.
# This ensure that overriding calls are not overridden themselves.
if value is not None and not field in return_data:
return_data[field] = value
# Ensure the dictionary is good-to-go.
self.__verify_data(data)
return return_data
def __verify_data(self, data):
"""
Verifies that all of the fields in the `data` dictionary are valid. If
any field is found that isn't considered a supported field, an error is
raised.
This also checks inside the list of attachments (if it's present) to be
sure nothing is wrong.
:param data: A map of Slack API fields to desired values.
:type data: dict
"""
# Check it's a dictionary.
if not isinstance(data, dict):
raise ValueError("Received a non-dictionary form of data.")
# Iterate over every key.
for key in data:
# If the key isn't supported, that's a problem!
if not key in supported_fields:
raise ValueError("Bad key in data: {}".format(key))
# The 'attachments' key should contain a list.
if key == 'attachments':
# Verify it's really a list.
if not isinstance(data[key], list):
raise ValueError("'attachments' field in data must be a list.")
# Ensure there are no rogue values.
                for attachment in data[key]:
                    # Each attachment is itself a dict of subfields.
                    for subkey in attachment:
                        if not subkey in supported_attachments_subfields:
                            raise ValueError("Bad key in 'attachments': {}".format(subkey))
def __prep_json_from_data(self, data):
"""
Given data, this updates the contents and then gives back the string
form of the JSON data.
:param data: A map of Slack API fields to desired values.
:type data: dict
:returns: A string form of the dictionary.
"""
# Update all the data.
data = self.__update_data(data)
# Return the JSON string form of the data.
return self.__get_json_from_data(data)
def __get_json_from_data(self, data):
"""
Just gives back a string form of the data. This is just a wrapper so the
'json' module doesn't have to be loaded in addition to this one.
:param data: A map of Slack API fields to desired values.
:type data: dict
:returns: The string format returned by `json.dumps(data)`.
"""
return json.dumps(data)
def __send_json(self, data):
"""
Sends the given JSON data across an HTTP connection. This does not check
if the data is valid. This is by design to ensure that if I ever mess
something up with the `supported_fields` list or something, the object
can still be used to send anything.
:param data: JSON representation of a map of Slack API fields to desired
values.
:type data: str
"""
# Form the HTTP PUT request.
request = urllib2.Request(self.url, data)
# Send the data!
urllib2.urlopen(request)
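# A minimal usage sketch (the webhook URL below is a placeholder, not a real integration):
#
#     bot = IncomingWebhooksSender('https://hooks.slack.com/services/T000/B000/XXXXXXXX',
#                                  bot_name='build-bot', channel='#general')
#     bot.send_message('Build finished.')
#     bot.success('All tests passed.')
#     bot.error('Deployment failed, see logs.')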
| univ-of-utah-marriott-library-apple/management_tools | management_tools/slack.py | Python | mit | 10,760 |
from __future__ import division
def run () :
import libtbx.load_env # import dependency
for module in libtbx.env.module_list :
print module.name
if (__name__ == "__main__") :
run()
| hickerson/bbn | fable/fable_sources/libtbx/command_line/list_modules.py | Python | mit | 194 |
from django.core.management.base import BaseCommand
from openkamer.verslagao import create_verslagen_algemeen_overleg
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('year', nargs='+', type=int)
parser.add_argument('--max', type=int, help='The max number of documents to create, used for testing.', default=None)
def handle(self, *args, **options):
year = options['year'][0]
max_n = options['max']
create_verslagen_algemeen_overleg(year, max_n, skip_if_exists=False)
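# Typical invocation, inferred from this management command's file name (illustrative):
#
#     python manage.py create_verslagen_ao 2016 --max 10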
| openkamer/openkamer | openkamer/management/commands/create_verslagen_ao.py | Python | mit | 553 |
"""group_mod tests."""
from unittest import TestCase
from pyof.v0x04.controller2switch.group_mod import GroupMod
from pyof.v0x04.common.action import (
ActionExperimenter, ActionSetField, ListOfActions)
from pyof.v0x04.common.flow_match import OxmClass, OxmOfbMatchField, OxmTLV
from pyof.v0x04.common.port import PortNo
from pyof.v0x04.controller2switch.common import Bucket
from pyof.v0x04.controller2switch.group_mod import ListOfBuckets
class TestGroupMod(TestCase):
"""group_mod tests."""
def test_min_size(self):
"""Test minimum struct size."""
self.assertEqual(16, GroupMod().get_size())
class TestBucket(TestCase):
"""bucket tests."""
def test_min_size(self):
"""Test minimum struct size."""
self.assertEqual(16, Bucket().get_size())
class TestListBuckets(TestCase):
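    """ListOfBuckets pack/unpack tests."""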
def setUp(self):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUp()
self.oxmtlv1 = OxmTLV(oxm_class=OxmClass.OFPXMC_OPENFLOW_BASIC,
oxm_field=OxmOfbMatchField.OFPXMT_OFB_METADATA,
oxm_hasmask=False,
oxm_value=b'\x00\x00\x00\x00\x00\x00\x00\x01')
self.oxmtlv2 = OxmTLV(oxm_class=OxmClass.OFPXMC_OPENFLOW_BASIC,
oxm_field=OxmOfbMatchField.OFPXMT_OFB_METADATA,
oxm_hasmask=False,
oxm_value=b'\x00\x00\x00\x00\x00\x00\x00\x02')
self.action1 = ActionSetField(field=self.oxmtlv1)
self.action2 = ActionSetField(field=self.oxmtlv2)
self.action3 = ActionExperimenter(length=16, experimenter=0x00002320,
body=b'\x00\x0e\xff\xf8\x28\x00\x00\x00')
self.action4 = ActionExperimenter(length=16, experimenter=0x00001223,
body=b'\x00\x0e\xff\xff\x28\x00\x00\x00')
def test_bucket_list(self):
bucket1 = Bucket(length=48, weight=1, watch_port=PortNo.OFPP_ANY,
watch_group=PortNo.OFPP_ANY,
actions=ListOfActions([self.action1, self.action2]))
bucket2 = Bucket(length=80, weight=2, watch_port=PortNo.OFPP_ANY,
watch_group=PortNo.OFPP_ANY,
actions=ListOfActions([self.action1, self.action2,
self.action3, self.action4]))
bucket3 = Bucket(length=48, weight=3, watch_port=PortNo.OFPP_ANY,
watch_group=PortNo.OFPP_ANY,
actions=ListOfActions([self.action3, self.action4]))
# Packing buckets
buckets = ListOfBuckets([bucket1, bucket2, bucket3])
        buff = buckets.pack()
# Unpacking buckets bytes
unpacked_buckets = ListOfBuckets()
unpacked_buckets.unpack(buff)
self.assertEqual(len(unpacked_buckets), 3)
self.assertEqual(unpacked_buckets[0].length, 48)
self.assertEqual(unpacked_buckets[0].weight, 1)
self.assertEqual(len(unpacked_buckets[0].actions), 2)
self.assertEqual(unpacked_buckets[0].actions[0].field.oxm_value,
self.oxmtlv1.oxm_value)
self.assertEqual(unpacked_buckets[0].actions[1].field.oxm_value,
self.oxmtlv2.oxm_value)
self.assertEqual(unpacked_buckets[1].length, 80)
self.assertEqual(unpacked_buckets[1].weight, 2)
self.assertEqual(len(unpacked_buckets[1].actions), 4)
self.assertEqual(unpacked_buckets[1].actions[0].field.oxm_value,
self.oxmtlv1.oxm_value)
self.assertEqual(unpacked_buckets[1].actions[1].field.oxm_value,
self.oxmtlv2.oxm_value)
self.assertEqual(unpacked_buckets[1].actions[2].body,
self.action3.body)
self.assertEqual(unpacked_buckets[1].actions[3].body,
self.action4.body)
self.assertEqual(unpacked_buckets[2].length, 48)
self.assertEqual(unpacked_buckets[2].weight, 3)
self.assertEqual(len(unpacked_buckets[2].actions), 2)
self.assertEqual(unpacked_buckets[2].actions[0].body,
self.action3.body)
self.assertEqual(unpacked_buckets[2].actions[1].body,
self.action4.body)
def test_buckets_one_item(self):
bucket1 = Bucket(length=48, weight=1, watch_port=PortNo.OFPP_ANY,
watch_group=PortNo.OFPP_ANY,
actions=ListOfActions([self.action1, self.action2]))
# Packing buckets
buckets = ListOfBuckets([bucket1])
        buff = buckets.pack()
# Unpacking buckets bytes
unpacked_buckets = ListOfBuckets()
unpacked_buckets.unpack(buff)
self.assertEqual(len(unpacked_buckets), 1)
self.assertEqual(unpacked_buckets[0].length, 48)
self.assertEqual(unpacked_buckets[0].weight, 1)
self.assertEqual(len(unpacked_buckets[0].actions), 2)
self.assertEqual(unpacked_buckets[0].actions[0].field.oxm_value,
self.oxmtlv1.oxm_value)
self.assertEqual(unpacked_buckets[0].actions[1].field.oxm_value,
self.oxmtlv2.oxm_value)
    def test_buckets_single_action(self):
bucket1 = Bucket(length=48, weight=1, watch_port=PortNo.OFPP_ANY,
watch_group=PortNo.OFPP_ANY,
actions=ListOfActions([self.action1]))
# Packing buckets
buckets = ListOfBuckets([bucket1])
        buff = buckets.pack()
# Unpacking buckets bytes
unpacked_buckets = ListOfBuckets()
unpacked_buckets.unpack(buff)
self.assertEqual(len(unpacked_buckets), 1)
self.assertEqual(unpacked_buckets[0].length, 48)
self.assertEqual(unpacked_buckets[0].weight, 1)
self.assertEqual(len(unpacked_buckets[0].actions), 1)
self.assertEqual(unpacked_buckets[0].actions[0].field.oxm_value,
self.oxmtlv1.oxm_value)
| kytos/python-openflow | tests/unit/v0x04/test_controller2switch/test_group_mod.py | Python | mit | 6,137 |
#-*- coding: utf-8 -*-
""" EOSS catalog system
Implementation of urthecast catalog access
https://developers.urthecast.com/sign-in
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "[email protected]"
__status__ = "Production"
import requests
from manage import ICatalog
from model.plain_models import Catalog_Dataset
from shapely.geometry import Polygon
from shapely.wkt import dumps as wkt_dumps
class UrthecastCatalog(ICatalog):
def __init__(self):
# api_key = os.environ['UC_API_KEY']
# api_secret = os.environ['UC_API_SECRET']
self.api_key = 'B47EAFC6559748D4AD62'
self.api_secret = 'D796AF0410DB4580876C66B72F790192'
self.url = 'https://api.urthecast.com/v1/archive/scenes'
def find(self, platforms, aoi, date_start, date_stop, cloud_ratio=0.2):
url = self.url
poly = Polygon(aoi)
geometry = wkt_dumps(poly)
params = {
'api_key': self.api_key,
'api_secret': self.api_secret,
'cloud_coverage_lte': cloud_ratio,
'acquired_gte': date_start.isoformat(),
'acquired_lte': date_stop.isoformat(),
'geometry_intersects': geometry,
# 'sensor_platform': 'deimos-1,deimos-2,theia'
'sensor_platform': ",".join(platforms)
}
result = requests.get(url, params=params)
datasets = set()
for r in result.json()['payload']:
ds = Catalog_Dataset()
ds.entity_id = r['owner_scene_id']
ds.acq_time = r['acquired']
ds.sensor = r['sensor_platform']
# ds.tile_identifier = r['tile_identifier']
ds.clouds = r['cloud_coverage']
# ds.level = r['level']
if int(ds.clouds) > 0:
ds.daynight = 'day'
else:
ds.daynight = 'night'
datasets.add(ds)
return datasets
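    # A minimal usage sketch (illustrative values only; the platform names follow the
    # commented example above and the AOI is an arbitrary lon/lat polygon):
    #     from datetime import datetime
    #     catalog = UrthecastCatalog()
    #     scenes = catalog.find(['deimos-1', 'deimos-2'],
    #                           [(11.3, 47.2), (11.5, 47.2), (11.5, 47.4), (11.3, 47.4)],
    #                           datetime(2016, 5, 1), datetime(2016, 6, 1), cloud_ratio=0.1)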
def register(self, ds):
raise Exception('Cannot register dataset in repository %s' % self.__class__.__name__)
| eoss-cloud/madxxx_catalog_api | catalog/manage/urthecastcatalog.py | Python | mit | 2,226 |
import sqlalchemy
class Connection(object):
def __init__(self, auth):
url = self._build_url(auth)
self.engine = sqlalchemy.create_engine(url, convert_unicode=True)
self.connection = None
self.transaction = None
def connect(self):
self.connection = self.engine.connect()
def disconnect(self):
self.connection.close()
self.connection = None
def execute(self, sql_query):
if not self.is_connected():
self.connect()
return self.connection.execute(sql_query)
def begin_transaction(self):
if not self.is_connected():
self.connect()
self.transaction = self.connection.begin()
def end_transaction(self):
self.transaction.commit()
self.transaction = None
def is_connected(self):
return self.connection is not None
def _build_url(self, auth):
def get_value(data, keys, default=None):
result = None
for k in keys:
result = data.get(k)
if result is not None:
return result
if result is None:
return default
db_type = get_value(auth, ['type', 'db_type'], 'pgsql')
        if db_type == 'mysql':
            default_port = 3306
        elif db_type == 'pgsql':
            default_port = 5432
        else:
            raise ValueError('db_type must be either "mysql" or "pgsql"')
ctx = (get_value(auth, ['user']),
get_value(auth, ['passwd', 'password', 'pass']),
get_value(auth, ['host', 'server'], 'localhost'),
get_value(auth, ['port'], default_port),
get_value(auth, ['database', 'db_name', 'database_name', 'db']))
        if db_type == 'pgsql':
            url = 'postgresql+psycopg2://%s:%s@%s:%s/%s' % ctx
        else:
            url = 'mysql+mysqldb://%s:%s@%s:%s/%s' % ctx
        return url
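# A minimal usage sketch (illustrative only; the auth dict accepts several key
# aliases, e.g. 'passwd', 'password' or 'pass' for the password):
#     conn = Connection({'type': 'pgsql', 'user': 'app', 'passwd': 'secret',
#                        'host': 'localhost', 'db': 'app_db'})
#     conn.begin_transaction()
#     conn.execute('SELECT 1')
#     conn.end_transaction()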
| trackingwire/witchcraft | witchcraft/connection.py | Python | mit | 2,023 |
#dice guideline
from random import randint
#FOR program module
moduleName="Dice Module"
#moduleName="The Lusty Orc Dice Module"
#FOR dice rolls
#mulligan_yes_or_no=True not implemented
the_lowest_possible_roll=1
the_number_of_sides_on_a_die=6
the_number_of_rolls_in_a_set=4
reroll_if_equal_or_less=0
number_of_lowest_rolls_to_drop_in_a_set=1
number_of_highest_rolls_to_drop_in_a_set=1
#Rules for attribute rolls, Do not alter anything past this line
'''
def dicerollbatch():
WorkingBatch=[]
for setNumber in range(self.dieSets):
rolls=[]
dropped=[]
        #creates another loop that runs die_number_per_set times and adds a random roll result to the rolls and dropped lists, each roll is between the values lowest_possible_roll and number_of_sides_on_die
for roll in range(self.setDieNumber):
r=(randint(lowest_possible_roll, self.dieSides))
rolls.append(r)
dropped.append(r)
#after the rolls are done, normally 4 of them, the set is added to the rollBatch variable container as well as adding to the dropped sets container
self.rollBatch.append(rolls)
dropped.remove(min(dropped))
self.droppedBatch.append(dropped)
#after the roll sets have been added to the batch the batch count is incremented up one
self.batch+=1
#after the numbers have been generated and appended to the batch the sets are printed out vertically
print("number of batch attempts:"+str(self.batch)+"\nStat Rolls")
for batchSets in range(len(self.rollBatch)):
at=0
for batchRolls in range(len(self.droppedBatch[batchSets])):at+=self.droppedBatch[batchSets][batchRolls]
self.attributeResults.append(at)
print((self.rollBatch[batchSets]), (self.attributeResults[batchSets]))
'''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#test_4_rolls_print
from random import randint
The_number_of_rolls_in_a_set=4
The_number_of_sides_on_a_die=6
the_lowest_possible_roll=1
#the following 6 lines of code roll 4 numbers between 1 and 6 and then print them out vertically
def roll_set_of_dice():
for roll in range(The_number_of_rolls_in_a_set):
roll_result=(randint(the_lowest_possible_roll, The_number_of_sides_on_a_die))
print("%s" % roll_result)
return
#roll_set_of_dice()
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#test_4_rolls_output_to_list_print_list
from random import randint
the_number_of_rolls_in_a_set=4
the_number_of_sides_on_a_die=6
the_lowest_possible_roll=1
#the following 8 lines of code roll four 6-sided dice, copy the results to a list and then print the list
def roll_set_of_dice():
set_of_dice_rolls=[]
for roll in range(the_number_of_rolls_in_a_set):
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
set_of_dice_rolls.append(roll_result)
print(set_of_dice_rolls)
return
#roll_set_of_dice()
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#test_4_rolls with reroll and output
from random import randint
the_number_of_rolls_in_a_set=4
the_number_of_sides_on_a_die=6
the_lowest_possible_roll=1
reroll_if_equal_or_less=5
#rolls 4 dice between 1 and 6, rerolls all results that are 5 or less, then prints the resulting set of rolls
def roll_set_of_dice():
set_of_dice_rolls=[]
for roll in range(the_number_of_rolls_in_a_set):
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
while roll_result<=reroll_if_equal_or_less:
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
print("reroll %s" %roll_result)
else:set_of_dice_rolls.append(roll_result)
print(set_of_dice_rolls)
return
#roll_set_of_dice()
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#test_4_rolls if drop lowest or highest is greater than zero copy set_of_dice_rolls to set_of_dice_rolls_adjusted
from random import randint
the_number_of_rolls_in_a_set=4
the_number_of_sides_on_a_die=6
the_lowest_possible_roll=1
reroll_if_equal_or_less=0
number_of_lowest_rolls_to_drop_in_a_set=1
number_of_highest_rolls_to_drop_in_a_set=0
#rolls 4 dice between 1 and 6 and, when a lowest or highest drop count is set, copies the results into set_of_dice_rolls_adjusted before printing both lists
def roll_set_of_dice():
set_of_dice_rolls=[]
set_of_dice_rolls_adjusted=[]
for roll in range(the_number_of_rolls_in_a_set):
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
while roll_result<=reroll_if_equal_or_less:
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
print("reroll %s" %roll_result)
else:set_of_dice_rolls.append(roll_result)
if (number_of_lowest_rolls_to_drop_in_a_set>0) or (number_of_highest_rolls_to_drop_in_a_set>0):
for roll_results in range(len(set_of_dice_rolls)):
set_of_dice_rolls_adjusted.append(set_of_dice_rolls[roll_results])
print(set_of_dice_rolls_adjusted)
print(set_of_dice_rolls)
return
#roll_set_of_dice()
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#test_4_rolls drop highest and lowest
from random import randint
the_number_of_rolls_in_a_set=4
the_number_of_sides_on_a_die=6
the_lowest_possible_roll=1
reroll_if_equal_or_less=0
number_of_lowest_rolls_to_drop_in_a_set=1
number_of_highest_rolls_to_drop_in_a_set=1
#rolls 4 dice between 1 and 6, copies them into set_of_dice_rolls_adjusted, then drops the configured number of lowest and highest results from the adjusted set and prints both lists
def roll_set_of_dice():
set_of_dice_rolls=[]
set_of_dice_rolls_adjusted=[]
for roll in range(the_number_of_rolls_in_a_set):
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
while roll_result<=reroll_if_equal_or_less:
roll_result=(randint(the_lowest_possible_roll, the_number_of_sides_on_a_die))
print("reroll %s" %roll_result)
else:set_of_dice_rolls.append(roll_result)
for roll_results in range(len(set_of_dice_rolls)):
set_of_dice_rolls_adjusted.append(set_of_dice_rolls[roll_results])
print("\n////Break////\n%s\n%s\n////Break////\n" % (set_of_dice_rolls, set_of_dice_rolls_adjusted))
if (number_of_lowest_rolls_to_drop_in_a_set>0) or (number_of_highest_rolls_to_drop_in_a_set>0):
if number_of_lowest_rolls_to_drop_in_a_set>0:
drop_counter=0
drop_counter+=number_of_lowest_rolls_to_drop_in_a_set
#print(set_of_dice_rolls_adjusted)
#print(drop_counter)
while drop_counter>0:
set_of_dice_rolls_adjusted.remove(min(set_of_dice_rolls_adjusted))
#print(set_of_dice_rolls_adjusted)
drop_counter-=1
#print(drop_counter)
if number_of_highest_rolls_to_drop_in_a_set>0:
drop_counter=0
drop_counter+=number_of_highest_rolls_to_drop_in_a_set
#print(set_of_dice_rolls_adjusted)
#print(drop_counter)
while drop_counter>0:
set_of_dice_rolls_adjusted.remove(max(set_of_dice_rolls_adjusted))
#print(set_of_dice_rolls_adjusted)
drop_counter-=1
#print(drop_counter)
print("\n////Break////\n%s\n%s\n////Break////\n" % (set_of_dice_rolls, set_of_dice_rolls_adjusted))
return
roll_set_of_dice()
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' | RZMC/dndCharGen | dice guideline.py | Python | mit | 7,477 |
import os,sys
#import ez_setup
#ez_setup.use_setuptools()
from setuptools import setup, find_packages
import numpy
from Cython.Build import cythonize
# not compatible with distribute
#from distutils.extension import Extension
#from Cython.Distutils import build_ext
version = '0.7.0'
README = os.path.join(os.path.dirname(__file__), 'README')
long_description = open(README).read() + '\n\n'
classifiers = """\
Development Status :: 4 - Beta
Environment :: Console
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Operating System :: Unix
"""
entry_points = """
[console_scripts]
seqview = seqtool.frontend.command:seqview
get_genbank = seqtool.frontend.command:get_genbank
geneview = seqtool.frontend.command:geneview
tssview = seqtool.frontend.command:tssview
primers = seqtool.frontend.command:primers
sequencing = seqtool.frontend.command:sequencing
seqtooldb = seqtool.db:seqdb_command
abiview = seqtool.frontend.command:abiview
convert_bs = seqtool.bowtie.convert_bs:main
virtualpcr = seqtool.bowtie.virtualpcr:main
primer = seqtool.nucleotide.primer:main
probe = seqtool.nucleotide.primer:probe
bsa_figure = seqtool.script.bsa_figure:main
pdesign = seqtool.script.pdesign:main
bisulfite = seqtool.script.bisulfite:main
rpm = seqtool.script.cf:main
translate = seqtool.script.translate:main
server = seqtool.server.server:main
"""
setup(
name='seqtool',
version=version,
    description=("small scripts visualizing PCR products for molecular biology experiments."),
    classifiers=list(filter(None, classifiers.split("\n"))),
keywords='pcr biology bisulfite',
author='mizuy',
author_email='[email protected]',
url='http://github.com/mizuy/seqtool',
license='MIT',
packages=find_packages(),
install_requires=['biopython','numpy', 'sqlalchemy', 'cython', 'appdirs', 'mysql-connector-python'],
test_suite='nose.collector',
ext_modules = cythonize('seqtool/seqtool/nucleotide/sw_c.pyx'),
include_dirs = [numpy.get_include()],
entry_points=entry_points
)
| mizuy/seqtool | setup.py | Python | mit | 2,123 |
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="bgcolor", parent_name="pie.hoverlabel", **kwargs):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/pie/hoverlabel/_bgcolor.py | Python | mit | 499 |
###############
### imports ###
###############
from fabric.api import cd, env, lcd, put, prompt, local, sudo
from fabric.contrib.files import exists
##############
### config ###
##############
local_app_dir = './flask_project'
local_config_dir = './config'
remote_app_dir = '/home/www'
remote_git_dir = '/home/git'
remote_flask_dir = remote_app_dir + '/flask_project'
remote_nginx_dir = '/etc/nginx/sites-enabled'
remote_supervisor_dir = '/etc/supervisor/conf.d'
env.hosts = ['add_ip_or_domain'] # replace with IP address or hostname
env.user = 'newuser'
# env.password = 'blah!'
#############
### tasks ###
#############
def install_requirements():
""" Install required packages. """
sudo('apt-get update')
sudo('apt-get install -y python')
sudo('apt-get install -y python-pip')
sudo('apt-get install -y python-virtualenv')
sudo('apt-get install -y nginx')
sudo('apt-get install -y gunicorn')
sudo('apt-get install -y supervisor')
sudo('apt-get install -y git')
def install_flask():
"""
1. Create project directories
2. Create and activate a virtualenv
3. Copy Flask files to remote host
"""
if exists(remote_app_dir) is False:
sudo('mkdir ' + remote_app_dir)
if exists(remote_flask_dir) is False:
sudo('mkdir ' + remote_flask_dir)
with lcd(local_app_dir):
with cd(remote_app_dir):
sudo('virtualenv env')
            # each sudo() call runs in its own shell, so 'source activate' would not
            # persist; use the virtualenv's pip directly instead
            sudo('env/bin/pip install Flask==0.10.1')
with cd(remote_flask_dir):
put('*', './', use_sudo=True)
def configure_nginx():
"""
1. Remove default nginx config file
2. Create new config file
3. Setup new symbolic link
4. Copy local config to remote config
5. Restart nginx
"""
sudo('/etc/init.d/nginx start')
if exists('/etc/nginx/sites-enabled/default'):
sudo('rm /etc/nginx/sites-enabled/default')
if exists('/etc/nginx/sites-enabled/flask_project') is False:
sudo('touch /etc/nginx/sites-available/flask_project')
sudo('ln -s /etc/nginx/sites-available/flask_project' +
' /etc/nginx/sites-enabled/flask_project')
with lcd(local_config_dir):
with cd(remote_nginx_dir):
put('./flask_project', './', use_sudo=True)
sudo('/etc/init.d/nginx restart')
def configure_supervisor():
"""
1. Create new supervisor config file
2. Copy local config to remote config
3. Register new command
"""
if exists('/etc/supervisor/conf.d/flask_project.conf') is False:
with lcd(local_config_dir):
with cd(remote_supervisor_dir):
put('./flask_project.conf', './', use_sudo=True)
sudo('supervisorctl reread')
sudo('supervisorctl update')
def configure_git():
"""
1. Setup bare Git repo
2. Create post-receive hook
"""
if exists(remote_git_dir) is False:
sudo('mkdir ' + remote_git_dir)
with cd(remote_git_dir):
sudo('mkdir flask_project.git')
with cd('flask_project.git'):
sudo('git init --bare')
with lcd(local_config_dir):
with cd('hooks'):
put('./post-receive', './', use_sudo=True)
sudo('chmod +x post-receive')
def run_app():
""" Run the app! """
with cd(remote_flask_dir):
sudo('supervisorctl start flask_project')
def deploy():
"""
1. Copy new Flask files
2. Restart gunicorn via supervisor
"""
with lcd(local_app_dir):
local('git add -A')
commit_message = prompt("Commit message?")
local('git commit -am "{0}"'.format(commit_message))
local('git push production master')
sudo('supervisorctl restart flask_project')
def rollback():
"""
1. Quick rollback in case of error
2. Restart gunicorn via supervisor
"""
with lcd(local_app_dir):
local('git revert master --no-edit')
local('git push production master')
sudo('supervisorctl restart flask_project')
def status():
""" Is our app live? """
sudo('supervisorctl status')
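# Typical usage from a local machine (a sketch; assumes the Fabric CLI is installed
# and that a 'production' git remote points at the server's bare repository):
#   fab create     # provision the server and push the config files
#   fab run_app    # start the app under supervisor
#   fab deploy     # commit, push and restart gunicorn
#   fab status     # check whether the app is running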
def create():
install_requirements()
install_flask()
configure_nginx()
configure_supervisor()
configure_git() | kaiocesar/automation-fab | fabfile.py | Python | mit | 4,325 |
from django.contrib import admin
from api_boilerplate.models import ApiKey
admin.site.register(ApiKey)
| kippt/django-api-boilerplate | api_boilerplate/admin.py | Python | mit | 105 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-24 22:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('samples', '0004_sample'),
]
operations = [
migrations.CreateModel(
name='CollectionType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('method_name', models.CharField(max_length=255, verbose_name='Método de coleta')),
('is_primary', models.BooleanField(default=True, verbose_name='Principal?')),
],
),
migrations.AlterField(
model_name='fluvaccine',
name='was_applied',
field=models.NullBooleanField(verbose_name='Recebeu vacina contra gripe?'),
),
migrations.AddField(
model_name='sample',
name='collection_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='samples.CollectionType'),
),
]
| gcrsaldanha/fiocruz | samples/migrations/0005_auto_20170424_1903.py | Python | mit | 1,172 |
# -*- coding:utf-8 -*-
import os
def split_screen():
val = os.system("xrandr --output HDMI1 --right-of VGA1 --auto")
def run_sensorcmd_web():
path = "/home/suyf/swork/git/sensorcmd-web/web/"
os.chdir(path)
val = os.system("./apprun.py")
def run_cas():
path = "/home/suyf/swork/git/cas/web/"
os.chdir(path)
val = os.system("./casrun.py")
def run_windpower():
path = "/home/suyf/swork/git/windpower/web"
os.chdir(path)
val = os.system("python apprun.py")
if __name__ == '__main__':
choose = input("Please choose your project(int):\n 1:cas\n 2:sensorcmd-web\n 3:windpower\n")
if int(choose) == 1:
cas_val = run_cas()
        print cas_val
if int(choose) == 2:
cas_val = run_cas()
val = run_sensorcmd_web()
print val,cas_val
if int(choose) == 3:
val = run_windpower()
print val | myyyy/wiki | shell/work_start.py | Python | mit | 791 |
"""
from https://codelab.interviewbit.com/problems/symmetry/
"""
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param A : root node of tree
# @return an integer
def isSymmetric(self, A):
return self.are_symmetric(A.left, A.right)
def are_symmetric(self, tree1, tree2):
if tree1 is None or tree2 is None: # when checking the bottom
return tree1 == tree2
return tree1.val == tree2.val and self.are_symmetric(tree1.left, tree2.right) and self.are_symmetric(
tree1.right, tree2.left)
# Could
| JuanCTorres/interview-prep-solutions | codelab/symmetric_trees.py | Python | mit | 694 |
from PyQt4 import QtGui, QtCore
from PyQt4 import uic
form_class = uic.loadUiType('logWindow.ui')[0]
class LogWindow(QtGui.QDialog, form_class):
def __init__(self, parent=None):
super(LogWindow, self).__init__(parent)
self.setup_ui()
def setup_ui(self):
self.setupUi(self)
self.show()
def addLog(self, msg):
self.list_log.addItem(msg)
| RaynoldKim/MyTrade | LogWindow.py | Python | mit | 394 |
import _plotly_utils.basevalidators
class DataValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="data", parent_name="layout.template", **kwargs):
super(DataValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Data"),
data_docs=kwargs.pop(
"data_docs",
"""
area
A tuple of :class:`plotly.graph_objects.Area`
instances or dicts with compatible properties
barpolar
A tuple of
:class:`plotly.graph_objects.Barpolar`
instances or dicts with compatible properties
bar
A tuple of :class:`plotly.graph_objects.Bar`
instances or dicts with compatible properties
box
A tuple of :class:`plotly.graph_objects.Box`
instances or dicts with compatible properties
candlestick
A tuple of
:class:`plotly.graph_objects.Candlestick`
instances or dicts with compatible properties
carpet
A tuple of :class:`plotly.graph_objects.Carpet`
instances or dicts with compatible properties
choroplethmapbox
A tuple of
:class:`plotly.graph_objects.Choroplethmapbox`
instances or dicts with compatible properties
choropleth
A tuple of
:class:`plotly.graph_objects.Choropleth`
instances or dicts with compatible properties
cone
A tuple of :class:`plotly.graph_objects.Cone`
instances or dicts with compatible properties
contourcarpet
A tuple of
:class:`plotly.graph_objects.Contourcarpet`
instances or dicts with compatible properties
contour
A tuple of
:class:`plotly.graph_objects.Contour` instances
or dicts with compatible properties
densitymapbox
A tuple of
:class:`plotly.graph_objects.Densitymapbox`
instances or dicts with compatible properties
funnelarea
A tuple of
:class:`plotly.graph_objects.Funnelarea`
instances or dicts with compatible properties
funnel
A tuple of :class:`plotly.graph_objects.Funnel`
instances or dicts with compatible properties
heatmapgl
A tuple of
:class:`plotly.graph_objects.Heatmapgl`
instances or dicts with compatible properties
heatmap
A tuple of
:class:`plotly.graph_objects.Heatmap` instances
or dicts with compatible properties
histogram2dcontour
A tuple of :class:`plotly.graph_objects.Histogr
am2dContour` instances or dicts with compatible
properties
histogram2d
A tuple of
:class:`plotly.graph_objects.Histogram2d`
instances or dicts with compatible properties
histogram
A tuple of
:class:`plotly.graph_objects.Histogram`
instances or dicts with compatible properties
image
A tuple of :class:`plotly.graph_objects.Image`
instances or dicts with compatible properties
indicator
A tuple of
:class:`plotly.graph_objects.Indicator`
instances or dicts with compatible properties
isosurface
A tuple of
:class:`plotly.graph_objects.Isosurface`
instances or dicts with compatible properties
mesh3d
A tuple of :class:`plotly.graph_objects.Mesh3d`
instances or dicts with compatible properties
ohlc
A tuple of :class:`plotly.graph_objects.Ohlc`
instances or dicts with compatible properties
parcats
A tuple of
:class:`plotly.graph_objects.Parcats` instances
or dicts with compatible properties
parcoords
A tuple of
:class:`plotly.graph_objects.Parcoords`
instances or dicts with compatible properties
pie
A tuple of :class:`plotly.graph_objects.Pie`
instances or dicts with compatible properties
pointcloud
A tuple of
:class:`plotly.graph_objects.Pointcloud`
instances or dicts with compatible properties
sankey
A tuple of :class:`plotly.graph_objects.Sankey`
instances or dicts with compatible properties
scatter3d
A tuple of
:class:`plotly.graph_objects.Scatter3d`
instances or dicts with compatible properties
scattercarpet
A tuple of
:class:`plotly.graph_objects.Scattercarpet`
instances or dicts with compatible properties
scattergeo
A tuple of
:class:`plotly.graph_objects.Scattergeo`
instances or dicts with compatible properties
scattergl
A tuple of
:class:`plotly.graph_objects.Scattergl`
instances or dicts with compatible properties
scattermapbox
A tuple of
:class:`plotly.graph_objects.Scattermapbox`
instances or dicts with compatible properties
scatterpolargl
A tuple of
:class:`plotly.graph_objects.Scatterpolargl`
instances or dicts with compatible properties
scatterpolar
A tuple of
:class:`plotly.graph_objects.Scatterpolar`
instances or dicts with compatible properties
scatter
A tuple of
:class:`plotly.graph_objects.Scatter` instances
or dicts with compatible properties
scatterternary
A tuple of
:class:`plotly.graph_objects.Scatterternary`
instances or dicts with compatible properties
splom
A tuple of :class:`plotly.graph_objects.Splom`
instances or dicts with compatible properties
streamtube
A tuple of
:class:`plotly.graph_objects.Streamtube`
instances or dicts with compatible properties
sunburst
A tuple of
:class:`plotly.graph_objects.Sunburst`
instances or dicts with compatible properties
surface
A tuple of
:class:`plotly.graph_objects.Surface` instances
or dicts with compatible properties
table
A tuple of :class:`plotly.graph_objects.Table`
instances or dicts with compatible properties
treemap
A tuple of
:class:`plotly.graph_objects.Treemap` instances
or dicts with compatible properties
violin
A tuple of :class:`plotly.graph_objects.Violin`
instances or dicts with compatible properties
volume
A tuple of :class:`plotly.graph_objects.Volume`
instances or dicts with compatible properties
waterfall
A tuple of
:class:`plotly.graph_objects.Waterfall`
instances or dicts with compatible properties
""",
),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/layout/template/_data.py | Python | mit | 8,076 |
from apscheduler.schedulers.blocking import BlockingScheduler
from manage import update_live_points, update_for_gw, update_test, update_epl_players
from tweetbot import TweetBot
sched = BlockingScheduler()
# @sched.scheduled_job('cron', day_of_week='wed-sun', hour='22', minute='50-59/1', timezone='America/New_York')
# def test_job():
# update_test()
# @sched.scheduled_job('cron', day_of_week='fri', hour='15', timezone='America/New_York')
# def update_gw_data():
# update_for_gw()
# @sched.scheduled_job('cron', day_of_week='fri', hour='15-17', minute='10-59/10', timezone='America/New_York')
# def update_live():
# update_live_points()
@sched.scheduled_job('cron', hour='20', minute='32', timezone='America/New_York')
def update_epl_player_data():
update_epl_players()
twt_bot = TweetBot()
twt_bot.tweet_price_changes()
@sched.scheduled_job('cron', day_of_week='sat', hour='9', timezone='America/New_York')
def update_gw_data():
update_for_gw()
@sched.scheduled_job('cron', day_of_week='sat-sun', hour='9-15', minute='10-59/10', timezone='America/New_York')
def update_live():
update_live_points()
sched.start()
| code247/FPL_FFC | jobs.py | Python | mit | 1,124 |
from __future__ import absolute_import, division, print_function
import warnings
import re
import py
import pytest
from _pytest.recwarn import WarningsRecorder
def test_recwarn_functional(testdir):
reprec = testdir.inline_runsource("""
import warnings
def test_method(recwarn):
warnings.warn("hello")
warn = recwarn.pop()
assert isinstance(warn.message, UserWarning)
""")
res = reprec.countoutcomes()
assert tuple(res) == (1, 0, 0), res
class TestWarningsRecorderChecker(object):
def test_recording(self):
rec = WarningsRecorder()
with rec:
assert not rec.list
py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
assert len(rec.list) == 1
py.std.warnings.warn(DeprecationWarning("hello"))
assert len(rec.list) == 2
warn = rec.pop()
assert str(warn.message) == "hello"
l = rec.list
rec.clear()
assert len(rec.list) == 0
assert l is rec.list
pytest.raises(AssertionError, "rec.pop()")
def test_typechecking(self):
from _pytest.recwarn import WarningsChecker
with pytest.raises(TypeError):
WarningsChecker(5)
with pytest.raises(TypeError):
WarningsChecker(('hi', RuntimeWarning))
with pytest.raises(TypeError):
WarningsChecker([DeprecationWarning, RuntimeWarning])
def test_invalid_enter_exit(self):
# wrap this test in WarningsRecorder to ensure warning state gets reset
with WarningsRecorder():
with pytest.raises(RuntimeError):
rec = WarningsRecorder()
rec.__exit__(None, None, None) # can't exit before entering
with pytest.raises(RuntimeError):
rec = WarningsRecorder()
with rec:
with rec:
pass # can't enter twice
class TestDeprecatedCall(object):
"""test pytest.deprecated_call()"""
def dep(self, i, j=None):
if i == 0:
py.std.warnings.warn("is deprecated", DeprecationWarning,
stacklevel=1)
return 42
def dep_explicit(self, i):
if i == 0:
py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
filename="hello", lineno=3)
def test_deprecated_call_raises(self):
with pytest.raises(AssertionError) as excinfo:
pytest.deprecated_call(self.dep, 3, 5)
assert 'Did not produce' in str(excinfo)
def test_deprecated_call(self):
pytest.deprecated_call(self.dep, 0, 5)
def test_deprecated_call_ret(self):
ret = pytest.deprecated_call(self.dep, 0)
assert ret == 42
def test_deprecated_call_preserves(self):
onceregistry = py.std.warnings.onceregistry.copy()
filters = py.std.warnings.filters[:]
warn = py.std.warnings.warn
warn_explicit = py.std.warnings.warn_explicit
self.test_deprecated_call_raises()
self.test_deprecated_call()
assert onceregistry == py.std.warnings.onceregistry
assert filters == py.std.warnings.filters
assert warn is py.std.warnings.warn
assert warn_explicit is py.std.warnings.warn_explicit
def test_deprecated_explicit_call_raises(self):
with pytest.raises(AssertionError):
pytest.deprecated_call(self.dep_explicit, 3)
def test_deprecated_explicit_call(self):
pytest.deprecated_call(self.dep_explicit, 0)
pytest.deprecated_call(self.dep_explicit, 0)
@pytest.mark.parametrize('mode', ['context_manager', 'call'])
def test_deprecated_call_no_warning(self, mode):
"""Ensure deprecated_call() raises the expected failure when its block/function does
not raise a deprecation warning.
"""
def f():
pass
msg = 'Did not produce DeprecationWarning or PendingDeprecationWarning'
        with pytest.raises(AssertionError, match=msg):
if mode == 'call':
pytest.deprecated_call(f)
else:
with pytest.deprecated_call():
f()
@pytest.mark.parametrize('warning_type', [PendingDeprecationWarning, DeprecationWarning])
@pytest.mark.parametrize('mode', ['context_manager', 'call'])
@pytest.mark.parametrize('call_f_first', [True, False])
@pytest.mark.filterwarnings('ignore')
def test_deprecated_call_modes(self, warning_type, mode, call_f_first):
"""Ensure deprecated_call() captures a deprecation warning as expected inside its
block/function.
"""
def f():
warnings.warn(warning_type("hi"))
return 10
# ensure deprecated_call() can capture the warning even if it has already been triggered
if call_f_first:
assert f() == 10
if mode == 'call':
assert pytest.deprecated_call(f) == 10
else:
with pytest.deprecated_call():
assert f() == 10
@pytest.mark.parametrize('mode', ['context_manager', 'call'])
def test_deprecated_call_exception_is_raised(self, mode):
"""If the block of the code being tested by deprecated_call() raises an exception,
it must raise the exception undisturbed.
"""
def f():
raise ValueError('some exception')
with pytest.raises(ValueError, match='some exception'):
if mode == 'call':
pytest.deprecated_call(f)
else:
with pytest.deprecated_call():
f()
def test_deprecated_call_specificity(self):
other_warnings = [Warning, UserWarning, SyntaxWarning, RuntimeWarning,
FutureWarning, ImportWarning, UnicodeWarning]
for warning in other_warnings:
def f():
warnings.warn(warning("hi"))
with pytest.raises(AssertionError):
pytest.deprecated_call(f)
with pytest.raises(AssertionError):
with pytest.deprecated_call():
f()
class TestWarns(object):
def test_strings(self):
# different messages, b/c Python suppresses multiple identical warnings
source1 = "warnings.warn('w1', RuntimeWarning)"
source2 = "warnings.warn('w2', RuntimeWarning)"
source3 = "warnings.warn('w3', RuntimeWarning)"
pytest.warns(RuntimeWarning, source1)
pytest.raises(pytest.fail.Exception,
lambda: pytest.warns(UserWarning, source2))
pytest.warns(RuntimeWarning, source3)
def test_function(self):
pytest.warns(SyntaxWarning,
lambda msg: warnings.warn(msg, SyntaxWarning), "syntax")
def test_warning_tuple(self):
pytest.warns((RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w1', RuntimeWarning))
pytest.warns((RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w2', SyntaxWarning))
pytest.raises(pytest.fail.Exception,
lambda: pytest.warns(
(RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w3', UserWarning)))
def test_as_contextmanager(self):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
with pytest.warns(UserWarning):
warnings.warn("user", UserWarning)
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(RuntimeWarning):
warnings.warn("user", UserWarning)
excinfo.match(r"DID NOT WARN. No warnings of type \(.+RuntimeWarning.+,\) was emitted. "
r"The list of emitted warnings is: \[UserWarning\('user',\)\].")
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(UserWarning):
warnings.warn("runtime", RuntimeWarning)
excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. "
r"The list of emitted warnings is: \[RuntimeWarning\('runtime',\)\].")
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(UserWarning):
pass
excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. "
r"The list of emitted warnings is: \[\].")
warning_classes = (UserWarning, FutureWarning)
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(warning_classes) as warninfo:
warnings.warn("runtime", RuntimeWarning)
warnings.warn("import", ImportWarning)
message_template = ("DID NOT WARN. No warnings of type {0} was emitted. "
"The list of emitted warnings is: {1}.")
excinfo.match(re.escape(message_template.format(warning_classes,
[each.message for each in warninfo])))
def test_record(self):
with pytest.warns(UserWarning) as record:
warnings.warn("user", UserWarning)
assert len(record) == 1
assert str(record[0].message) == "user"
def test_record_only(self):
with pytest.warns(None) as record:
warnings.warn("user", UserWarning)
warnings.warn("runtime", RuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
def test_record_by_subclass(self):
with pytest.warns(Warning) as record:
warnings.warn("user", UserWarning)
warnings.warn("runtime", RuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
class MyUserWarning(UserWarning):
pass
class MyRuntimeWarning(RuntimeWarning):
pass
with pytest.warns((UserWarning, RuntimeWarning)) as record:
warnings.warn("user", MyUserWarning)
warnings.warn("runtime", MyRuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
def test_double_test(self, testdir):
"""If a test is run again, the warning should still be raised"""
testdir.makepyfile('''
import pytest
import warnings
@pytest.mark.parametrize('run', [1, 2])
def test(run):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
''')
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*2 passed in*'])
| MichaelAquilina/pytest | testing/test_recwarn.py | Python | mit | 10,978 |
from scudcloud.resources import Resources
from PyQt5 import QtCore
from PyQt5.QtCore import QUrl
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWebKitWidgets import QWebView
class LeftPane(QWebView):
def __init__(self, window):
QWebView.__init__(self)
self.window = window
with open(Resources.get_path("leftpane.js"), "r") as f:
self.js = f.read()
# We don't want plugins for this simple pane
self.settings().setAttribute(QWebSettings.PluginsEnabled, False)
self.reset()
def reset(self):
self.setFixedWidth(0)
self.setVisible(False)
self.setUrl(QUrl.fromLocalFile(Resources.get_path("leftpane.html")))
self.page().currentFrame().addToJavaScriptWindowObject("leftPane", self)
self.page().currentFrame().evaluateJavaScript(self.js)
def show(self):
self.setFixedWidth(65)
self.setVisible(True)
def hide(self):
self.setFixedWidth(0)
self.setVisible(False)
def addTeam(self, id, name, url, icon, active=False):
if active is True:
checked = "true"
else:
checked = "false"
self.page().currentFrame().evaluateJavaScript('LeftPane.addTeam("{}","{}","{}","{}","{}");'.format(id, name, url, icon, checked))
def click(self, i):
self.page().currentFrame().evaluateJavaScript('LeftPane.click({});'.format(i))
def alert(self, teamId, messages):
if teamId is not None:
self.page().currentFrame().evaluateJavaScript('LeftPane.alert("{}","{}");'.format(teamId, messages))
def unread(self, teamId):
self.page().currentFrame().evaluateJavaScript('LeftPane.unread("{}");'.format(teamId))
def stopAlert(self, team):
if team is not None:
self.page().currentFrame().evaluateJavaScript('LeftPane.stopAlert("{}");'.format(team))
def stopUnread(self, teamId):
self.page().currentFrame().evaluateJavaScript('LeftPane.stopUnread("{}");'.format(teamId))
def clickNext(self, direction):
self.page().currentFrame().evaluateJavaScript('LeftPane.clickNext("{}");'.format(direction))
@QtCore.pyqtSlot(str)
def switchTo(self, url):
self.window.switchTo(url)
def contextMenuEvent(self, event):
if self.window.debug:
menu = self.page().createStandardContextMenu()
menu.exec_(event.globalPos())
| raelgc/scudcloud | scudcloud/leftpane.py | Python | mit | 2,425 |
import urllib
import urllib2
import string
import json
import websocket
import thread
import time
#// Grab hitbox ip and socket id //////////////////#
site = "http://api.hitbox.tv/chat/servers.json?redis=true"
lines = json.load(urllib2.urlopen(site))#.read()
for line in lines:
ip = ".".join(line['server_ip'].split(".")[0].split("-")[1:])
print "usable ip:", ip
site = "http://"+ip+"/socket.io/1/"
lines = urllib2.urlopen(site).read()
socketid = lines.split(":")[0]
print "socket id:", socketid
socketstring = "ws://"+ip+"/socket.io/1/websocket/"+socketid
#// Grab token ///////////////////////////////////#
bot = json.load(open("botvalues.json"))
print "Hitbox username:", bot['name']
values = {'login' : bot['name'],
'pass' : bot['password'],
'app' : 'desktop' }
url = 'http://api.hitbox.tv/auth/token'
try:
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
the_page = json.load(response)
token = the_page["authToken"]
print "authToken:", token
except Exception, e:
print "Error: Are correct bot credentials in botvalues.json?"
raise e
#// Hitbox Websocket Code ////////////////////////#
join_msg = ("5:::{\"name\":\"message\",\"args\":[{\"method\":\"joinChannel\",\"params\":{\"channel\":\""
+bot['channel']+"\",\"name\":\""+bot['name']+"\",\"token\":\"" + token + "\",\"isAdmin\":false}}]}")
def hitbox_send_message(ws, message):
ws.send("5:::{\"name\":\"message\",\"args\":[{\"method\":\"chatMsg\",\"params\":{\"channel\":\""
+bot['channel']+"\",\"name\":\""+bot['name']+"\",\"nameColor\":\"FA5858\",\"text\":\""+message+"\"}}]}")
def on_message(ws, message):
print "message:",message
if message.startswith("5:::"):
m = json.loads(message[4:])['args'][0]
m2 = json.loads(m)
inmessage = m2['params']['text']
print inmessage
if m2['params']['name'] != bot['name']:
#PLACE BOT FUNCTIONALITY HERE
hitbox_send_message(ws, "BOT - " + inmessage)
if message == "2::":
ws.send("2::")
def on_error(ws, error):
raise error
def on_close(ws):
print "### closed ###"
def on_open(ws):
print "open"
time.sleep(1)
ws.send(join_msg)
time.sleep(1)
hitbox_send_message(ws, "BOT IS ONLINE")
# # alternate script, demonstrating a multithreaded approach to ws events.
# def run(*args):
# for i in range(30000):
# time.sleep(1)
# ws.send("Hello %d" % i)
# time.sleep(1)
# ws.close()
# print "thread terminating..."
# thread.start_new_thread(run, ())
if __name__ == "__main__":
websocket.enableTrace(True) #True prints out the handshake, any characters sent, and any errors in debug handling
ws = websocket.WebSocketApp(socketstring,
on_message = on_message,
on_error = on_error,
on_close = on_close)
ws.on_open = on_open
ws.run_forever() | cgwennap/pyHitBot | testsocket.py | Python | mit | 3,056 |
from octopy.utils import *
from ru.parallel.octotron.core.logic import Response
from ru.parallel.octotron.generators.tmpl import ReactionTemplate
from ru.parallel.octotron.generators.tmpl import ReactionAction as Reaction
def ReactsFromDict(reactions_dict):
res = []
for name, reaction in reactions_dict.items():
if len(reaction) > 1:
raise RuntimeError("duplicated reaction: " + name + " : " + str(reaction))
res.append(ReactionTemplate(name, reaction[0]))
return res
def ConvertReacts(var):
return ReactsFromDict(MergeDicts(var))
def Info(tag, message):
return Response("INFO", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_info")
def Warning(tag, message):
return Response("WARNING", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_warning")
def Danger(tag, message):
return Response("DANGER", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_danger")
def Critical(tag, message):
return Response("CRITICAL", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_critical")
def RInfo(tag, message):
return Response("RECOVER_INFO", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_info")
def RWarning(tag, message):
return Response("RECOVER_WARNING", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_warning")
def RDanger(tag, message):
return Response("RECOVER_DANGER", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_danger")
def RCritical(tag, message):
return Response("RECOVER_CRITICAL", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_critical")
def GenRStatus(status):
return {
Info : RInfo
, Warning : RWarning
, Danger : RDanger
, Critical : RCritical
}[status]
def Prophecy(tag, message):
return Response("PROPHECY", []).Msg(tag, message).Msg("_id", "{_id}").Exec("on_prophecy")
| srcc-msu/octotron | octopy/react_wrapper.py | Python | mit | 1,761 |
import sys, os
from Grafo import Grafo
if __name__ == '__main__':
try:
arquivo = open('grafo_teste_listar.txt', 'r')
arquivo_saida = open('teste_listar.txt', 'w')
except IOError:
sys.stderr.write('Erro ao tentar ler ou criar o arquivo, verifique se estão válidos.\n')
sys.exit()
grafo = Grafo()
grafo.ler_arquivo(arquivo)
grafo.lista(arquivo_saida)
try:
arquivo.close()
arquivo_saida.close()
except IOError:
sys.stderr.write('Erro ao tentar fechar os arquivos.\n')
sys.exit() | Razborges/algGrafos | TrabalhoFinal/testes/teste_listar.py | Python | mit | 573 |
import mock
from st2tests.base import BaseActionTestCase
from runfolder_service import ArteriaRunfolderServiceAction
from tests.pack_test_utils import FakeResponse
class RunfolderServiceServiceTest(BaseActionTestCase):
action_cls = ArteriaRunfolderServiceAction
def test_get_state(self):
expected_data = {"foo": "bar"}
with mock.patch('requests.get',
return_value=FakeResponse(status_code=200, text=expected_data)) as mock_get:
exit_flag, result = self.get_action_instance().run(cmd="get_state",
url="http://foo/")
self.assertEqual(result, expected_data)
self.assertTrue(exit_flag)
def test_set_state(self):
with mock.patch('requests.post',
return_value=FakeResponse(status_code=200, text="")) as mock_post:
exit_flag = self.get_action_instance().run(cmd="set_state",
url="http://foo/",
state="done",
runfolder="my_fav_runfolder")
self.assertTrue(exit_flag)
| arteria-project/arteria-packs | tests/test_action_runfolder_service.py | Python | mit | 1,239 |
from keras_code import predict, models
import numpy as np
import os
import math
from PIL import Image
from keras import backend as K
from utilities.logging import print_n_log
import utilities.paths as paths
DRIVE = paths.get_drive()
def build_img(array):
array = np.squeeze(array)
# In Pixels
padding = 5
max_width = 2000
# Normalise the array
assert np.min(array) >= 0
array = array/np.max(array)
shape = np.shape(array)
if K.image_data_format() == 'channels_first':
c = shape[0]
w = shape[1]
h = shape[2]
else:
c = shape[2]
w = shape[0]
h = shape[1]
assert w == h
# How many filters wide can we fit?
num_filters_wide = int(math.floor(float(max_width)/(w+padding)))
num_filters_high = int(math.ceil(float(c)/num_filters_wide))
img = np.zeros(((num_filters_wide*(w+padding)+padding),(num_filters_high*(h+padding)+padding),1),dtype=np.float32)
img[:,:,0] = 200.0/255.0
for filter in range(c):
col = filter % num_filters_wide
row = int(math.floor(float(filter)/num_filters_wide))
if K.image_data_format() == 'channels_first':
img[padding+(padding+w)*col:(padding+w)*(col+1),padding+(padding+h)*row:(padding+h)*(row+1),:] = np.expand_dims(array[filter,:,:],2)
else:
img[padding+(padding+w)*col:(padding+w)*(col+1),padding+(padding+h)*row:(padding+h)*(row+1),:] = np.expand_dims(array[:,:,filter],2)
img_show = Image.fromarray(np.tile(np.uint8(img * 255),3))
img_show.show()
# model_id = 'MVK_50_31'
# identifier = '00001'
# load_epoch = 4
model_id = 'MVSK_51_37'
identifier = '00001'
load_epoch = 1
model_path = paths.get_model_path('KERAS')
split = 'val'
batch_size = 2
save_path = None
force_noneq = False
# set the channel order correctly
if K.backend() == 'theano':
K.set_image_dim_ordering('th')
K.set_image_data_format('channels_first')
else:
K.set_image_dim_ordering('tf')
K.set_image_data_format('channels_last')
if not os.path.exists(model_path):
os.makedirs(model_path)
model, output_classes = models.get_model_from_id(model_id, identifier, batch_size, load_epoch=load_epoch)
dataset = models.get_dataset(model_id, split, force_noneq=force_noneq, batch_size=batch_size)
dataset.get_dataset_statistics(None)
# class_weights = model.get_layer('fc1').get_weights()[0] # [0] # [(512,7),(7,)]
# class_biases = model.get_layer('fc1').get_weights()[1] # [0] # [(512,7),(7,)]
# print(np.shape(class_biases))
# print(np.shape(class_weights))
# print np.shape(model.layers[-1].get_weights()[0])\
# class_weights = np.reshape(class_weights, (512, 14, 14, 7))
# final_conv_layer = model.get_layer('block5_pool')
for batch_count in range(0, 1):
x, y, sid = dataset.get_batch_xy(True)
# p = model.predict_on_batch(x)
for i in range(len(y)):
print(np.shape(x[i]))
get_output = K.function([model.layers[0].input], [model.get_layer('attention_with_context_1').a, model.layers[-1].output])
[conv_outputs, predictions] = get_output([np.expand_dims(x[i],0)])
print(conv_outputs)
print(model.get_layer('attention_with_context_1').a.get_value())
# build_img(conv_outputs)
p = np.argmax(predictions)
print(np.argmax(y[i]))
print(p)
break
#
# for li in range(len(model.layers)-1,0,-1):
# print(li)
# print(model.layers[li].name)
# if len(model.layers[li].get_weights()) > 0:
# print(np.shape(model.layers[li].get_weights()[0]))
#
# weights = class_weights[:, p]
# weighted_conv = conv_outputs * weights | HaydenFaulkner/phd | visualisations/networks/activations.py | Python | mit | 3,664 |
import asyncio
from arq import create_pool
from arq.connections import RedisSettings
async def the_task(ctx):
print('running the task with id', ctx['job_id'])
async def main():
redis = await create_pool(RedisSettings())
# no id, random id will be generated
job1 = await redis.enqueue_job('the_task')
print(job1)
"""
> <arq job 99edfef86ccf4145b2f64ee160fa3297>
"""
# random id again, again the job will be enqueued and a job will be returned
job2 = await redis.enqueue_job('the_task')
print(job2)
"""
> <arq job 7d2163c056e54b62a4d8404921094f05>
"""
# custom job id, job will be enqueued
job3 = await redis.enqueue_job('the_task', _job_id='foobar')
print(job3)
"""
> <arq job foobar>
"""
# same custom job id, job will not be enqueued and enqueue_job will return None
job4 = await redis.enqueue_job('the_task', _job_id='foobar')
print(job4)
"""
> None
"""
class WorkerSettings:
functions = [the_task]
if __name__ == '__main__':
asyncio.run(main())
| samuelcolvin/arq | docs/examples/job_ids.py | Python | mit | 1,073 |
from django.conf.urls import url
from Overlay import views
urlpatterns = [
url(r'^$',
views.main,
name='main'),
url(r'^checks/$',
views.checks,
name='checks'),
url(r'^results/$',
views.results,
name='results'),
url(r'^upload/$',
views.upload,
name='upload')
]
| DalenWBrauner/FloridaDataOverlay | Website/Florida_Data_Overlay/Overlay/urls.py | Python | mit | 344 |
import itertools as it
from conference_scheduler.resources import Shape, Constraint
from conference_scheduler.lp_problem import utils as lpu
def _schedule_all_events(events, slots, X, summation_type=None, **kwargs):
shape = Shape(len(events), len(slots))
summation = lpu.summation_functions[summation_type]
label = 'Event either not scheduled or scheduled multiple times'
for event in range(shape.events):
yield Constraint(
f'{label} - event: {event}',
summation(X[event, slot] for slot in range(shape.slots)) == 1
)
def _max_one_event_per_slot(events, slots, X, summation_type=None, **kwargs):
shape = Shape(len(events), len(slots))
summation = lpu.summation_functions[summation_type]
label = 'Slot with multiple events scheduled'
for slot in range(shape.slots):
yield Constraint(
f'{label} - slot: {slot}',
summation(X[(event, slot)] for event in range(shape.events)) <= 1
)
def _events_available_in_scheduled_slot(events, slots, X, **kwargs):
"""
Constraint that ensures that an event is scheduled in slots for which it is
available
"""
slot_availability_array = lpu.slot_availability_array(slots=slots,
events=events)
label = 'Event scheduled when not available'
for row, event in enumerate(slot_availability_array):
for col, availability in enumerate(event):
if availability == 0:
yield Constraint(
f'{label} - event: {row}, slot: {col}',
X[row, col] <= availability
)
def _events_available_during_other_events(
events, slots, X, summation_type=None, **kwargs
):
"""
Constraint that ensures that an event is not scheduled at the same time as
another event for which it is unavailable. Unavailability of events is
either because it is explicitly defined or because they share a tag.
"""
summation = lpu.summation_functions[summation_type]
event_availability_array = lpu.event_availability_array(events)
label = 'Event clashes with another event'
for slot1, slot2 in lpu.concurrent_slots(slots):
for row, event in enumerate(event_availability_array):
if events[row].unavailability:
for col, availability in enumerate(event):
if availability == 0:
yield Constraint(
f'{label} - event: {row} and event: {col}',
summation(
(X[row, slot1], X[col, slot2])
) <= 1 + availability
)
def _upper_bound_on_event_overflow(
events, slots, X, beta, summation_type=None, **kwargs
):
"""
This is an artificial constraint that is used by the objective function
aiming to minimise the maximum overflow in a slot.
"""
label = 'Artificial upper bound constraint'
for row, event in enumerate(events):
for col, slot in enumerate(slots):
yield Constraint(
f'{label} - slot: {col} and event: {row}',
event.demand * X[row, col] - slot.capacity <= beta)
def all_constraints(events, slots, X, beta=None, summation_type=None):
kwargs = {
'events': events,
'slots': slots,
'X': X,
'beta': beta,
'summation_type': summation_type
}
generators = [
_schedule_all_events,
_max_one_event_per_slot,
_events_available_in_scheduled_slot,
_events_available_during_other_events,
]
if beta is not None:
generators.append(_upper_bound_on_event_overflow)
for generator in generators:
for constraint in generator(**kwargs):
yield constraint
| PyconUK/ConferenceScheduler | src/conference_scheduler/lp_problem/constraints.py | Python | mit | 3,874 |
# Sort in reverse (descending) order
a = sorted(['bob', 'about', 'Zoo', 'Credit'], key=str.lower, reverse=True)
print(a)
# Sort by score (lowest to highest):
L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]
def by_score(t):
return t[1]
L2 = sorted(L, key=by_score)
print(L2)
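# To sort from highest to lowest instead, reuse the same key with reverse=True
# (an illustrative variant, not part of the original output below):
# L3 = sorted(L, key=by_score, reverse=True)
# print(L3)  # [('Adam', 92), ('Lisa', 88), ('Bob', 75), ('Bart', 66)]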
# [('Bart', 66), ('Bob', 75), ('Lisa', 88), ('Adam', 92)] | longze/my-cellar | web/articles/python/demo/09-sort.py | Python | mit | 322 |
from util import *
@responses.activate
def test_getters(client, dummy_data):
assert client.host() == dummy_data.host
assert client.api_host() == dummy_data.api_host
@responses.activate
def test_setters(client, dummy_data):
try:
client.host('host.nexmo.com')
client.api_host('host.nexmo.com')
assert client.host() != dummy_data.host
assert client.api_host() != dummy_data.api_host
except:
assert False
@responses.activate
def test_fail_setter_url_format(client, dummy_data):
try:
client.host('1000.1000')
assert False
except:
assert True | Nexmo/nexmo-python | tests/test_getters_setters.py | Python | mit | 627 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tracking', '0007_auto_20160313_1725'),
]
operations = [
migrations.AddField(
model_name='organization',
name='creation_time',
field=models.DateTimeField(blank=True, verbose_name='Creation Timestamp', null=True),
),
migrations.AddField(
model_name='organization',
name='modification_time',
field=models.DateTimeField(blank=True, verbose_name='Modification Timestamp', null=True),
),
migrations.AddField(
model_name='patientvisit',
name='creation_time',
field=models.DateTimeField(blank=True, verbose_name='Creation Timestamp', null=True),
),
migrations.AddField(
model_name='patientvisit',
name='modification_time',
field=models.DateTimeField(blank=True, verbose_name='Modification Timestamp', null=True),
),
migrations.AddField(
model_name='referringentity',
name='creation_time',
field=models.DateTimeField(blank=True, verbose_name='Creation Timestamp', null=True),
),
migrations.AddField(
model_name='referringentity',
name='modification_time',
field=models.DateTimeField(blank=True, verbose_name='Modification Timestamp', null=True),
),
migrations.AddField(
model_name='treatingprovider',
name='creation_time',
field=models.DateTimeField(blank=True, verbose_name='Creation Timestamp', null=True),
),
migrations.AddField(
model_name='treatingprovider',
name='modification_time',
field=models.DateTimeField(blank=True, verbose_name='Modification Timestamp', null=True),
),
]
| Heteroskedastic/Dr-referral-tracker | tracking/migrations/0008_adding_creation_modification_time.py | Python | mit | 1,984 |