repo_name | repo_path | repo_head_hexsha | content | apis |
---|---|---|---|---|
kem-group/rigidRegistration | build/lib/rigidregistration/__init__.py | cd6bef208d4b475954e2b3970d6ec11c15f61d70 | from . import utils
from . import display
from . import save
from . import FFTW
from . import stackregistration
__version__="0.2.1" | [] |
rudaoshi/metrics | torchmetrics/retrieval/retrieval_fallout.py | c018348619bd7e375cb86abf7dfcaddb7208a36d | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional
import pangu.core.backend as B
from pangu.core.backend import Tensor, tensor
from torchmetrics.functional.retrieval.fall_out import retrieval_fall_out
from torchmetrics.retrieval.retrieval_metric import RetrievalMetric
from torchmetrics.utilities.data import get_group_indexes
class RetrievalFallOut(RetrievalMetric):
"""Computes `Fall-out`_.
Works with binary target data. Accepts float predictions from a model output.
Forward accepts:
- ``preds`` (float tensor): ``(N, ...)``
- ``target`` (long or bool tensor): ``(N, ...)``
- ``indexes`` (long tensor): ``(N, ...)``
``indexes``, ``preds`` and ``target`` must have the same dimension.
``indexes`` indicate to which query a prediction belongs.
Predictions will be first grouped by ``indexes`` and then `Fall-out` will be computed as the mean
of the `Fall-out` over each query.
Args:
empty_target_action:
Specify what to do with queries that do not have at least one negative ``target``. Choose from:
- ``'neg'``: those queries count as ``0.0``
- ``'pos'``: those queries count as ``1.0`` (default)
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
k: consider only the top k elements for each query (default: None, which considers them all)
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects
the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state. When `None`, DDP
will be used to perform the allgather. default: None
Raises:
ValueError:
If ``k`` parameter is not `None` or an integer larger than 0
Example:
>>> from torchmetrics import RetrievalFallOut
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> fo = RetrievalFallOut(k=2)
>>> fo(preds, target, indexes=indexes)
tensor(0.5000)
"""
higher_is_better = False
def __init__(
self,
empty_target_action: str = "pos",
k: int = None,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
) -> None:
super().__init__(
empty_target_action=empty_target_action,
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
if (k is not None) and not (isinstance(k, int) and k > 0):
raise ValueError("`k` has to be a positive integer or None")
self.k = k
def compute(self) -> Tensor:
"""First concat state `indexes`, `preds` and `target` since they were stored as lists.
After that, compute list of groups that will help in keeping together predictions about the same query. Finally,
for each group compute the `_metric` if the number of negative targets is at least 1, otherwise behave as
specified by `self.empty_target_action`.
"""
indexes = B.cat(self.indexes, dim=0)
preds = B.cat(self.preds, dim=0)
target = B.cat(self.target, dim=0)
res = []
groups = get_group_indexes(indexes)
for group in groups:
mini_preds = preds[group]
mini_target = target[group]
if not (1 - mini_target).sum():  # no negative targets in this group
if self.empty_target_action == "error":
raise ValueError("`compute` method was provided with a query with no negative target.")
if self.empty_target_action == "pos":
res.append(tensor(1.0))
elif self.empty_target_action == "neg":
res.append(tensor(0.0))
else:
# ensure list contains only float tensors
res.append(self._metric(mini_preds, mini_target))
return B.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds)
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_fall_out(preds, target, k=self.k)
| [((4312, 4338), 'pangu.core.backend.cat', 'B.cat', (['self.indexes'], {'dim': '(0)'}), '(self.indexes, dim=0)\n', (4317, 4338), True, 'import pangu.core.backend as B\n'), ((4355, 4379), 'pangu.core.backend.cat', 'B.cat', (['self.preds'], {'dim': '(0)'}), '(self.preds, dim=0)\n', (4360, 4379), True, 'import pangu.core.backend as B\n'), ((4397, 4422), 'pangu.core.backend.cat', 'B.cat', (['self.target'], {'dim': '(0)'}), '(self.target, dim=0)\n', (4402, 4422), True, 'import pangu.core.backend as B\n'), ((4458, 4484), 'torchmetrics.utilities.data.get_group_indexes', 'get_group_indexes', (['indexes'], {}), '(indexes)\n', (4475, 4484), False, 'from torchmetrics.utilities.data import get_group_indexes\n'), ((5315, 5358), 'torchmetrics.functional.retrieval.fall_out.retrieval_fall_out', 'retrieval_fall_out', (['preds', 'target'], {'k': 'self.k'}), '(preds, target, k=self.k)\n', (5333, 5358), False, 'from torchmetrics.functional.retrieval.fall_out import retrieval_fall_out\n'), ((5213, 5224), 'pangu.core.backend.tensor', 'tensor', (['(0.0)'], {}), '(0.0)\n', (5219, 5224), False, 'from pangu.core.backend import Tensor, tensor\n'), ((4887, 4898), 'pangu.core.backend.tensor', 'tensor', (['(1.0)'], {}), '(1.0)\n', (4893, 4898), False, 'from pangu.core.backend import Tensor, tensor\n'), ((4987, 4998), 'pangu.core.backend.tensor', 'tensor', (['(0.0)'], {}), '(0.0)\n', (4993, 4998), False, 'from pangu.core.backend import Tensor, tensor\n')] |
onnheimm/pydlm | pydlm/tests/base/testKalmanFilter.py | 4693af6e621e3b75feda7ca15327b69a4ca622a7 | import numpy as np
import unittest
from pydlm.modeler.trends import trend
from pydlm.modeler.seasonality import seasonality
from pydlm.modeler.builder import builder
from pydlm.base.kalmanFilter import kalmanFilter
class testKalmanFilter(unittest.TestCase):
def setUp(self):
self.kf1 = kalmanFilter(discount=[1])
self.kf0 = kalmanFilter(discount=[1e-10])
self.kf11 = kalmanFilter(discount=[1, 1])
self.trend0 = trend(degree=0, discount=1, w=1.0)
self.trend0_90 = trend(degree=0, discount=0.9, w=1.0)
self.trend0_98 = trend(degree=0, discount=0.98, w=1.0, name='a')
self.trend1 = trend(degree=1, discount=1, w=1.0)
def testForwardFilter(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
self.kf1.predict(dlm.model)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
# the prior on the mean is zero, but we observe 1; with
# discount = 1, one should expect the filtered mean to be 0.5
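# Rough arithmetic behind that expectation (assuming the default observation
# variance of 1): the prior on the level is N(0, w=1), so the Kalman gain is
# 1 / (1 + 1) = 0.5 and the filtered mean is 0 + 0.5 * (1 - 0) = 0.5.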
self.kf1.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs, 0.5)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
self.assertAlmostEqual(dlm.model.sysVar, 0.375)
self.kf1.predict(dlm.model)
self.assertAlmostEqual(dlm.model.obs, 0.5)
self.assertAlmostEqual(dlm.model.prediction.obs, 0.5)
dlm.initialize()
self.kf0.predict(dlm.model)
self.assertAlmostEqual(dlm.model.prediction.obs, 0)
# the prior on the mean is zero, but observe 1, with discount = 0
# one should expect the filtered mean close to 1
self.kf0.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1)
self.assertAlmostEqual(dlm.model.prediction.obs[0, 0], 0)
self.assertAlmostEqual(dlm.model.sysVar[0, 0], 0.5)
self.kf0.predict(dlm.model)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1)
self.assertAlmostEqual(dlm.model.prediction.obs[0, 0], 1)
def testForwardFilterMultiDim(self):
dlm = builder()
dlm.add(seasonality(period=2, discount=1, w=1.0))
dlm.initialize()
self.kf11.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.state[0][0, 0], 0.33333333333)
self.assertAlmostEqual(dlm.model.state[1][0, 0], -0.33333333333)
self.kf11.forwardFilter(dlm.model, -1)
self.assertAlmostEqual(dlm.model.state[0][0, 0], -0.5)
self.assertAlmostEqual(dlm.model.state[1][0, 0], 0.5)
def testBackwardSmoother(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
# with mean being 0 and observing 1 and 0 consecutively, one should
# expect the smoothed mean at time 1 to be 1/3, for discount = 1
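# Rough arithmetic behind that expectation (assuming the default observation
# variance of 1): with discount = 1 the level is static, so the final posterior
# mean is the equally weighted average of the prior mean 0 and the observations
# 1 and 0, i.e. (0 + 1 + 0) / 3 = 1/3, and smoothing carries that back to time 1.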
self.kf1.forwardFilter(dlm.model, 1)
self.kf1.forwardFilter(dlm.model, 0)
self.kf1.backwardSmoother(dlm.model, \
np.matrix([[0.5]]), \
np.matrix([[0.375]]))
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0/3)
self.assertAlmostEqual(dlm.model.sysVar[0, 0], 0.18518519)
# second order trend with discount = 1. The smoothed result should be
# equal to a direct fit on the three data points, 0, 1, -1. Thus, the
# smoothed observation should be 0.0
def testBackwardSmootherMultiDim(self):
dlm = builder()
dlm.add(self.trend1)
dlm.initialize()
self.kf11.forwardFilter(dlm.model, 1)
state1 = dlm.model.state
cov1 = dlm.model.sysVar
self.kf11.forwardFilter(dlm.model, -1)
self.kf11.backwardSmoother(dlm.model, \
rawState = state1, \
rawSysVar = cov1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 0.0)
def testMissingData(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
self.kf0.forwardFilter(dlm.model, 1)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0], 1.0)
self.kf0.forwardFilter(dlm.model, None)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0]/1e10, 0.5)
self.kf0.forwardFilter(dlm.model, None)
self.assertAlmostEqual(dlm.model.obs[0, 0], 1.0)
self.assertAlmostEqual(dlm.model.obsVar[0, 0]/1e10, 0.5)
self.kf0.forwardFilter(dlm.model, 0)
self.assertAlmostEqual(dlm.model.obs[0, 0], 0.0)
def testMissingEvaluation(self):
dlm = builder()
dlm.add(self.trend0)
dlm.initialize()
dlm.model.evaluation = np.matrix([[None]])
self.kf1.forwardFilter(dlm.model, 1.0, dealWithMissingEvaluation = True)
self.assertAlmostEqual(dlm.model.obs, 0.0)
self.assertAlmostEqual(dlm.model.transition, 1.0)
def testEvolveMode(self):
dlm = builder()
dlm.add(self.trend0_90)
dlm.add(self.trend0_98)
dlm.initialize()
kf2 = kalmanFilter(discount=[0.9, 0.98],
updateInnovation='component',
index=dlm.componentIndex)
kf2.forwardFilter(dlm.model, 1.0)
self.assertAlmostEqual(dlm.model.innovation[0, 1], 0.0)
self.assertAlmostEqual(dlm.model.innovation[1, 0], 0.0)
if __name__ == '__main__':
unittest.main()
| [((5442, 5457), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5455, 5457), False, 'import unittest\n'), ((303, 329), 'pydlm.base.kalmanFilter.kalmanFilter', 'kalmanFilter', ([], {'discount': '[1]'}), '(discount=[1])\n', (315, 329), False, 'from pydlm.base.kalmanFilter import kalmanFilter\n'), ((349, 379), 'pydlm.base.kalmanFilter.kalmanFilter', 'kalmanFilter', ([], {'discount': '[1e-10]'}), '(discount=[1e-10])\n', (361, 379), False, 'from pydlm.base.kalmanFilter import kalmanFilter\n'), ((400, 429), 'pydlm.base.kalmanFilter.kalmanFilter', 'kalmanFilter', ([], {'discount': '[1, 1]'}), '(discount=[1, 1])\n', (412, 429), False, 'from pydlm.base.kalmanFilter import kalmanFilter\n'), ((452, 486), 'pydlm.modeler.trends.trend', 'trend', ([], {'degree': '(0)', 'discount': '(1)', 'w': '(1.0)'}), '(degree=0, discount=1, w=1.0)\n', (457, 486), False, 'from pydlm.modeler.trends import trend\n'), ((512, 548), 'pydlm.modeler.trends.trend', 'trend', ([], {'degree': '(0)', 'discount': '(0.9)', 'w': '(1.0)'}), '(degree=0, discount=0.9, w=1.0)\n', (517, 548), False, 'from pydlm.modeler.trends import trend\n'), ((574, 621), 'pydlm.modeler.trends.trend', 'trend', ([], {'degree': '(0)', 'discount': '(0.98)', 'w': '(1.0)', 'name': '"""a"""'}), "(degree=0, discount=0.98, w=1.0, name='a')\n", (579, 621), False, 'from pydlm.modeler.trends import trend\n'), ((644, 678), 'pydlm.modeler.trends.trend', 'trend', ([], {'degree': '(1)', 'discount': '(1)', 'w': '(1.0)'}), '(degree=1, discount=1, w=1.0)\n', (649, 678), False, 'from pydlm.modeler.trends import trend\n'), ((728, 737), 'pydlm.modeler.builder.builder', 'builder', ([], {}), '()\n', (735, 737), False, 'from pydlm.modeler.builder import builder\n'), ((2076, 2085), 'pydlm.modeler.builder.builder', 'builder', ([], {}), '()\n', (2083, 2085), False, 'from pydlm.modeler.builder import builder\n'), ((2586, 2595), 'pydlm.modeler.builder.builder', 'builder', ([], {}), '()\n', (2593, 2595), False, 'from pydlm.modeler.builder import builder\n'), ((3417, 3426), 'pydlm.modeler.builder.builder', 'builder', ([], {}), '()\n', (3424, 3426), False, 'from pydlm.modeler.builder import builder\n'), ((3903, 3912), 'pydlm.modeler.builder.builder', 'builder', ([], {}), '()\n', (3910, 3912), False, 'from pydlm.modeler.builder import builder\n'), ((4628, 4637), 'pydlm.modeler.builder.builder', 'builder', ([], {}), '()\n', (4635, 4637), False, 'from pydlm.modeler.builder import builder\n'), ((4724, 4743), 'numpy.matrix', 'np.matrix', (['[[None]]'], {}), '([[None]])\n', (4733, 4743), True, 'import numpy as np\n'), ((4980, 4989), 'pydlm.modeler.builder.builder', 'builder', ([], {}), '()\n', (4987, 4989), False, 'from pydlm.modeler.builder import builder\n'), ((5094, 5189), 'pydlm.base.kalmanFilter.kalmanFilter', 'kalmanFilter', ([], {'discount': '[0.9, 0.98]', 'updateInnovation': '"""component"""', 'index': 'dlm.componentIndex'}), "(discount=[0.9, 0.98], updateInnovation='component', index=dlm.\n componentIndex)\n", (5106, 5189), False, 'from pydlm.base.kalmanFilter import kalmanFilter\n'), ((2102, 2142), 'pydlm.modeler.seasonality.seasonality', 'seasonality', ([], {'period': '(2)', 'discount': '(1)', 'w': '(1.0)'}), '(period=2, discount=1, w=1.0)\n', (2113, 2142), False, 'from pydlm.modeler.seasonality import seasonality\n'), ((2964, 2982), 'numpy.matrix', 'np.matrix', (['[[0.5]]'], {}), '([[0.5]])\n', (2973, 2982), True, 'import numpy as np\n'), ((3020, 3040), 'numpy.matrix', 'np.matrix', (['[[0.375]]'], {}), '([[0.375]])\n', (3029, 3040), True, 'import numpy as np\n')] |
arcapix/gpfsapi-examples | change_threshold_migration.py | 15bff7fda7b0a576209253dee48eb44e4c0d565f | from arcapix.fs.gpfs.policy import PlacementPolicy
from arcapix.fs.gpfs.rule import MigrateRule
# load placement policy for mmfs1
policy = PlacementPolicy('mmfs1')
# create a new migrate rule for 'sata1'
r = MigrateRule(source='sata1', threshold=(90, 50))
# add rule to start of the policy
policy.rules.insert(r, 0)
# save changes
policy.save()
| [((140, 164), 'arcapix.fs.gpfs.policy.PlacementPolicy', 'PlacementPolicy', (['"""mmfs1"""'], {}), "('mmfs1')\n", (155, 164), False, 'from arcapix.fs.gpfs.policy import PlacementPolicy\n'), ((210, 257), 'arcapix.fs.gpfs.rule.MigrateRule', 'MigrateRule', ([], {'source': '"""sata1"""', 'threshold': '(90, 50)'}), "(source='sata1', threshold=(90, 50))\n", (221, 257), False, 'from arcapix.fs.gpfs.rule import MigrateRule\n')] |
tjol/advent-of-code-2021 | 1/puzzle1.py | 16def395df091d5a8ae9ceb66ba3370554bdf40b | #!/usr/bin/env python3
import sys
depths = list(map(int, sys.stdin))
increased = [a > b for (a, b) in zip(depths[1:], depths[:-1])]
print(sum(increased))
| [] |
An0nYm0u5101/Pastebin | project/app/paste/controllers.py | aef35abee69ce7ce240d3a3f64bb19446468d30d | from flask import Blueprint, request, render_template, \
flash, g, session, redirect, url_for, jsonify
from app import db, requires_auth
from flask_cors import CORS
from .models import Paste
import uuid
from datetime import datetime
from app.user.models import User
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import HtmlFormatter
from functools import wraps
from datetime import datetime
from dateutil import parser
def requires_admin(f):
@wraps(f)
def decorated(*args, **kwargs):
if 'user_id' not in session:
return jsonify(message="Unauthorized", success=False), 401
user_id = session['user_id']
user = User.query.filter(User.id == user_id).first()
if(user.user_type != 2):
return jsonify(message="Unauthorized", success=False), 401
return f(*args, **kwargs)
return decorated
mod_paste = Blueprint('paste', __name__)
CORS(mod_paste)
def is_active(paste):
return parser.parse(paste.expire_time) > datetime.now()
@mod_paste.route('/create_paste', methods=['GET'])
@requires_auth
def create_form():
curr_id = session['user_id']
user = User.query.filter(User.id == curr_id).first()
return render_template('user.html', username=user.username)
@mod_paste.route('/create_paste', methods=['POST'])
def create_paste():
title = request.form['title']
text = request.form['text']
paste_type = request.form['type']
if 'user_id' in session:
user_id = session['user_id']
else:
user = User.query.filter(User.username == 'Guest').first()
user_id = user.id
lang = request.form['lang']
time_form = request.form['time']
expire_time = str(time_form)
add_time = str(datetime.now())
url = str(uuid.uuid4())
report_count = 0
try:
paste = Paste(title, text, lang, add_time,
expire_time, user_id, url, report_count, paste_type)
user = User.query.filter(User.id == user_id).first()
x = user.paste_count
user.paste_count = x + 1
db.session.add(paste)
db.session.commit()
# jsonify(success=True, paste=paste.to_dict())
return jsonify({'url': url}), 200
except:
return jsonify({'error': 'Error while creating Paste, Please check if all fields are filled'}), 400
@mod_paste.route('/paste', methods=['GET'])
@requires_auth
def get_all_pastes():
# user_id = session['user_id']
# pastes = paste.query.filter(paste.user_id == user_id).all()
if 'user_id' in session:
curr_id = session['user_id']
user = User.query.filter(curr_id == User.id).first()
if user.user_type == 2:
return render_template('admin_mypaste.html')
return render_template("mypaste.html")
else:
return jsonify({'error': 'Please Login to Continue'}), 400
# return jsonify(success=True, pastes=[paste.to_dict() for paste in
# pastes])
@mod_paste.route('/api/paste', methods=['POST'])
@requires_auth
def get_all_pastes_object():
user_id = session['user_id']
user = User.query.filter(user_id == User.id).first()
pastes = Paste.query.filter(Paste.user_id == user_id).all()
active = []
for paste in pastes:
if is_active(paste):
active.append(paste.to_dict())
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(userid_to_red == User.id).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return jsonify({'paste_list': active, 'username': user.username}), 200
@mod_paste.route('/<url>/embed', methods=['GET'])
def embed_code_form(url):
paste = Paste.query.filter(Paste.url == url).first()
if is_active(paste):
return render_template('embed.html', paste_text=paste.text, paste_link="http://127.0.0.1:8080/" + url)
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
# @mod_paste.route('/<url>/embed', methods=['POST'])
# def embed_code(url):
# paste = Paste.query.filter(Paste.url == url).first()
# return jsonify(paste_text = paste.text,paste_link = url)
@mod_paste.route('/<url>/embed/output', methods=['GET'])
def embed_code_disp(url):
paste = Paste.query.filter(Paste.url == url).first()
if is_active(paste):
return render_template('embed_output.html')
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
# @mod_paste.route('/paste', methods=['GET'])
# @requires_auth
# def get_all_pastes():
# # user_id = session['user_id']
# # pastes = paste.query.filter(paste.user_id == user_id).all()
# curr_id = session['user_id']
# user = User.query.filter(User.id == curr_id).first()
# paste_list = Paste.query.filter(curr_id == Paste.user_id).all()
# url_pre = "/"
# for paste in paste_list:
# paste.url = url_pre + paste.url
# if user.user_type == 1:
# return render_template('mypaste.html', paste_list=paste_list)
# return render_template('admin_mypaste.html',paste_list = paste_list)
# # return jsonify(success=True, pastes=[paste.to_dict() for paste in
# # pastes])
#
#
# @mod_paste.route('/api/paste', methods=['POST'])
# @requires_auth
# def get_all_pastes_object():
# user_id = session['user_id']
# user = User.query.filter(user_id == User.id).first()
# pastes = Paste.query.filter(Paste.user_id == user_id).all()
# active = []
# for paste in pastes:
# temp_paste = {}
# if paste.is_active():
# temp_paste['title'] = paste.title
# temp_paste['add_time']=paste.add_time
# temp_paste['expire_time']=paste.expire_time
# temp_paste['lang']=paste.lang
# temp_paste['url']=paste.url
# active.append(temp_paste)
#
# return jsonify({'paste_list':active,'username':user.username}),200
# @mod_paste.route('/paste/<id>', methods=['GET'])
# @requires_auth
# def get_paste(id):
# user_id = session['user_id']
# paste = paste.query.filter(
# Paste.id == id, Paste.user_id == user_id).first()
# if paste is None:
# return render_template("index.html"),4044
# else:
# return jsonify(success=True, paste=paste.to_dict())
# @mod_paste.route('/paste/<id>', methods=['POST'])
# @requires_auth
# def edit_paste(id):
# user_id = session['user_id']
# paste = Paste.query.filter(
# Paste.id == id, Paste.user_id == user_id).first()
# if paste is None:
# return render_template("index.html"),4044
# else:
# paste.title = request.form['title']
# paste.text = request.form['text']
# paste.color = request.form['color']
# paste.lang = request.form['lang']
# db.session.commit()
# return jsonify(success=True)
@mod_paste.route('/<url>/delete', methods=['POST'])
@requires_auth
def delete_paste(url):
user_id = session['user_id']
# print(user_id)
paste = Paste.query.filter(Paste.url == url).first()
user = User.query.filter(User.id == user_id).first()
if paste is None:
return render_template("index.html"), 404
if is_active(paste):
if paste.user_id == user_id or user.user_type == 2:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return jsonify(success=True, user_type=user.user_type), 200
else:
return jsonify(success=False), 400
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
# @mod_paste.route('/<url>', methods=['GET'])
# def display_paste(url):
# paste = Paste.query.filter(Paste.url == url).first()
# style = HtmlFormatter().get_style_defs('.highlight')
# lexer = get_lexer_by_name(paste.lang)
# formatter = HtmlFormatter(linenos=True, cssclass="highlight")
# result = highlight(paste.text, lexer, formatter)
# return render_template("view_paste.html", paste_title=paste.title,
# paste_lang=paste.lang, highlight_style=style,
# paste_text=result,paste_rawdata = paste.text)
@mod_paste.route('/<url>', methods=['GET'])
def display_paste(url):
paste = Paste.query.filter(Paste.url == url).first()
if paste is not None:
if is_active(paste):
if 'user_id' in session:
if(paste.paste_type == "1" and session['user_id'] != paste.user_id):
return render_template("index.html"), 200
user_id = session['user_id']
user = User.query.filter(User.id == user_id).first()
if user.user_type == 1:
return render_template('view_paste.html')
if user.user_type == 2:
return render_template('view_paste_admin.html')
return render_template("view_paste_guest.html")
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
else:
return render_template("index.html"), 404
@mod_paste.route('/api/<url>', methods=['POST'])
def ret_paste(url):
paste = Paste.query.filter(Paste.url == url).first()
user = User.query.filter(paste.user_id == User.id).first()
if is_active(paste):
return jsonify({'paste_owner': user.username, 'paste_text': paste.text, 'paste_title': paste.title, 'paste_lang': paste.lang, 'paste_add': paste.add_time, 'paste_expire': paste.expire_time}), 200
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
# @mod_paste.route('/<url>/add_report', methods=['POST'])
# @requires_auth
# def to_delete(url):
# paste_to_delete = Paste.query.filter(Paste.url == url).first()
# if paste_to_delete.report_count > 5:
# db.session.delete(paste_to_delete)
# else:
# paste_to_delete.report_count = paste_to_delete.report_count + 1
# db.session.commit()
# curr_id = session['user_id']
# paste_list = Paste.query.filter(Paste.user_id == curr_id).all()
# url_pre = "/"
# for paste in paste_list:
# paste.url = url_pre + paste.url
# return render_template('mypaste.html', paste_list=paste_list)
@mod_paste.route('/<url>/edit', methods=['GET'])
@requires_auth
def edit_form(url):
if 'user_id' in session:
user_id = session['user_id']
paste = Paste.query.filter(Paste.url == url).first()
if is_active(paste):
if paste.user_id == user_id:
return render_template('editpaste.html')
return jsonify(success=False, reply="Not Authorized"), 400
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
return jsonify(success=False, reply="Please Login"), 400
@mod_paste.route('/<url>/edit', methods=['POST'])
@requires_auth
def edit_paste(url):
if 'user_id' in session:
user_id = session['user_id']
paste = Paste.query.filter(Paste.url == url).first()
if not is_active(paste):
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template('index.html'), 404
if paste.user_id != user_id:
return jsonify(success=False, reply="Not Authorized"), 400
title = request.form['title']
text = request.form['text']
lang = request.form['lang']
time_form = request.form['time']
paste_type = request.form['type']
expire_time = str(time_form)
paste.title = title
paste.text = text
paste.lang = lang
paste.expire_time = expire_time
paste.paste_type = paste_type
db.session.commit()
return jsonify(success=True, url=url)
return jsonify(success=False, reply="Please Login")
@mod_paste.route('/admin/pastes', methods=['GET'])
@requires_admin
def all_pastes():
paste_list = Paste.query.all()
url_pre = "/"
for paste in paste_list:
if is_active(paste):
paste.url = url_pre + paste.url
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template('allpaste.html', paste_list=paste_list)
@mod_paste.route('/<username>/paste', methods=['GET'])
@requires_admin
def get_user_pastes(username):
# user_id = session['user_id']
# pastes = paste.query.filter(paste.user_id == user_id).all()
if 'user_id' in session:
return render_template('user_paste.html')
else:
return jsonify({'error': 'Please Login to Continue'}), 400
# return jsonify(success=True, pastes=[paste.to_dict() for paste in
# pastes])
@mod_paste.route('/<username>/api/paste', methods=['POST'])
#@requires_admin
def get_user_pastes_object(username):
# admin_id = session['user_id']
# admin = User.query.filter(admin_id == User.id).first()
user = User.query.filter(User.username == username).first()
pastes = Paste.query.filter(Paste.user_id == user.id).all()
active = []
for paste in pastes:
if is_active(paste):
active.append(paste.to_dict())
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return jsonify({'paste_list': active, 'username': user.username}), 200
| [((885, 913), 'flask.Blueprint', 'Blueprint', (['"""paste"""', '__name__'], {}), "('paste', __name__)\n", (894, 913), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((914, 929), 'flask_cors.CORS', 'CORS', (['mod_paste'], {}), '(mod_paste)\n', (918, 929), False, 'from flask_cors import CORS\n'), ((516, 524), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (521, 524), False, 'from functools import wraps\n'), ((1190, 1242), 'flask.render_template', 'render_template', (['"""user.html"""'], {'username': 'user.username'}), "('user.html', username=user.username)\n", (1205, 1242), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((12287, 12331), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'reply': '"""Please Login"""'}), "(success=False, reply='Please Login')\n", (12294, 12331), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((12433, 12449), 'app.db.session.all', 'db.session.all', ([], {}), '()\n', (12447, 12449), False, 'from app import db, requires_auth\n'), ((12775, 12830), 'flask.render_template', 'render_template', (['"""allpaste.html"""'], {'paste_list': 'paste_list'}), "('allpaste.html', paste_list=paste_list)\n", (12790, 12830), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((962, 993), 'dateutil.parser.parse', 'parser.parse', (['paste.expire_time'], {}), '(paste.expire_time)\n', (974, 993), False, 'from dateutil import parser\n'), ((996, 1010), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1008, 1010), False, 'from datetime import datetime\n'), ((1666, 1680), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1678, 1680), False, 'from datetime import datetime\n'), ((1693, 1705), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1703, 1705), False, 'import uuid\n'), ((1943, 1964), 'app.db.session.add', 'db.session.add', (['paste'], {}), '(paste)\n', (1957, 1964), False, 'from app import db, requires_auth\n'), ((1967, 1986), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1984, 1986), False, 'from app import db, requires_auth\n'), ((2556, 2587), 'flask.render_template', 'render_template', (['"""mypaste.html"""'], {}), "('mypaste.html')\n", (2571, 2587), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((3289, 3347), 'flask.jsonify', 'jsonify', (["{'paste_list': active, 'username': user.username}"], {}), "({'paste_list': active, 'username': user.username})\n", (3296, 3347), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((3516, 3616), 'flask.render_template', 'render_template', (['"""embed.html"""'], {'paste_text': 'paste.text', 'paste_link': "('http://127.0.0.1:8080/' + url)"}), "('embed.html', paste_text=paste.text, paste_link=\n 'http://127.0.0.1:8080/' + url)\n", (3531, 3616), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((3777, 3801), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (3794, 3801), False, 'from app import db, requires_auth\n'), ((3804, 3823), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3821, 3823), False, 'from app import db, requires_auth\n'), ((4231, 4267), 'flask.render_template', 'render_template', 
(['"""embed_output.html"""'], {}), "('embed_output.html')\n", (4246, 4267), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((4433, 4457), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (4450, 4457), False, 'from app import db, requires_auth\n'), ((4460, 4479), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4477, 4479), False, 'from app import db, requires_auth\n'), ((7767, 7791), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (7784, 7791), False, 'from app import db, requires_auth\n'), ((7794, 7813), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (7811, 7813), False, 'from app import db, requires_auth\n'), ((9914, 9938), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (9931, 9938), False, 'from app import db, requires_auth\n'), ((9941, 9960), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9958, 9960), False, 'from app import db, requires_auth\n'), ((11269, 11313), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'reply': '"""Please Login"""'}), "(success=False, reply='Please Login')\n", (11276, 11313), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((12219, 12238), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (12236, 12238), False, 'from app import db, requires_auth\n'), ((12248, 12278), 'flask.jsonify', 'jsonify', ([], {'success': '(True)', 'url': 'url'}), '(success=True, url=url)\n', (12255, 12278), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((13065, 13099), 'flask.render_template', 'render_template', (['"""user_paste.html"""'], {}), "('user_paste.html')\n", (13080, 13099), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((13897, 13955), 'flask.jsonify', 'jsonify', (["{'paste_list': active, 'username': user.username}"], {}), "({'paste_list': active, 'username': user.username})\n", (13904, 13955), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((1136, 1173), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == curr_id)'], {}), '(User.id == curr_id)\n', (1153, 1173), False, 'from app.user.models import User\n'), ((2045, 2066), 'flask.jsonify', 'jsonify', (["{'url': url}"], {}), "({'url': url})\n", (2052, 2066), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((2509, 2546), 'flask.render_template', 'render_template', (['"""admin_mypaste.html"""'], {}), "('admin_mypaste.html')\n", (2524, 2546), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((2604, 2650), 'flask.jsonify', 'jsonify', (["{'error': 'Please Login to Continue'}"], {}), "({'error': 'Please Login to Continue'})\n", (2611, 2650), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((2870, 2907), 'app.user.models.User.query.filter', 'User.query.filter', (['(user_id == User.id)'], {}), '(user_id == User.id)\n', (2887, 2907), False, 'from app.user.models import User\n'), ((3128, 3171), 'app.user.models.User.query.filter', 'User.query.filter', (['(userid_to_red == User.id)'], {}), '(userid_to_red == User.id)\n', (3145, 3171), False, 
'from app.user.models import User\n'), ((3232, 3256), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (3249, 3256), False, 'from app import db, requires_auth\n'), ((3260, 3279), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3277, 3279), False, 'from app import db, requires_auth\n'), ((3833, 3862), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (3848, 3862), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((4489, 4518), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (4504, 4518), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((7098, 7135), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == user_id)'], {}), '(User.id == user_id)\n', (7115, 7135), False, 'from app.user.models import User\n'), ((7172, 7201), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (7187, 7201), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((7445, 7469), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (7462, 7469), False, 'from app import db, requires_auth\n'), ((7473, 7492), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (7490, 7492), False, 'from app import db, requires_auth\n'), ((7823, 7852), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (7838, 7852), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((8989, 9029), 'flask.render_template', 'render_template', (['"""view_paste_guest.html"""'], {}), "('view_paste_guest.html')\n", (9004, 9029), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((9200, 9224), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (9217, 9224), False, 'from app import db, requires_auth\n'), ((9228, 9247), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9245, 9247), False, 'from app import db, requires_auth\n'), ((9309, 9338), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (9324, 9338), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((9477, 9520), 'app.user.models.User.query.filter', 'User.query.filter', (['(paste.user_id == User.id)'], {}), '(paste.user_id == User.id)\n', (9494, 9520), False, 'from app.user.models import User\n'), ((9560, 9751), 'flask.jsonify', 'jsonify', (["{'paste_owner': user.username, 'paste_text': paste.text, 'paste_title':\n paste.title, 'paste_lang': paste.lang, 'paste_add': paste.add_time,\n 'paste_expire': paste.expire_time}"], {}), "({'paste_owner': user.username, 'paste_text': paste.text,\n 'paste_title': paste.title, 'paste_lang': paste.lang, 'paste_add':\n paste.add_time, 'paste_expire': paste.expire_time})\n", (9567, 9751), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((9970, 9999), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (9985, 9999), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((11168, 11192), 
'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (11185, 11192), False, 'from app import db, requires_auth\n'), ((11196, 11215), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (11213, 11215), False, 'from app import db, requires_auth\n'), ((11708, 11732), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (11725, 11732), False, 'from app import db, requires_auth\n'), ((11736, 11755), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (11753, 11755), False, 'from app import db, requires_auth\n'), ((12719, 12743), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (12736, 12743), False, 'from app import db, requires_auth\n'), ((12747, 12766), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (12764, 12766), False, 'from app import db, requires_auth\n'), ((13116, 13162), 'flask.jsonify', 'jsonify', (["{'error': 'Please Login to Continue'}"], {}), "({'error': 'Please Login to Continue'})\n", (13123, 13162), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((13465, 13509), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.username == username)'], {}), '(User.username == username)\n', (13482, 13509), False, 'from app.user.models import User\n'), ((13841, 13865), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (13858, 13865), False, 'from app import db, requires_auth\n'), ((13869, 13888), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (13886, 13888), False, 'from app import db, requires_auth\n'), ((599, 645), 'flask.jsonify', 'jsonify', ([], {'message': '"""Unauthorized"""', 'success': '(False)'}), "(message='Unauthorized', success=False)\n", (606, 645), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((691, 728), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == user_id)'], {}), '(User.id == user_id)\n', (708, 728), False, 'from app.user.models import User\n'), ((774, 820), 'flask.jsonify', 'jsonify', ([], {'message': '"""Unauthorized"""', 'success': '(False)'}), "(message='Unauthorized', success=False)\n", (781, 820), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((1485, 1528), 'app.user.models.User.query.filter', 'User.query.filter', (["(User.username == 'Guest')"], {}), "(User.username == 'Guest')\n", (1502, 1528), False, 'from app.user.models import User\n'), ((1845, 1882), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == user_id)'], {}), '(User.id == user_id)\n', (1862, 1882), False, 'from app.user.models import User\n'), ((2090, 2181), 'flask.jsonify', 'jsonify', (["{'error': 'Error while creating Paste, Please check if all fields are filled'}"], {}), "({'error':\n 'Error while creating Paste, Please check if all fields are filled'})\n", (2097, 2181), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((2427, 2464), 'app.user.models.User.query.filter', 'User.query.filter', (['(curr_id == User.id)'], {}), '(curr_id == User.id)\n', (2444, 2464), False, 'from app.user.models import User\n'), ((3667, 3710), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (3684, 3710), False, 'from app.user.models import User\n'), ((4323, 4366), 
'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (4340, 4366), False, 'from app.user.models import User\n'), ((7503, 7550), 'flask.jsonify', 'jsonify', ([], {'success': '(True)', 'user_type': 'user.user_type'}), '(success=True, user_type=user.user_type)\n', (7510, 7550), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((7574, 7596), 'flask.jsonify', 'jsonify', ([], {'success': '(False)'}), '(success=False)\n', (7581, 7596), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((7657, 7700), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (7674, 7700), False, 'from app.user.models import User\n'), ((9258, 9287), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (9273, 9287), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((9804, 9847), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (9821, 9847), False, 'from app.user.models import User\n'), ((10902, 10935), 'flask.render_template', 'render_template', (['"""editpaste.html"""'], {}), "('editpaste.html')\n", (10917, 10935), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((10946, 10992), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'reply': '"""Not Authorized"""'}), "(success=False, reply='Not Authorized')\n", (10953, 10992), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((11226, 11255), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (11241, 11255), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((11766, 11795), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (11781, 11795), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((11842, 11888), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'reply': '"""Not Authorized"""'}), "(success=False, reply='Not Authorized')\n", (11849, 11888), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((7333, 7376), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (7350, 7376), False, 'from app.user.models import User\n'), ((8863, 8897), 'flask.render_template', 'render_template', (['"""view_paste.html"""'], {}), "('view_paste.html')\n", (8878, 8897), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((8938, 8978), 'flask.render_template', 'render_template', (['"""view_paste_admin.html"""'], {}), "('view_paste_admin.html')\n", (8953, 8978), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((9088, 9131), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (9105, 9131), False, 'from app.user.models import User\n'), ((11056, 11099), 
'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (11073, 11099), False, 'from app.user.models import User\n'), ((11596, 11639), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (11613, 11639), False, 'from app.user.models import User\n'), ((12607, 12650), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (12624, 12650), False, 'from app.user.models import User\n'), ((13729, 13772), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (13746, 13772), False, 'from app.user.models import User\n'), ((8698, 8727), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (8713, 8727), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((8777, 8814), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == user_id)'], {}), '(User.id == user_id)\n', (8794, 8814), False, 'from app.user.models import User\n')] |
Apiquet/DeepLearningFrameworkFromScratch | control_drone/run_model_on_cam.py | 798ac42aa1a05286eb148576072e015fd94dbf94 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs a neural network model on a camera live stream
"""
import argparse
import cv2
import numpy as np
import os
import time
import sys
COMMANDS = {0: "move_forward", 1: "go_down", 2: "rot_10_deg",
3: "go_up", 4: "take_off", 5: "land", 6: "idle"}
def send_command(anafi, command_id):
"""
Send a command to an Anafi drone based on the command id
"""
if command_id not in COMMANDS:
raise f"Command id not in COMMANDS choices: {command_id}"
print("The following command will be sent: ", COMMANDS[command_id])
if COMMANDS[command_id] == "move_forward":
anafi.move_relative(dx=1, dy=0, dz=0, dradians=0)
if COMMANDS[command_id] == "go_down":
anafi.move_relative(dx=0, dy=0, dz=-0.5, dradians=0)
if COMMANDS[command_id] == "rot_10_deg":
anafi.move_relative(dx=0, dy=0, dz=0, dradians=0.785)
if COMMANDS[command_id] == "go_up":
anafi.move_relative(dx=0, dy=0, dz=0.5, dradians=0)
if COMMANDS[command_id] == "take_off":
anafi.safe_takeoff(5)
if COMMANDS[command_id] == "land":
anafi.safe_land(5)
return
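# Example usage (illustrative): send_command(anafi, 4) triggers a take-off,
# since COMMANDS[4] == "take_off"; send_command(anafi, 6) is "idle" and does nothing.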
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--weight_path",
required=True,
type=str,
help="Path to load weights for the model."
)
parser.add_argument(
"-a",
"--pyparrot_path",
required=True,
type=str,
help="Path to pyparrot module downloaded from amymcgovern on github."
)
parser.add_argument(
"-w",
"--img_width",
required=False,
default=28,
type=int,
help="Image width."
)
parser.add_argument(
"-n",
"--num_classes",
required=False,
default=7,
type=int,
help="Number of classes."
)
parser.add_argument(
"-c",
"--crop",
required=False,
default=None,
type=str,
help="Crop image, format: MinWidth,MaxWidth,MinHeight,MaxHeight.\
Set -1 for the unchanged ones"
)
parser.add_argument(
"-r",
"--resize",
required=False,
default=None,
type=str,
help="Resize shape, format: height,width"
)
parser.add_argument(
"-b",
"--binarize",
required=False,
default=None,
type=str,
help="To binarize images, format for thresholding: min,max"
)
parser.add_argument(
"-g",
"--gray",
required=False,
action="store_true",
help="To save 1-channel images"
)
parser.add_argument(
"-e",
"--erode",
required=False,
default=None,
type=str,
help="Erode option, format: kernel_size,iteration"
)
parser.add_argument(
"-d",
"--dilate",
required=False,
default=None,
type=str,
help="Dilate option, format: kernel_size,iteration"
)
parser.add_argument(
"-m",
"--camid",
required=False,
default=0,
type=int,
help="Camera ID, default is 0"
)
parser.add_argument(
"-t",
"--tensorflow",
required=False,
action="store_true",
help="To specify if Tensorflow model is used."
)
parser.add_argument(
"-z",
"--number_of_confimation",
required=False,
default=3,
type=int,
help="Minimum number of identical commands before sending to drone."
)
args = parser.parse_args()
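# Example invocation built from the flags defined above (paths and values are
# placeholders, for illustration only):
#   python run_model_on_cam.py -p weights.h5 -a ~/pyparrot \
#       -w 28 -n 7 -r 28,28 -b 100,255 -g -t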
"""
Drone connection
"""
sys.path.append(args.pyparrot_path)
from pyparrot.Anafi import Anafi
print("Connecting to drone...")
anafi = Anafi(drone_type="Anafi", ip_address="192.168.42.1")
success = anafi.connect(10)
print(success)
print("Sleeping few seconds...")
anafi.smart_sleep(3)
"""
Load model
"""
print("Loading model...")
input_size = args.img_width**2
num_class = args.num_classes
hidden_size = 128
if args.tensorflow:
import tensorflow as tf
model = tf.keras.models.load_model(args.weight_path)
else:
script_path = os.path.realpath(__file__)
sys.path.append(os.path.dirname(script_path) + "/../")
from homemade_framework import framework as NN
model = NN.Sequential([NN.Linear(input_size, hidden_size),
NN.LeakyReLU(), NN.BatchNorm(),
NN.Linear(hidden_size, hidden_size),
NN.LeakyReLU(), NN.BatchNorm(),
NN.Linear(hidden_size, num_class),
NN.Softmax()], NN.LossMSE())
model.load(args.weight_path)
"""
Webcam process
"""
print("Start webcam...")
cam = cv2.VideoCapture(args.camid)
ret, frame = cam.read()
min_height, max_height = 0, frame.shape[0]
min_width, max_width = 0, frame.shape[1]
print("Cam resolution: {}x{}".format(max_width, max_height))
if args.crop is not None:
res = [int(x) for x in args.crop.split(',')]
if res[0] != -1:
min_width = res[0]
if res[1] != -1:
max_width = res[1]
if res[2] != -1:
min_height = res[2]
if res[3] != -1:
max_height = res[3]
print("Image cropped to minWidth:maxWidth, minHeight:maxHeight: {}:{}\
, {},{}".format(min_width, max_width, min_height, max_height))
pause = False
imgs = []
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
if args.crop is not None:
frame = frame[min_height:max_height, min_width:max_width]
cv2.imshow("Original image", frame)
k = cv2.waitKey(1)
if k % 256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k % 256 == ord('p'):
# p pressed
if pause:
pause = False
else:
pause = True
if not pause:
if args.gray:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if args.binarize:
frame = cv2.medianBlur(frame, 5)
min_thresh, max_thresh = [int(x) for x in
args.binarize.split(',')]
ret, frame = cv2.threshold(frame, min_thresh, max_thresh,
cv2.THRESH_BINARY)
if args.erode is not None:
k_size, iteration = [int(x) for x in args.erode.split(',')]
kernel = np.ones((k_size, k_size), np.uint8)
frame = cv2.erode(frame, kernel, iterations=int(iteration))
if args.dilate is not None:
k_size, iteration = [int(x) for x in args.dilate.split(',')]
kernel = np.ones((k_size, k_size), np.uint8)
frame = cv2.dilate(frame, kernel, iterations=int(iteration))
if args.resize:
height, width = [int(size) for size in args.resize.split(',')]
frame = cv2.resize(frame, (height, width),
interpolation=cv2.INTER_AREA)
image = np.asarray(frame)/255.
cv2.imshow("Input image for the model", frame)
image = image.reshape([np.prod(image.shape)])
if len(imgs) < args.number_of_confimation:
imgs.append(image)
else:
if args.tensorflow:
results = np.argmax(model(np.asarray(imgs)), axis=1)
else:
results = NN.get_inferences(model, np.asarray(imgs))
print("Model's output on buffer: ", results)
if np.unique(results).size == 1 and\
COMMANDS[results[0]] != "idle":
send_command(anafi, results[0])
imgs = []
imgs = imgs[1:]
imgs.append(image)
time.sleep(0.3)
cam.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| [((1221, 1246), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1244, 1246), False, 'import argparse\n'), ((3712, 3747), 'sys.path.append', 'sys.path.append', (['args.pyparrot_path'], {}), '(args.pyparrot_path)\n', (3727, 3747), False, 'import sys\n'), ((3833, 3885), 'pyparrot.Anafi.Anafi', 'Anafi', ([], {'drone_type': '"""Anafi"""', 'ip_address': '"""192.168.42.1"""'}), "(drone_type='Anafi', ip_address='192.168.42.1')\n", (3838, 3885), False, 'from pyparrot.Anafi import Anafi\n'), ((4945, 4973), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.camid'], {}), '(args.camid)\n', (4961, 4973), False, 'import cv2\n'), ((8269, 8292), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8290, 8292), False, 'import cv2\n'), ((4224, 4268), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['args.weight_path'], {}), '(args.weight_path)\n', (4250, 4268), True, 'import tensorflow as tf\n'), ((4301, 4327), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4317, 4327), False, 'import os\n'), ((5898, 5933), 'cv2.imshow', 'cv2.imshow', (['"""Original image"""', 'frame'], {}), "('Original image', frame)\n", (5908, 5933), False, 'import cv2\n'), ((5947, 5961), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5958, 5961), False, 'import cv2\n'), ((4819, 4831), 'homemade_framework.framework.LossMSE', 'NN.LossMSE', ([], {}), '()\n', (4829, 4831), True, 'from homemade_framework import framework as NN\n'), ((7482, 7528), 'cv2.imshow', 'cv2.imshow', (['"""Input image for the model"""', 'frame'], {}), "('Input image for the model', frame)\n", (7492, 7528), False, 'import cv2\n'), ((8230, 8245), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (8240, 8245), False, 'import time\n'), ((4352, 4380), 'os.path.dirname', 'os.path.dirname', (['script_path'], {}), '(script_path)\n', (4367, 4380), False, 'import os\n'), ((4477, 4511), 'homemade_framework.framework.Linear', 'NN.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (4486, 4511), True, 'from homemade_framework import framework as NN\n'), ((4544, 4558), 'homemade_framework.framework.LeakyReLU', 'NN.LeakyReLU', ([], {}), '()\n', (4556, 4558), True, 'from homemade_framework import framework as NN\n'), ((4560, 4574), 'homemade_framework.framework.BatchNorm', 'NN.BatchNorm', ([], {}), '()\n', (4572, 4574), True, 'from homemade_framework import framework as NN\n'), ((4607, 4642), 'homemade_framework.framework.Linear', 'NN.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (4616, 4642), True, 'from homemade_framework import framework as NN\n'), ((4675, 4689), 'homemade_framework.framework.LeakyReLU', 'NN.LeakyReLU', ([], {}), '()\n', (4687, 4689), True, 'from homemade_framework import framework as NN\n'), ((4691, 4705), 'homemade_framework.framework.BatchNorm', 'NN.BatchNorm', ([], {}), '()\n', (4703, 4705), True, 'from homemade_framework import framework as NN\n'), ((4738, 4771), 'homemade_framework.framework.Linear', 'NN.Linear', (['hidden_size', 'num_class'], {}), '(hidden_size, num_class)\n', (4747, 4771), True, 'from homemade_framework import framework as NN\n'), ((4804, 4816), 'homemade_framework.framework.Softmax', 'NN.Softmax', ([], {}), '()\n', (4814, 4816), True, 'from homemade_framework import framework as NN\n'), ((6306, 6345), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (6318, 6345), False, 'import cv2\n'), ((6400, 6424), 'cv2.medianBlur', 
'cv2.medianBlur', (['frame', '(5)'], {}), '(frame, 5)\n', (6414, 6424), False, 'import cv2\n'), ((6580, 6643), 'cv2.threshold', 'cv2.threshold', (['frame', 'min_thresh', 'max_thresh', 'cv2.THRESH_BINARY'], {}), '(frame, min_thresh, max_thresh, cv2.THRESH_BINARY)\n', (6593, 6643), False, 'import cv2\n'), ((6827, 6862), 'numpy.ones', 'np.ones', (['(k_size, k_size)', 'np.uint8'], {}), '((k_size, k_size), np.uint8)\n', (6834, 6862), True, 'import numpy as np\n'), ((7081, 7116), 'numpy.ones', 'np.ones', (['(k_size, k_size)', 'np.uint8'], {}), '((k_size, k_size), np.uint8)\n', (7088, 7116), True, 'import numpy as np\n'), ((7326, 7390), 'cv2.resize', 'cv2.resize', (['frame', '(height, width)'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, (height, width), interpolation=cv2.INTER_AREA)\n', (7336, 7390), False, 'import cv2\n'), ((7447, 7464), 'numpy.asarray', 'np.asarray', (['frame'], {}), '(frame)\n', (7457, 7464), True, 'import numpy as np\n'), ((7564, 7584), 'numpy.prod', 'np.prod', (['image.shape'], {}), '(image.shape)\n', (7571, 7584), True, 'import numpy as np\n'), ((7881, 7897), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (7891, 7897), True, 'import numpy as np\n'), ((7777, 7793), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (7787, 7793), True, 'import numpy as np\n'), ((7979, 7997), 'numpy.unique', 'np.unique', (['results'], {}), '(results)\n', (7988, 7997), True, 'import numpy as np\n')] |
rmsare/cs231a-project | classify_images.py | 91776ada3512d3805de0e66940c9f1c5b3c4c641 | """
Classification of pixels in images using color and other features.
General pipeline usage:
1. Load and segment images (img_utils.py)
2. Prepare training data (label_image.py)
3. Train classifier or cluster data (sklearn KMeans, MeanShift, SVC, etc.)
4. Predict labels on new image or directory (classify_directory())
5. Apply classification to 3D points and estimate ground plane orientation (process_pointcloud.py)
Project uses the following directory structure:
images/ - contains binary files of numpy arrays corresponding to survey images and segmentations
labelled/ - contains labelled ground truth images or training data
results/ - contains results of classification
I store randomly split training and testing images in test/ and train/ directories.
Author: Robert Sare
E-mail: [email protected]
Date: 8 June 2017
"""
import numpy as np
import matplotlib.pyplot as plt
import skimage.color, skimage.io
from skimage.segmentation import mark_boundaries
from sklearn.svm import SVC
from sklearn.cluster import KMeans, MeanShift
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
import os, fnmatch
def classify_directory(classifier, test_dir, train_dir='train/'):
"""
Classify all images in a directory using an arbitrary sklearn classifier.
Saves results to results/ directory.
"""
# XXX: This is here if the classifier needs to be trained from scratch
#print("Preparing training data...")
#n_samples = 1000
#train_data, train_labels = load_training_images(train_dir, n_samples)
#
#print("Training classifier...")
#classifier = ImageSVC()
#classifier.fit(train_data, train_labels)
files = os.listdir(test_dir)
for f in files:
        image = skimage.io.imread(os.path.join(test_dir, f))
height, width, depth = image.shape
print("Predicting labels for " + f.strip('.JPG') + ".jpg")
features = compute_colorxy_features(image)
features /= features.max(axis=0)
pred_labels = classifier.predict(features)
print("Saving predictions for " + f.strip('.JPG') + ".jpg")
plt.figure()
plt.imshow(image)
plt.imshow(pred_labels.reshape((height, width)), alpha=0.5, vmin=0, vmax=2)
plt.show(block=False)
plt.savefig('results/' + f.strip('.JPG') + '_svm_pred.png')
plt.close()
np.save('results/' + f.strip('.JPG') + 'svm.npy', pred_labels.reshape((height,width)))
def compute_colorxy_features(image):
"""
Extract and normalize color and pixel location features from image data
"""
height, width, depth = image.shape
    colors = skimage.color.rgb2lab(image.reshape((height*width, depth)))
X, Y = np.meshgrid(np.arange(height), np.arange(width))
xy = np.hstack([X.reshape((height*width, 1)), Y.reshape((height*width, 1))])
colorxy = np.hstack([xy, colors])
colorxy /= colorxy.max(axis=0)
return colorxy
def load_ground_truth(filename):
"""
Load ground truth or training image array and redefine labelling for nice
default colors
"""
truth = np.load(filename)
    # Change labels for nice default colorscale when plotted:
    # shift labels down by one, fold the resulting -1 values back to 0,
    # then swap classes 0 and 2 (using 5 as a temporary value)
truth = truth - 1
truth[truth == -1] = 0
truth[truth == 0] = 5
truth[truth == 2] = 0
truth[truth == 5] = 2
return truth
def load_image_labels(name):
"""
Load image and labels from previous labelling session
"""
fname = 'images/' + name + '_image.npy'
image = np.load(fname)
fname = 'labelled/' + name + '_labels.npy'
labels = np.load(fname)
return image, labels
def plot_class_image(image, segments, labels):
"""
Display image with segments and class label overlay
"""
plt.figure()
plt.subplot(1,2,1)
plt.imshow(mark_boundaries(image, segments, color=(1,0,0), mode='thick'))
plt.title('segmented image')
plt.subplot(1,2,2)
plt.imshow(image)
plt.imshow(labels, alpha=0.75)
cb = plt.colorbar(orientation='horizontal', shrink=0.5)
plt.title('predicted class labels')
plt.show(block=False)
def load_training_images(train_dir, n_samples=1000, n_features=5):
"""
Load training images from directory and subsample for training or validation
"""
train_data = np.empty((0, n_features))
train_labels = np.empty(0)
files = os.listdir(train_dir)
for f in files:
name = parse_filename(f)
image, labels = load_image_labels(name)
ht, wid, depth = image.shape
        train_data = np.append(train_data,
                           compute_colorxy_features(image), axis=0)
train_labels = np.append(train_labels,
labels.reshape(wid*ht, 1).ravel())
train_data, train_labels = shuffle(train_data, train_labels,
random_state=0, n_samples=n_samples)
return train_data, train_labels
def save_prediction(name, pred_labels):
"""
Save predicted class labels
"""
np.save('results/' + name + '_pred', pred_labels)
if __name__ == "__main__":
# Load training data
train_dir = 'train/'
test_dir = 'test/'
    train_data, train_labels = load_training_images(train_dir)
# Train classifier
clf = SVC()
clf.fit(train_data, train_labels)
# Predict labels for test images
classify_directory(clf, test_dir)
| [] |
ramild/allennlp-guide | quick_start/my_text_classifier/predictors/sentence_classifier_predictor.py | 4cff916e7bc4629184bc70594e213ef56e14ec70 | from allennlp.common import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.predictors import Predictor
from overrides import overrides
@Predictor.register("sentence_classifier")
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence)
| [((198, 239), 'allennlp.predictors.Predictor.register', 'Predictor.register', (['"""sentence_classifier"""'], {}), "('sentence_classifier')\n", (216, 239), False, 'from allennlp.predictors import Predictor\n')] |
j-danner/autoguess | ciphers/SKINNY-TK2/SKINNY-TK2/skinnytk2.py | 712a8dcfb259a277b2b2a499bd7c5fc4aab97b67 | # Created on Sep 7, 2020
# author: Hosein Hadipour
# contact: [email protected]
import os
output_dir = os.path.curdir
def skinnytk2(R=1):
"""
    This function generates the relations of Skinny-n-2n (SKINNY under the TK2 tweakey setting) for R rounds.
tk ================================================> TWEAKEY_P(tk) ===> ---
SB AC | P MC SB AC |
x_0 ===> x_0 ===> x_0 ===> + ===> y_0 ===> P(y_0) ===> x_1 ===> x_1 ===> x_1 ===> + ===> y_1 ===> ---
"""
cipher_name = 'skinnytk2'
P = [0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12]
TKP = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7]
tk1 = ['tk1_%d' % i for i in range(16)]
tk2 = ['tk2_%d' % i for i in range(16)]
# 1 round
# recommended_mg = 8
# recommended_ms = 4
# 2 rounds
# recommended_mg = 16
# recommended_ms = 8
# 3 rounds
# recommended_mg = 19
# recommended_ms = 24
# 4 rounds
# recommended_mg = 21
# recommended_ms = 27
# 5 rounds
# recommended_mg = 22
# recommended_ms = 35
# 6 rounds
# recommended_mg = 25
# recommended_ms = 40
# 7 rounds
# recommended_mg = 26
# recommended_ms = 70
# 8 rounds
# recommended_mg = 28
# recommended_ms = 80
# 9 rounds
# recommended_mg = 28
# recommended_ms = 100
# 10 rounds
recommended_mg = 30
recommended_ms = 100
# 11 rounds
# recommended_mg = 31
# recommended_ms = 100
eqs = '#%s %d Rounds\n' % (cipher_name, R)
eqs += 'connection relations\n'
for r in range(R):
xin = ['x_%d_%d' % (r, i) for i in range(16)]
xout = ['x_%d_%d' % (r + 1, i) for i in range(16)]
y = ['y_%d_%d' % (r, i) for i in range(16)]
tk = ['tk_%d_%d' % (r, i) for i in range(8)]
        # Generate AddTweakey relations
for i in range(4):
for j in range(4):
if i < 2:
eqs += '%s, %s, %s\n' % (tk1[j + 4*i], tk2[j + 4*i], tk[j + 4*i])
eqs += '%s, %s, %s\n' % (xin[j + 4*i], tk[j + 4*i], y[j + 4*i])
else:
eqs += '%s, %s\n' % (xin[j + 4*i], y[j + 4*i])
# Apply ShiftRows
py = [y[P[i]] for i in range(16)]
# Generate MixColumn relations
for j in range(4):
eqs += '%s, %s, %s, %s\n' % (py[j + 0*4], py[j + 2*4], py[j + 3*4], xout[j + 0*4])
eqs += '%s, %s\n' % (py[j], xout[j + 1*4])
eqs += '%s, %s, %s\n' % (py[j + 1*4], py[j + 2*4], xout[j + 2*4])
eqs += '%s, %s, %s\n' % (py[j + 0*4], py[j + 2*4], xout[j + 3*4])
# Update Tweakey
temp1 = tk1.copy()
temp2 = tk2.copy()
tk1 = [temp1[TKP[i]] for i in range(16)]
tk2 = [temp2[TKP[i]] for i in range(16)]
plaintext = ['x_0_%d' % i for i in range(16)]
ciphertext = ['x_%d_%d' % (R, i) for i in range(16)]
eqs += 'known\n' + '\n'.join(plaintext + ciphertext)
eqs += '\nend'
relation_file_path = os.path.join(output_dir, 'relationfile_%s_%dr_mg%d_ms%d.txt' % (cipher_name, R, recommended_mg, recommended_ms))
with open(relation_file_path, 'w') as relation_file:
relation_file.write(eqs)
def main():
skinnytk2(R=10)
if __name__ == '__main__':
main()
| [((3078, 3194), 'os.path.join', 'os.path.join', (['output_dir', "('relationfile_%s_%dr_mg%d_ms%d.txt' % (cipher_name, R, recommended_mg,\n recommended_ms))"], {}), "(output_dir, 'relationfile_%s_%dr_mg%d_ms%d.txt' % (cipher_name,\n R, recommended_mg, recommended_ms))\n", (3090, 3194), False, 'import os\n')] |
visr/bmi-python | tests/test_bmipy.py | 0fcca448d097bc001f7492094ce1fd95d041b81d | import pytest
from bmipy import Bmi
class EmptyBmi(Bmi):
def __init__(self):
pass
def initialize(self, config_file):
pass
def update(self):
pass
def update_until(self, then):
pass
def finalize(self):
pass
def get_var_type(self, var_name):
pass
def get_var_units(self, var_name):
pass
def get_var_nbytes(self, var_name):
pass
def get_var_itemsize(self, name):
pass
def get_var_location(self, name):
pass
def get_var_grid(self, var_name):
pass
def get_grid_rank(self, grid_id):
pass
def get_grid_size(self, grid_id):
pass
def get_value_ptr(self, var_name):
pass
def get_value(self, var_name):
pass
def get_value_at_indices(self, var_name, indices):
pass
def set_value(self, var_name, src):
pass
def set_value_at_indices(self, var_name, src, indices):
pass
def get_component_name(self):
pass
def get_input_item_count(self):
pass
def get_output_item_count(self):
pass
def get_input_var_names(self):
pass
def get_output_var_names(self):
pass
def get_grid_shape(self, grid_id):
pass
def get_grid_spacing(self, grid_id):
pass
def get_grid_origin(self, grid_id):
pass
def get_grid_type(self, grid_id):
pass
def get_start_time(self):
pass
def get_end_time(self):
pass
def get_current_time(self):
pass
def get_time_step(self):
pass
def get_time_units(self):
pass
def get_grid_edge_count(self, grid):
pass
def get_grid_edge_nodes(self, grid, edge_nodes):
pass
def get_grid_face_count(self, grid):
pass
def get_grid_face_nodes(self, grid, face_nodes):
pass
def get_grid_face_edges(self, grid, face_edges):
pass
def get_grid_node_count(self, grid):
pass
def get_grid_nodes_per_face(self, grid, nodes_per_face):
pass
def get_grid_x(self, grid, x):
pass
def get_grid_y(self, grid, y):
pass
def get_grid_z(self, grid, z):
pass
def test_bmi_not_implemented():
class MyBmi(Bmi):
pass
with pytest.raises(TypeError):
Bmi()
def test_bmi_implemented():
assert isinstance(EmptyBmi(), Bmi)
| [((2338, 2362), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2351, 2362), False, 'import pytest\n'), ((2372, 2377), 'bmipy.Bmi', 'Bmi', ([], {}), '()\n', (2375, 2377), False, 'from bmipy import Bmi\n')] |
Sphynx-HenryAY/scrapy-compose | scrapy_compose/fields/parser/string_field.py | bac45ee51bf4a49b3d4a9902767a17072137f869 |
from scrapy_compose.utils.context import realize
from .field import FuncField as BaseField
class StringField( BaseField ):
process_timing = [ "post_pack" ]
def __init__( self, key = None, value = None, selector = None, **kwargs ):
#unify value format
if isinstance( value, str ):
value = { "_type": "string", "value": value }
super( StringField, self ).__init__( key = key, value = value, selector = selector, **kwargs )
def make_field( self, selector, key = None, value = None, **kwargs ):
return { realize( selector, key ): self.post_pack( realize( selector, value ) ) }
| [((519, 541), 'scrapy_compose.utils.context.realize', 'realize', (['selector', 'key'], {}), '(selector, key)\n', (526, 541), False, 'from scrapy_compose.utils.context import realize\n'), ((561, 585), 'scrapy_compose.utils.context.realize', 'realize', (['selector', 'value'], {}), '(selector, value)\n', (568, 585), False, 'from scrapy_compose.utils.context import realize\n')] |
vincentmuya/News-highlight | app/request.py | 67f61bb0bea69ec004c11a2148c62cd892a19615 | import urllib.request
import json
from .models import News
# Getting api key
api_key = None
# Getting the news base url
base_url = None
def configure_request(app):
global api_key,base_url
api_key = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_API_BASE_URL']
def get_news_source(country,category):
'''
Function that gets the json response to our url request
'''
get_news_source_url = base_url.format(country,category,api_key)
    with urllib.request.urlopen(get_news_source_url) as url:
get_news_source_data = url.read()
get_news_source_response = json.loads(get_news_source_data)
print(get_news_source_response)
source_result = None
if get_news_source_response['articles']:
source_result_list = get_news_source_response['articles']
source_result = process_result(source_result_list)
return source_result
def process_result(source_list):
'''
    This function processes the results and converts them into a list of News objects.
    The source list is a list of dictionaries containing news results.
'''
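    # Illustrative shape of one source_item, inferred from the .get() calls
    # below (real NewsAPI payloads may carry additional keys):
    #     {'source': {...}, 'author': ..., 'title': ..., 'description': ...,
    #      'url': ..., 'urlToImage': ..., 'publishedAt': ...}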
source_result= []
for source_item in source_list:
source = source_item.get('source')
author = source_item.get('author')
title = source_item.get('title')
description = source_item.get('description')
url = source_item.get('url')
urlToImage = source_item.get('urlToImage')
publishedAt = source_item.get('publishedAt')
if urlToImage:
source_object = News(source,author,title,description,url,urlToImage,publishedAt)
source_result.append(source_object)
return source_result
def get_news(source):
get_news_details_url = base_url.format(source,api_key)
with urllib.request.urlopen(get_news_details_url) as url:
news_details_data = url.read()
news_details_response = json.loads(news_details_data)
news_object = None
if news_details_response:
        source = news_details_response.get('source')
        author = news_details_response.get('original_author')
        title = news_details_response.get('title')
        description = news_details_response.get('description')
        url = news_details_response.get('url')
        urlToImage = news_details_response.get('urlToImage')
        publishedAt = news_details_response.get('publishedAt')
        news_object = News(source, author, title, description, url, urlToImage, publishedAt)
return news_object
| [((604, 636), 'json.loads', 'json.loads', (['get_news_source_data'], {}), '(get_news_source_data)\n', (614, 636), False, 'import json\n'), ((1880, 1909), 'json.loads', 'json.loads', (['news_details_data'], {}), '(news_details_data)\n', (1890, 1909), False, 'import json\n')] |
christian-jacobsen/hypernet | hypernet/src/thermophysicalModels/reactionThermo/mixture/multiComponent.py | 9f62e1531eb152cc08af0b0c6b09d6fde8d42400 | import numpy as np
from hypernet.src.general import const
from hypernet.src.general import utils
from hypernet.src.thermophysicalModels.reactionThermo.mixture import Basic
class MultiComponent(Basic):
# Initialization
###########################################################################
def __init__(
self,
specieThermos,
*args,
**kwargs
):
super(MultiComponent, self).__init__(specieThermos)
# Methods
###########################################################################
# Mixture properties ------------------------------------------------------
def update(self, XY, var='Y'):
# Update mass/molar fractions
for name, value in XY.items():
value = utils.check_XY(utils.convert_to_array(value))
setattr(self.spTh[name].specie, var, value)
# Update mixture/species properties
self.M = self.M_(var=var)
if var == 'Y':
self.Xi_()
elif var == 'X':
self.Yi_()
self.R = self.R_()
# Mixture properties ------------------------------------------------------
    # Molar mass
def M_(self, var='Y'):
# [kg/mol]
if var == 'Y':
M = [spTh.specie.Y / spTh.specie.M for spTh in self.spTh.values()]
return 1./np.sum(np.concatenate(M))
elif var == 'X':
M = [spTh.specie.X * spTh.specie.M for spTh in self.spTh.values()]
return np.sum(np.concatenate(M))
# Specific gas constant
def R_(self):
R = [spTh.specie.Y * spTh.specie.R for spTh in self.spTh.values()]
return np.sum(np.concatenate(R))
# Pressure
def p_(self, rho, T):
return rho*self.R*T
# Density
def rho_(self, p, T):
return p/(self.R*T)
# Number density
def n_(self, rho):
self.ni_(rho=rho, var='Y')
n = [spTh.specie.n for spTh in self.spTh.values()]
return np.sum(np.concatenate(n))
# Enthalpy/Energy
def he_(self):
# [J/kg]
he = [spTh.specie.Y * spTh.thermo.he for spTh in self.spTh.values()]
return np.sum(np.concatenate(he))
def cpv_(self):
# [J/(kg K)]
cpv = [spTh.specie.Y * spTh.thermo.cpv for spTh in self.spTh.values()]
return np.sum(np.concatenate(cpv))
def dcpvdT_(self):
# [J/kg]
dcpvdT = [
spTh.specie.Y * spTh.thermo.dcpvdT for spTh in self.spTh.values()
]
return np.sum(np.concatenate(dcpvdT))
def dhedY_(self, dY):
# [J/kg]
dhedY = [
np.sum(dY[name] * spTh.thermo.he) \
for name, spTh in self.spTh.items()
]
return np.sum(dhedY)
# Species properties ------------------------------------------------------
def Yi_(self):
for spTh_ in self.spTh.values():
sp = spTh_.specie
sp.Y = sp.X * sp.M / self.M
def Xi_(self):
for spTh_ in self.spTh.values():
sp = spTh_.specie
sp.X = sp.Y * self.M / sp.M
def ni_(self, rho=None, n=None, var='Y'):
for spTh_ in self.spTh.values():
sp = spTh_.specie
if var == 'Y':
sp.n = sp.Y * rho / sp.M * const.UNA
elif var == 'X':
sp.n = sp.X * n
def rhoi_(self, rho=None, n=None, var='Y'):
for spTh_ in self.spTh.values():
sp = spTh_.specie
if var == 'Y':
sp.rho = sp.Y * rho
elif var == 'X':
sp.rho = sp.X * n * sp.M / const.UNA
| [((2710, 2723), 'numpy.sum', 'np.sum', (['dhedY'], {}), '(dhedY)\n', (2716, 2723), True, 'import numpy as np\n'), ((1649, 1666), 'numpy.concatenate', 'np.concatenate', (['R'], {}), '(R)\n', (1663, 1666), True, 'import numpy as np\n'), ((1968, 1985), 'numpy.concatenate', 'np.concatenate', (['n'], {}), '(n)\n', (1982, 1985), True, 'import numpy as np\n'), ((2145, 2163), 'numpy.concatenate', 'np.concatenate', (['he'], {}), '(he)\n', (2159, 2163), True, 'import numpy as np\n'), ((2308, 2327), 'numpy.concatenate', 'np.concatenate', (['cpv'], {}), '(cpv)\n', (2322, 2327), True, 'import numpy as np\n'), ((2499, 2521), 'numpy.concatenate', 'np.concatenate', (['dcpvdT'], {}), '(dcpvdT)\n', (2513, 2521), True, 'import numpy as np\n'), ((2597, 2630), 'numpy.sum', 'np.sum', (['(dY[name] * spTh.thermo.he)'], {}), '(dY[name] * spTh.thermo.he)\n', (2603, 2630), True, 'import numpy as np\n'), ((782, 811), 'hypernet.src.general.utils.convert_to_array', 'utils.convert_to_array', (['value'], {}), '(value)\n', (804, 811), False, 'from hypernet.src.general import utils\n'), ((1337, 1354), 'numpy.concatenate', 'np.concatenate', (['M'], {}), '(M)\n', (1351, 1354), True, 'import numpy as np\n'), ((1486, 1503), 'numpy.concatenate', 'np.concatenate', (['M'], {}), '(M)\n', (1500, 1503), True, 'import numpy as np\n')] |
Roger-Takeshita/Software_Engineer | Exercises/W08D04_Exercise_01_Django_Cat_Collector/main_app/models.py | ec647bb969aa02453dae1884b5787d2045f7b4e2 | from django.db import models
from django.urls import reverse
from datetime import date
from django.contrib.auth.models import User #! 1 - Import user models
MEALS = (
('B', 'Breakfast'),
('L', 'Lunch'),
('D', 'Dinner')
)
class Toy(models.Model):
name = models.CharField(max_length=50)
color = models.CharField(max_length=20)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('toys_detail', kwargs={'pk': self.id})
class Cat(models.Model):
name = models.CharField(max_length=100)
breed = models.CharField(max_length=100)
description = models.TextField(max_length=250)
age = models.IntegerField()
toys = models.ManyToManyField(Toy)
user = models.ForeignKey(User, on_delete=models.CASCADE) #+ 1.1 - Add user as ForeignKey (URL mapping, we use this to point to our class based views)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('detail', kwargs={'cat_id': self.id})
def fed_for_today(self):
return self.feeding_set.filter(date=date.today()).count() >= len(MEALS)
class Feeding(models.Model):
date = models.DateField('feeding date')
meal = models.CharField(
max_length=1,
choices=MEALS,
default=MEALS[0][0]
)
cat = models.ForeignKey(Cat, on_delete=models.CASCADE)
def __str__(self):
# Nice method for obtaining the friendly value of a Field.choice
return f"{self.get_meal_display()} on {self.date}"
# change the default sort
class Meta:
ordering = ['-date']
class Photo(models.Model):
url = models.CharField(max_length=200)
cat = models.ForeignKey(Cat, on_delete=models.CASCADE)
def __str__(self):
return f"Photo for cat_id: {self.cat_id} @{self.url}" | [((291, 322), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (307, 322), False, 'from django.db import models\n'), ((333, 364), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (349, 364), False, 'from django.db import models\n'), ((532, 564), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (548, 564), False, 'from django.db import models\n'), ((575, 607), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (591, 607), False, 'from django.db import models\n'), ((624, 656), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (640, 656), False, 'from django.db import models\n'), ((665, 686), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (684, 686), False, 'from django.db import models\n'), ((696, 723), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Toy'], {}), '(Toy)\n', (718, 723), False, 'from django.db import models\n'), ((733, 782), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (750, 782), False, 'from django.db import models\n'), ((1159, 1191), 'django.db.models.DateField', 'models.DateField', (['"""feeding date"""'], {}), "('feeding date')\n", (1175, 1191), False, 'from django.db import models\n'), ((1201, 1267), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'MEALS', 'default': 'MEALS[0][0]'}), '(max_length=1, choices=MEALS, default=MEALS[0][0])\n', (1217, 1267), False, 'from django.db import models\n'), ((1292, 1340), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Cat'], {'on_delete': 'models.CASCADE'}), '(Cat, on_delete=models.CASCADE)\n', (1309, 1340), False, 'from django.db import models\n'), ((1591, 1623), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1607, 1623), False, 'from django.db import models\n'), ((1632, 1680), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Cat'], {'on_delete': 'models.CASCADE'}), '(Cat, on_delete=models.CASCADE)\n', (1649, 1680), False, 'from django.db import models\n'), ((450, 496), 'django.urls.reverse', 'reverse', (['"""toys_detail"""'], {'kwargs': "{'pk': self.id}"}), "('toys_detail', kwargs={'pk': self.id})\n", (457, 496), False, 'from django.urls import reverse\n'), ((970, 1015), 'django.urls.reverse', 'reverse', (['"""detail"""'], {'kwargs': "{'cat_id': self.id}"}), "('detail', kwargs={'cat_id': self.id})\n", (977, 1015), False, 'from django.urls import reverse\n'), ((1084, 1096), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1094, 1096), False, 'from datetime import date\n')] |
kedro-org/kedro-plugins | kedro-airflow/kedro_airflow/__init__.py | ad0755f503b275b73aeb8feb592a0ec0ea1bca8e | """ Kedro plugin for running a project with Airflow """
__version__ = "0.5.0"
| [] |
AniruddhaG123/robocup-software | soccer/gameplay/plays/testing/debug_window_evaluator.py | 0eb3b3957428894f2f39341594800be803665f44 | import play
import behavior
import main
import robocup
import constants
import time
import math
## This isn't a real play, but it's pretty useful
# Turn it on and we'll draw the window evaluator stuff on-screen from the ball to our goal
class DebugWindowEvaluator(play.Play):
def __init__(self):
super().__init__(continuous=True)
self.add_transition(behavior.Behavior.State.start,
behavior.Behavior.State.running, lambda: True,
'immediately')
def execute_running(self):
win_eval = robocup.WindowEvaluator(main.context())
win_eval.debug = True
windows, best = win_eval.eval_pt_to_our_goal(main.ball().pos)
| [((597, 611), 'main.context', 'main.context', ([], {}), '()\n', (609, 611), False, 'import main\n'), ((696, 707), 'main.ball', 'main.ball', ([], {}), '()\n', (705, 707), False, 'import main\n')] |
zkbt/rainbow-connection | rainbowconnection/sources/phoenix/utils.py | 53828fd0b63a552a22a6aa38393cefda27c61b9a | from ...imports import *
def stringify_metallicity(Z):
"""
Convert a metallicity into a PHOENIX-style string.
Parameters
----------
Z : float
[Fe/H]-style metallicity (= 0.0 for solar)
"""
if Z <= 0:
return "-{:03.1f}".format(np.abs(Z))
else:
return "+{:03.1f}".format(Z)
| [] |
awfssv/ShiPanE-Python-SDK | shipane_sdk/transaction.py | 678790e5eb220cf685e5f8d03ba3310f3fbb8d22 | # -*- coding: utf-8 -*-
class Transaction(object):
def __init__(self, **kwargs):
self._completed_at = kwargs.get('completed_at')
self._type = kwargs.get('type')
self._symbol = kwargs.get('symbol')
self._price = kwargs.get('price')
self._amount = kwargs.get('amount')
def __eq__(self, other):
if self.completed_at != other.completed_at:
return False
if self.type != other.type:
return False
if self.symbol != other.symbol:
return False
if self.price != other.price:
return False
if self.amount != other.amount:
return False
return True
    def get_cn_type(self):
        # Returns the Chinese label for the trade side: '买入' = buy, '卖出' = sell
        return u'买入' if self.type == 'BUY' else u'卖出'
@property
def completed_at(self):
return self._completed_at
@completed_at.setter
def completed_at(self, value):
self._completed_at = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def symbol(self):
return self._symbol
@symbol.setter
def symbol(self, value):
self._symbol = value
@property
def price(self):
return self._price
@price.setter
def price(self, value):
self._price = value
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
| [] |
wataruhashimoto52/probability | tensorflow_probability/python/distributions/laplace_test.py | 12e3f256544eadea6e863868da825614f4423eb0 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats as sp_stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
@test_util.test_all_tf_execution_regimes
class LaplaceTest(test_util.TestCase):
def testLaplaceShape(self):
loc = tf.constant([3.0] * 5)
scale = tf.constant(11.0)
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
self.assertEqual(self.evaluate(laplace.batch_shape_tensor()), (5,))
self.assertEqual(laplace.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(laplace.event_shape_tensor()), [])
self.assertEqual(laplace.event_shape, tf.TensorShape([]))
def testLaplaceLogPDF(self):
batch_size = 6
loc = tf.constant([2.0] * batch_size)
scale = tf.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
log_pdf = laplace.log_prob(x)
self.assertEqual(log_pdf.shape, (6,))
expected_log_pdf = sp_stats.laplace.logpdf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(log_pdf), expected_log_pdf)
pdf = laplace.prob(x)
self.assertEqual(pdf.shape, (6,))
self.assertAllClose(self.evaluate(pdf), np.exp(expected_log_pdf))
def testLaplaceLogPDFMultidimensional(self):
batch_size = 6
loc = tf.constant([[2.0, 4.0]] * batch_size)
scale = tf.constant([[3.0, 4.0]] * batch_size)
loc_v = np.array([2.0, 4.0])
scale_v = np.array([3.0, 4.0])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
log_pdf = laplace.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.shape, (6, 2))
pdf = laplace.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.shape, (6, 2))
expected_log_pdf = sp_stats.laplace.logpdf(x, loc_v, scale=scale_v)
self.assertAllClose(log_pdf_values, expected_log_pdf)
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testLaplaceLogPDFMultidimensionalBroadcasting(self):
batch_size = 6
loc = tf.constant([[2.0, 4.0]] * batch_size)
scale = tf.constant(3.0)
loc_v = np.array([2.0, 4.0])
scale_v = 3.0
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
log_pdf = laplace.log_prob(x)
log_pdf_values = self.evaluate(log_pdf)
self.assertEqual(log_pdf.shape, (6, 2))
pdf = laplace.prob(x)
pdf_values = self.evaluate(pdf)
self.assertEqual(pdf.shape, (6, 2))
expected_log_pdf = sp_stats.laplace.logpdf(x, loc_v, scale=scale_v)
self.assertAllClose(log_pdf_values, expected_log_pdf)
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testLaplaceCDF(self):
batch_size = 6
loc = tf.constant([2.0] * batch_size)
scale = tf.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
cdf = laplace.cdf(x)
self.assertEqual(cdf.shape, (6,))
expected_cdf = sp_stats.laplace.cdf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(cdf), expected_cdf)
def testLaplaceLogCDF(self):
batch_size = 6
loc = tf.constant([2.0] * batch_size)
scale = tf.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
cdf = laplace.log_cdf(x)
self.assertEqual(cdf.shape, (6,))
expected_cdf = sp_stats.laplace.logcdf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(cdf), expected_cdf)
def testLaplaceQuantile(self):
qs = self.evaluate(
tf.concat(
[[0., 1],
samplers.uniform([10], minval=.1, maxval=.9,
seed=test_util.test_seed())],
axis=0))
d = tfd.Laplace(loc=1., scale=1.3, validate_args=True)
vals = d.quantile(qs)
self.assertAllClose([-np.inf, np.inf], vals[:2])
self.assertAllClose(qs[2:], d.cdf(vals[2:]))
def testLaplaceLogSurvivalFunction(self):
batch_size = 6
loc = tf.constant([2.0] * batch_size)
scale = tf.constant([3.0] * batch_size)
loc_v = 2.0
scale_v = 3.0
x = np.array([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
sf = laplace.log_survival_function(x)
self.assertEqual(sf.shape, (6,))
expected_sf = sp_stats.laplace.logsf(x, loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(sf), expected_sf)
def testLaplaceMean(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
self.assertEqual(laplace.mean().shape, (3,))
expected_means = sp_stats.laplace.mean(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.mean()), expected_means)
def testLaplaceMode(self):
loc_v = np.array([0.5, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
self.assertEqual(laplace.mode().shape, (3,))
self.assertAllClose(self.evaluate(laplace.mode()), loc_v)
def testLaplaceVariance(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
self.assertEqual(laplace.variance().shape, (3,))
expected_variances = sp_stats.laplace.var(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.variance()), expected_variances)
def testLaplaceStd(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
self.assertEqual(laplace.stddev().shape, (3,))
expected_stddev = sp_stats.laplace.std(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.stddev()), expected_stddev)
def testLaplaceEntropy(self):
loc_v = np.array([1.0, 3.0, 2.5])
scale_v = np.array([1.0, 4.0, 5.0])
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
self.assertEqual(laplace.entropy().shape, (3,))
expected_entropy = sp_stats.laplace.entropy(loc_v, scale=scale_v)
self.assertAllClose(self.evaluate(laplace.entropy()), expected_entropy)
def testLaplaceSample(self):
loc_v = 4.0
scale_v = 3.0
loc = tf.constant(loc_v)
scale = tf.constant(scale_v)
n = 100000
laplace = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
samples = laplace.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
sp_stats.laplace.mean(loc_v, scale=scale_v),
rtol=0.05,
atol=0.)
self.assertAllClose(
sample_values.var(),
sp_stats.laplace.var(loc_v, scale=scale_v),
rtol=0.05,
atol=0.)
self.assertTrue(self._kstest(loc_v, scale_v, sample_values))
def testLaplaceFullyReparameterized(self):
loc = tf.constant(4.0)
scale = tf.constant(3.0)
_, [grad_loc, grad_scale] = tfp.math.value_and_gradient(
lambda l, s: tfd.Laplace(loc=l, scale=s, validate_args=True).sample( # pylint: disable=g-long-lambda
100, seed=test_util.test_seed()), [loc, scale])
self.assertIsNotNone(grad_loc)
self.assertIsNotNone(grad_scale)
def testLaplaceSampleMultiDimensional(self):
loc_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100
scale_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
laplace = tfd.Laplace(loc=loc_v, scale=scale_v, validate_args=True)
n = 10000
samples = laplace.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(samples.shape, (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
zeros = np.zeros_like(loc_v + scale_v) # 10 x 100
loc_bc = loc_v + zeros
scale_bc = scale_v + zeros
self.assertAllClose(
sample_values.mean(axis=0),
sp_stats.laplace.mean(loc_bc, scale=scale_bc),
rtol=0.35,
atol=0.)
self.assertAllClose(
sample_values.var(axis=0),
sp_stats.laplace.var(loc_bc, scale=scale_bc),
rtol=0.10,
atol=0.)
fails = 0
trials = 0
for ai, a in enumerate(np.reshape(loc_v, [-1])):
for bi, b in enumerate(np.reshape(scale_v, [-1])):
s = sample_values[:, bi, ai]
trials += 1
fails += 0 if self._kstest(a, b, s) else 1
self.assertLess(fails, trials * 0.03)
def _kstest(self, loc, scale, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
ks, _ = sp_stats.kstest(samples, sp_stats.laplace(loc, scale=scale).cdf)
# Return True when the test passes.
return ks < 0.02
def testLaplacePdfOfSampleMultiDims(self):
laplace = tfd.Laplace(loc=[7., 11.], scale=[[5.], [6.]], validate_args=True)
num = 50000
samples = laplace.sample(num, seed=test_util.test_seed())
pdfs = laplace.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual(samples.shape, (num, 2, 2))
self.assertEqual(pdfs.shape, (num, 2, 2))
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
self.assertAllClose(
sp_stats.laplace.mean(
[[7., 11.], [7., 11.]], scale=np.array([[5., 5.], [6., 6.]])),
sample_vals.mean(axis=0),
rtol=0.05,
atol=0.)
self.assertAllClose(
sp_stats.laplace.var([[7., 11.], [7., 11.]],
scale=np.array([[5., 5.], [6., 6.]])),
sample_vals.var(axis=0),
rtol=0.05,
atol=0.)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testLaplaceNonPositiveInitializationParamsRaises(self):
loc_v = tf.constant(0.0, name='loc')
scale_v = tf.constant(-1.0, name='scale')
with self.assertRaisesOpError('Argument `scale` must be positive.'):
laplace = tfd.Laplace(
loc=loc_v, scale=scale_v, validate_args=True)
self.evaluate(laplace.mean())
loc_v = tf.constant(1.0, name='loc')
scale_v = tf.constant(0.0, name='scale')
with self.assertRaisesOpError('Argument `scale` must be positive.'):
laplace = tfd.Laplace(
loc=loc_v, scale=scale_v, validate_args=True)
self.evaluate(laplace.mean())
scale = tf.Variable([1., 2., -3.])
self.evaluate(scale.initializer)
with self.assertRaisesOpError('Argument `scale` must be positive.'):
d = tfd.Laplace(loc=0, scale=scale, validate_args=True)
self.evaluate(d.sample(seed=test_util.test_seed()))
def testLaplaceLaplaceKL(self):
batch_size = 6
event_size = 3
a_loc = np.array([[0.5] * event_size] * batch_size, dtype=np.float32)
a_scale = np.array([[0.1] * event_size] * batch_size, dtype=np.float32)
b_loc = np.array([[0.4] * event_size] * batch_size, dtype=np.float32)
b_scale = np.array([[0.2] * event_size] * batch_size, dtype=np.float32)
a = tfd.Laplace(loc=a_loc, scale=a_scale, validate_args=True)
b = tfd.Laplace(loc=b_loc, scale=b_scale, validate_args=True)
distance = tf.abs(a_loc - b_loc)
ratio = a_scale / b_scale
true_kl = (-tf.math.log(ratio) - 1 + distance / b_scale +
ratio * tf.exp(-distance / a_scale))
kl = tfd.kl_divergence(a, b)
x = a.sample(int(1e4), seed=test_util.test_seed())
kl_sample = tf.reduce_mean(a.log_prob(x) - b.log_prob(x), axis=0)
true_kl_, kl_, kl_sample_ = self.evaluate([true_kl, kl, kl_sample])
self.assertAllClose(true_kl_, kl_, atol=1e-5, rtol=1e-5)
self.assertAllClose(true_kl_, kl_sample_, atol=0., rtol=1e-1)
zero_kl = tfd.kl_divergence(a, a)
true_zero_kl_, zero_kl_ = self.evaluate([tf.zeros_like(true_kl), zero_kl])
self.assertAllEqual(true_zero_kl_, zero_kl_)
@test_util.tf_tape_safety_test
def testGradientThroughParams(self):
loc = tf.Variable([-5., 0., 5.])
scale = tf.Variable(2.)
d = tfd.Laplace(loc=loc, scale=scale, validate_args=True)
with tf.GradientTape() as tape:
loss = -d.log_prob([1., 2., 3.])
grad = tape.gradient(loss, d.trainable_variables)
self.assertLen(grad, 2)
self.assertAllNotNone(grad)
def testAssertsPositiveScaleAfterMutation(self):
scale = tf.Variable([1., 2., 3.])
d = tfd.Laplace(loc=0., scale=scale, validate_args=True)
self.evaluate([v.initializer for v in d.variables])
with self.assertRaisesOpError('Argument `scale` must be positive.'):
with tf.control_dependencies([scale.assign([1., 2., -3.])]):
self.evaluate(tfd.Laplace(loc=0., scale=1.).kl_divergence(d))
def testAssertParamsAreFloats(self):
loc = tf.convert_to_tensor(0, dtype=tf.int32)
scale = tf.convert_to_tensor(1, dtype=tf.int32)
with self.assertRaisesRegexp(ValueError, 'Expected floating point'):
tfd.Laplace(loc=loc, scale=scale)
if __name__ == '__main__':
tf.test.main()
| [((14764, 14778), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (14776, 14778), True, 'import tensorflow.compat.v2 as tf\n'), ((1208, 1230), 'tensorflow.compat.v2.constant', 'tf.constant', (['([3.0] * 5)'], {}), '([3.0] * 5)\n', (1219, 1230), True, 'import tensorflow.compat.v2 as tf\n'), ((1243, 1260), 'tensorflow.compat.v2.constant', 'tf.constant', (['(11.0)'], {}), '(11.0)\n', (1254, 1260), True, 'import tensorflow.compat.v2 as tf\n'), ((1661, 1692), 'tensorflow.compat.v2.constant', 'tf.constant', (['([2.0] * batch_size)'], {}), '([2.0] * batch_size)\n', (1672, 1692), True, 'import tensorflow.compat.v2 as tf\n'), ((1705, 1736), 'tensorflow.compat.v2.constant', 'tf.constant', (['([3.0] * batch_size)'], {}), '([3.0] * batch_size)\n', (1716, 1736), True, 'import tensorflow.compat.v2 as tf\n'), ((1779, 1837), 'numpy.array', 'np.array', (['[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]'], {'dtype': 'np.float32'}), '([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)\n', (1787, 1837), True, 'import numpy as np\n'), ((2005, 2053), 'scipy.stats.laplace.logpdf', 'sp_stats.laplace.logpdf', (['x', 'loc_v'], {'scale': 'scale_v'}), '(x, loc_v, scale=scale_v)\n', (2028, 2053), True, 'from scipy import stats as sp_stats\n'), ((2332, 2370), 'tensorflow.compat.v2.constant', 'tf.constant', (['([[2.0, 4.0]] * batch_size)'], {}), '([[2.0, 4.0]] * batch_size)\n', (2343, 2370), True, 'import tensorflow.compat.v2 as tf\n'), ((2383, 2421), 'tensorflow.compat.v2.constant', 'tf.constant', (['([[3.0, 4.0]] * batch_size)'], {}), '([[3.0, 4.0]] * batch_size)\n', (2394, 2421), True, 'import tensorflow.compat.v2 as tf\n'), ((2434, 2454), 'numpy.array', 'np.array', (['[2.0, 4.0]'], {}), '([2.0, 4.0])\n', (2442, 2454), True, 'import numpy as np\n'), ((2469, 2489), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (2477, 2489), True, 'import numpy as np\n'), ((2877, 2925), 'scipy.stats.laplace.logpdf', 'sp_stats.laplace.logpdf', (['x', 'loc_v'], {'scale': 'scale_v'}), '(x, loc_v, scale=scale_v)\n', (2900, 2925), True, 'from scipy import stats as sp_stats\n'), ((3135, 3173), 'tensorflow.compat.v2.constant', 'tf.constant', (['([[2.0, 4.0]] * batch_size)'], {}), '([[2.0, 4.0]] * batch_size)\n', (3146, 3173), True, 'import tensorflow.compat.v2 as tf\n'), ((3186, 3202), 'tensorflow.compat.v2.constant', 'tf.constant', (['(3.0)'], {}), '(3.0)\n', (3197, 3202), True, 'import tensorflow.compat.v2 as tf\n'), ((3215, 3235), 'numpy.array', 'np.array', (['[2.0, 4.0]'], {}), '([2.0, 4.0])\n', (3223, 3235), True, 'import numpy as np\n'), ((3641, 3689), 'scipy.stats.laplace.logpdf', 'sp_stats.laplace.logpdf', (['x', 'loc_v'], {'scale': 'scale_v'}), '(x, loc_v, scale=scale_v)\n', (3664, 3689), True, 'from scipy import stats as sp_stats\n'), ((3868, 3899), 'tensorflow.compat.v2.constant', 'tf.constant', (['([2.0] * batch_size)'], {}), '([2.0] * batch_size)\n', (3879, 3899), True, 'import tensorflow.compat.v2 as tf\n'), ((3912, 3943), 'tensorflow.compat.v2.constant', 'tf.constant', (['([3.0] * batch_size)'], {}), '([3.0] * batch_size)\n', (3923, 3943), True, 'import tensorflow.compat.v2 as tf\n'), ((3986, 4044), 'numpy.array', 'np.array', (['[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]'], {'dtype': 'np.float32'}), '([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)\n', (3994, 4044), True, 'import numpy as np\n'), ((4197, 4242), 'scipy.stats.laplace.cdf', 'sp_stats.laplace.cdf', (['x', 'loc_v'], {'scale': 'scale_v'}), '(x, loc_v, scale=scale_v)\n', (4217, 4242), True, 'from scipy import stats as sp_stats\n'), ((4362, 4393), 
'tensorflow.compat.v2.constant', 'tf.constant', (['([2.0] * batch_size)'], {}), '([2.0] * batch_size)\n', (4373, 4393), True, 'import tensorflow.compat.v2 as tf\n'), ((4406, 4437), 'tensorflow.compat.v2.constant', 'tf.constant', (['([3.0] * batch_size)'], {}), '([3.0] * batch_size)\n', (4417, 4437), True, 'import tensorflow.compat.v2 as tf\n'), ((4480, 4540), 'numpy.array', 'np.array', (['[-2.5, 2.5, -4.0, 0.1, 1.0, 2.0]'], {'dtype': 'np.float32'}), '([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)\n', (4488, 4540), True, 'import numpy as np\n'), ((4697, 4745), 'scipy.stats.laplace.logcdf', 'sp_stats.laplace.logcdf', (['x', 'loc_v'], {'scale': 'scale_v'}), '(x, loc_v, scale=scale_v)\n', (4720, 4745), True, 'from scipy import stats as sp_stats\n'), ((5303, 5334), 'tensorflow.compat.v2.constant', 'tf.constant', (['([2.0] * batch_size)'], {}), '([2.0] * batch_size)\n', (5314, 5334), True, 'import tensorflow.compat.v2 as tf\n'), ((5347, 5378), 'tensorflow.compat.v2.constant', 'tf.constant', (['([3.0] * batch_size)'], {}), '([3.0] * batch_size)\n', (5358, 5378), True, 'import tensorflow.compat.v2 as tf\n'), ((5421, 5481), 'numpy.array', 'np.array', (['[-2.5, 2.5, -4.0, 0.1, 1.0, 2.0]'], {'dtype': 'np.float32'}), '([-2.5, 2.5, -4.0, 0.1, 1.0, 2.0], dtype=np.float32)\n', (5429, 5481), True, 'import numpy as np\n'), ((5649, 5696), 'scipy.stats.laplace.logsf', 'sp_stats.laplace.logsf', (['x', 'loc_v'], {'scale': 'scale_v'}), '(x, loc_v, scale=scale_v)\n', (5671, 5696), True, 'from scipy import stats as sp_stats\n'), ((5795, 5820), 'numpy.array', 'np.array', (['[1.0, 3.0, 2.5]'], {}), '([1.0, 3.0, 2.5])\n', (5803, 5820), True, 'import numpy as np\n'), ((5835, 5860), 'numpy.array', 'np.array', (['[1.0, 4.0, 5.0]'], {}), '([1.0, 4.0, 5.0])\n', (5843, 5860), True, 'import numpy as np\n'), ((6003, 6046), 'scipy.stats.laplace.mean', 'sp_stats.laplace.mean', (['loc_v'], {'scale': 'scale_v'}), '(loc_v, scale=scale_v)\n', (6024, 6046), True, 'from scipy import stats as sp_stats\n'), ((6160, 6185), 'numpy.array', 'np.array', (['[0.5, 3.0, 2.5]'], {}), '([0.5, 3.0, 2.5])\n', (6168, 6185), True, 'import numpy as np\n'), ((6200, 6225), 'numpy.array', 'np.array', (['[1.0, 4.0, 5.0]'], {}), '([1.0, 4.0, 5.0])\n', (6208, 6225), True, 'import numpy as np\n'), ((6455, 6480), 'numpy.array', 'np.array', (['[1.0, 3.0, 2.5]'], {}), '([1.0, 3.0, 2.5])\n', (6463, 6480), True, 'import numpy as np\n'), ((6495, 6520), 'numpy.array', 'np.array', (['[1.0, 4.0, 5.0]'], {}), '([1.0, 4.0, 5.0])\n', (6503, 6520), True, 'import numpy as np\n'), ((6671, 6713), 'scipy.stats.laplace.var', 'sp_stats.laplace.var', (['loc_v'], {'scale': 'scale_v'}), '(loc_v, scale=scale_v)\n', (6691, 6713), True, 'from scipy import stats as sp_stats\n'), ((6834, 6859), 'numpy.array', 'np.array', (['[1.0, 3.0, 2.5]'], {}), '([1.0, 3.0, 2.5])\n', (6842, 6859), True, 'import numpy as np\n'), ((6874, 6899), 'numpy.array', 'np.array', (['[1.0, 4.0, 5.0]'], {}), '([1.0, 4.0, 5.0])\n', (6882, 6899), True, 'import numpy as np\n'), ((7045, 7087), 'scipy.stats.laplace.std', 'sp_stats.laplace.std', (['loc_v'], {'scale': 'scale_v'}), '(loc_v, scale=scale_v)\n', (7065, 7087), True, 'from scipy import stats as sp_stats\n'), ((7207, 7232), 'numpy.array', 'np.array', (['[1.0, 3.0, 2.5]'], {}), '([1.0, 3.0, 2.5])\n', (7215, 7232), True, 'import numpy as np\n'), ((7247, 7272), 'numpy.array', 'np.array', (['[1.0, 4.0, 5.0]'], {}), '([1.0, 4.0, 5.0])\n', (7255, 7272), True, 'import numpy as np\n'), ((7420, 7466), 'scipy.stats.laplace.entropy', 
'sp_stats.laplace.entropy', (['loc_v'], {'scale': 'scale_v'}), '(loc_v, scale=scale_v)\n', (7444, 7466), True, 'from scipy import stats as sp_stats\n'), ((7619, 7637), 'tensorflow.compat.v2.constant', 'tf.constant', (['loc_v'], {}), '(loc_v)\n', (7630, 7637), True, 'import tensorflow.compat.v2 as tf\n'), ((7650, 7670), 'tensorflow.compat.v2.constant', 'tf.constant', (['scale_v'], {}), '(scale_v)\n', (7661, 7670), True, 'import tensorflow.compat.v2 as tf\n'), ((8354, 8370), 'tensorflow.compat.v2.constant', 'tf.constant', (['(4.0)'], {}), '(4.0)\n', (8365, 8370), True, 'import tensorflow.compat.v2 as tf\n'), ((8383, 8399), 'tensorflow.compat.v2.constant', 'tf.constant', (['(3.0)'], {}), '(3.0)\n', (8394, 8399), True, 'import tensorflow.compat.v2 as tf\n'), ((9202, 9232), 'numpy.zeros_like', 'np.zeros_like', (['(loc_v + scale_v)'], {}), '(loc_v + scale_v)\n', (9215, 9232), True, 'import numpy as np\n'), ((11636, 11664), 'tensorflow.compat.v2.constant', 'tf.constant', (['(0.0)'], {'name': '"""loc"""'}), "(0.0, name='loc')\n", (11647, 11664), True, 'import tensorflow.compat.v2 as tf\n'), ((11679, 11710), 'tensorflow.compat.v2.constant', 'tf.constant', (['(-1.0)'], {'name': '"""scale"""'}), "(-1.0, name='scale')\n", (11690, 11710), True, 'import tensorflow.compat.v2 as tf\n'), ((11918, 11946), 'tensorflow.compat.v2.constant', 'tf.constant', (['(1.0)'], {'name': '"""loc"""'}), "(1.0, name='loc')\n", (11929, 11946), True, 'import tensorflow.compat.v2 as tf\n'), ((11961, 11991), 'tensorflow.compat.v2.constant', 'tf.constant', (['(0.0)'], {'name': '"""scale"""'}), "(0.0, name='scale')\n", (11972, 11991), True, 'import tensorflow.compat.v2 as tf\n'), ((12199, 12228), 'tensorflow.compat.v2.Variable', 'tf.Variable', (['[1.0, 2.0, -3.0]'], {}), '([1.0, 2.0, -3.0])\n', (12210, 12228), True, 'import tensorflow.compat.v2 as tf\n'), ((12542, 12603), 'numpy.array', 'np.array', (['([[0.5] * event_size] * batch_size)'], {'dtype': 'np.float32'}), '([[0.5] * event_size] * batch_size, dtype=np.float32)\n', (12550, 12603), True, 'import numpy as np\n'), ((12618, 12679), 'numpy.array', 'np.array', (['([[0.1] * event_size] * batch_size)'], {'dtype': 'np.float32'}), '([[0.1] * event_size] * batch_size, dtype=np.float32)\n', (12626, 12679), True, 'import numpy as np\n'), ((12692, 12753), 'numpy.array', 'np.array', (['([[0.4] * event_size] * batch_size)'], {'dtype': 'np.float32'}), '([[0.4] * event_size] * batch_size, dtype=np.float32)\n', (12700, 12753), True, 'import numpy as np\n'), ((12768, 12829), 'numpy.array', 'np.array', (['([[0.2] * event_size] * batch_size)'], {'dtype': 'np.float32'}), '([[0.2] * event_size] * batch_size, dtype=np.float32)\n', (12776, 12829), True, 'import numpy as np\n'), ((12979, 13000), 'tensorflow.compat.v2.abs', 'tf.abs', (['(a_loc - b_loc)'], {}), '(a_loc - b_loc)\n', (12985, 13000), True, 'import tensorflow.compat.v2 as tf\n'), ((13755, 13784), 'tensorflow.compat.v2.Variable', 'tf.Variable', (['[-5.0, 0.0, 5.0]'], {}), '([-5.0, 0.0, 5.0])\n', (13766, 13784), True, 'import tensorflow.compat.v2 as tf\n'), ((13794, 13810), 'tensorflow.compat.v2.Variable', 'tf.Variable', (['(2.0)'], {}), '(2.0)\n', (13805, 13810), True, 'import tensorflow.compat.v2 as tf\n'), ((14125, 14153), 'tensorflow.compat.v2.Variable', 'tf.Variable', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (14136, 14153), True, 'import tensorflow.compat.v2 as tf\n'), ((14528, 14567), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (14548, 14567), 
True, 'import tensorflow.compat.v2 as tf\n'), ((14580, 14619), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['(1)'], {'dtype': 'tf.int32'}), '(1, dtype=tf.int32)\n', (14600, 14619), True, 'import tensorflow.compat.v2 as tf\n'), ((1444, 1463), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[5]'], {}), '([5])\n', (1458, 1463), True, 'import tensorflow.compat.v2 as tf\n'), ((1580, 1598), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (1594, 1598), True, 'import tensorflow.compat.v2 as tf\n'), ((2229, 2253), 'numpy.exp', 'np.exp', (['expected_log_pdf'], {}), '(expected_log_pdf)\n', (2235, 2253), True, 'import numpy as np\n'), ((2498, 2558), 'numpy.array', 'np.array', (['[[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]]'], {'dtype': 'np.float32'}), '([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32)\n', (2506, 2558), True, 'import numpy as np\n'), ((3020, 3044), 'numpy.exp', 'np.exp', (['expected_log_pdf'], {}), '(expected_log_pdf)\n', (3026, 3044), True, 'import numpy as np\n'), ((3262, 3322), 'numpy.array', 'np.array', (['[[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]]'], {'dtype': 'np.float32'}), '([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32)\n', (3270, 3322), True, 'import numpy as np\n'), ((3784, 3808), 'numpy.exp', 'np.exp', (['expected_log_pdf'], {}), '(expected_log_pdf)\n', (3790, 3808), True, 'import numpy as np\n'), ((8010, 8053), 'scipy.stats.laplace.mean', 'sp_stats.laplace.mean', (['loc_v'], {'scale': 'scale_v'}), '(loc_v, scale=scale_v)\n', (8031, 8053), True, 'from scipy import stats as sp_stats\n'), ((8153, 8195), 'scipy.stats.laplace.var', 'sp_stats.laplace.var', (['loc_v'], {'scale': 'scale_v'}), '(loc_v, scale=scale_v)\n', (8173, 8195), True, 'from scipy import stats as sp_stats\n'), ((9372, 9417), 'scipy.stats.laplace.mean', 'sp_stats.laplace.mean', (['loc_bc'], {'scale': 'scale_bc'}), '(loc_bc, scale=scale_bc)\n', (9393, 9417), True, 'from scipy import stats as sp_stats\n'), ((9523, 9567), 'scipy.stats.laplace.var', 'sp_stats.laplace.var', (['loc_bc'], {'scale': 'scale_bc'}), '(loc_bc, scale=scale_bc)\n', (9543, 9567), True, 'from scipy import stats as sp_stats\n'), ((9661, 9684), 'numpy.reshape', 'np.reshape', (['loc_v', '[-1]'], {}), '(loc_v, [-1])\n', (9671, 9684), True, 'import numpy as np\n'), ((13881, 13898), 'tensorflow.compat.v2.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (13896, 13898), True, 'import tensorflow.compat.v2 as tf\n'), ((7791, 7812), 'tensorflow_probability.python.internal.test_util.test_seed', 'test_util.test_seed', ([], {}), '()\n', (7810, 7812), False, 'from tensorflow_probability.python.internal import test_util\n'), ((8773, 8808), 'numpy.arange', 'np.arange', (['(1)', '(101)'], {'dtype': 'np.float32'}), '(1, 101, dtype=np.float32)\n', (8782, 8808), True, 'import numpy as np\n'), ((9018, 9039), 'tensorflow_probability.python.internal.test_util.test_seed', 'test_util.test_seed', ([], {}), '()\n', (9037, 9039), False, 'from tensorflow_probability.python.internal import test_util\n'), ((9716, 9741), 'numpy.reshape', 'np.reshape', (['scale_v', '[-1]'], {}), '(scale_v, [-1])\n', (9726, 9741), True, 'import numpy as np\n'), ((10034, 10068), 'scipy.stats.laplace', 'sp_stats.laplace', (['loc'], {'scale': 'scale'}), '(loc, scale=scale)\n', (10050, 10068), True, 'from scipy import stats as sp_stats\n'), ((10317, 10338), 'tensorflow_probability.python.internal.test_util.test_seed', 'test_util.test_seed', ([], {}), '()\n', (10336, 10338), False, 'from tensorflow_probability.python.internal import test_util\n'), 
((13116, 13143), 'tensorflow.compat.v2.exp', 'tf.exp', (['(-distance / a_scale)'], {}), '(-distance / a_scale)\n', (13122, 13143), True, 'import tensorflow.compat.v2 as tf\n'), ((13212, 13233), 'tensorflow_probability.python.internal.test_util.test_seed', 'test_util.test_seed', ([], {}), '()\n', (13231, 13233), False, 'from tensorflow_probability.python.internal import test_util\n'), ((13589, 13611), 'tensorflow.compat.v2.zeros_like', 'tf.zeros_like', (['true_kl'], {}), '(true_kl)\n', (13602, 13611), True, 'import tensorflow.compat.v2 as tf\n'), ((8846, 8880), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {'dtype': 'np.float32'}), '(1, 11, dtype=np.float32)\n', (8855, 8880), True, 'import numpy as np\n'), ((10929, 10963), 'numpy.array', 'np.array', (['[[5.0, 5.0], [6.0, 6.0]]'], {}), '([[5.0, 5.0], [6.0, 6.0]])\n', (10937, 10963), True, 'import numpy as np\n'), ((11145, 11179), 'numpy.array', 'np.array', (['[[5.0, 5.0], [6.0, 6.0]]'], {}), '([[5.0, 5.0], [6.0, 6.0]])\n', (11153, 11179), True, 'import numpy as np\n'), ((8593, 8614), 'tensorflow_probability.python.internal.test_util.test_seed', 'test_util.test_seed', ([], {}), '()\n', (8612, 8614), False, 'from tensorflow_probability.python.internal import test_util\n'), ((12432, 12453), 'tensorflow_probability.python.internal.test_util.test_seed', 'test_util.test_seed', ([], {}), '()\n', (12451, 12453), False, 'from tensorflow_probability.python.internal import test_util\n'), ((13047, 13065), 'tensorflow.compat.v2.math.log', 'tf.math.log', (['ratio'], {}), '(ratio)\n', (13058, 13065), True, 'import tensorflow.compat.v2 as tf\n'), ((4996, 5017), 'tensorflow_probability.python.internal.test_util.test_seed', 'test_util.test_seed', ([], {}), '()\n', (5015, 5017), False, 'from tensorflow_probability.python.internal import test_util\n')] |
credwood/bitplayers | app/__init__.py | 4ca6b6c6a21bb21d7cd963c64028415559c3dcc4 | import dash
from flask import Flask
from flask.helpers import get_root_path
from flask_login import login_required
from flask_wtf.csrf import CSRFProtect
from flask_admin import Admin, BaseView, expose
from flask_admin.contrib.sqla import ModelView
from datetime import datetime
from dateutil import parser
import pytz
from pytz import timezone
from config import BaseConfig
csrf = CSRFProtect()
def create_app():
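    # Application factory: configure the Flask server, CSRF protection, the admin
    # model views and each Dash dashboard, register extensions and blueprints,
    # then return the assembled app.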
from app.models import Blog, User, MyModelView, Contact
from app.extensions import db
from app.dashapp1.layout import layout as layout_1
from app.dashapp1.callbacks import register_callbacks as register_callbacks_1
#from app.dashapp2.layout import layout as layout_2
#from app.dashapp2.callbacks import register_callbacks as register_callbacks_2
from app.dashapp3.layout import layout as layout_3
from app.dashapp3.callbacks import register_callbacks as register_callbacks_3
server = Flask(__name__)
server.config.from_object(BaseConfig)
csrf.init_app(server)
csrf._exempt_views.add('dash.dash.dispatch')
admin = Admin(server)
admin.add_view(MyModelView(User, db.session))
admin.add_view(MyModelView(Blog, db.session))
admin.add_view(MyModelView(Contact, db.session))
register_dashapp(server, 'dashapp1', 'dashboard1', layout_1, register_callbacks_1)
#register_dashapp(server, 'dashapp2', 'dashboard2', layout_2, register_callbacks_2)
register_dashapp(server, 'dashapp3', 'dashboard3', layout_3, register_callbacks_3)
register_extensions(server)
register_blueprints(server)
server.jinja_env.filters['formatdatetime'] = format_datetime
return server
def format_datetime(date, fmt=None):
    # Jinja filter: convert a UTC datetime to US/Pacific and format it.
    western = timezone("America/Los_Angeles")
    native = pytz.utc.localize(date, is_dst=None).astimezone(western)
    fmt = fmt or '%m-%d-%Y %I:%M %p'
    return native.strftime(fmt)
def register_dashapp(app, title, base_pathname, layout, register_callbacks_fun):
# Meta tags for viewport responsiveness
meta_viewport = {"name": "viewport", "content": "width=device-width, initial-scale=1, shrink-to-fit=no"}
my_dashapp = dash.Dash(__name__,
server=app,
url_base_pathname=f'/{base_pathname}/',
assets_folder=get_root_path(__name__) + f'/{base_pathname}/assets/',
meta_tags=[meta_viewport])
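    # Title, layout and callback registration are attached while the Flask app
    # context is active.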
with app.app_context():
my_dashapp.title = title
my_dashapp.layout = layout
register_callbacks_fun(my_dashapp)
#_protect_dashviews(my_dashapp)
def _protect_dashviews(dashapp):
for view_func in dashapp.server.view_functions:
if view_func.startswith(dashapp.config.url_base_pathname):
dashapp.server.view_functions[view_func] = login_required(dashapp.server.view_functions[view_func])
def register_extensions(server):
from app.extensions import db
from app.extensions import login_inst
from app.extensions import migrate
from app.extensions import mail
db.init_app(server)
login_inst.init_app(server)
login_inst.login_view = 'main.login'
migrate.init_app(server, db)
mail.init_app(server)
def register_blueprints(server):
from app.webapp import server_bp
server.register_blueprint(server_bp)
| [((385, 398), 'flask_wtf.csrf.CSRFProtect', 'CSRFProtect', ([], {}), '()\n', (396, 398), False, 'from flask_wtf.csrf import CSRFProtect\n'), ((939, 954), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (944, 954), False, 'from flask import Flask\n'), ((1085, 1098), 'flask_admin.Admin', 'Admin', (['server'], {}), '(server)\n', (1090, 1098), False, 'from flask_admin import Admin, BaseView, expose\n'), ((1714, 1745), 'pytz.timezone', 'timezone', (['"""America/Los_Angeles"""'], {}), "('America/Los_Angeles')\n", (1722, 1745), False, 'from pytz import timezone\n'), ((3116, 3135), 'app.extensions.db.init_app', 'db.init_app', (['server'], {}), '(server)\n', (3127, 3135), False, 'from app.extensions import db\n'), ((3140, 3167), 'app.extensions.login_inst.init_app', 'login_inst.init_app', (['server'], {}), '(server)\n', (3159, 3167), False, 'from app.extensions import login_inst\n'), ((3213, 3241), 'app.extensions.migrate.init_app', 'migrate.init_app', (['server', 'db'], {}), '(server, db)\n', (3229, 3241), False, 'from app.extensions import migrate\n'), ((3246, 3267), 'app.extensions.mail.init_app', 'mail.init_app', (['server'], {}), '(server)\n', (3259, 3267), False, 'from app.extensions import mail\n'), ((1118, 1147), 'app.models.MyModelView', 'MyModelView', (['User', 'db.session'], {}), '(User, db.session)\n', (1129, 1147), False, 'from app.models import Blog, User, MyModelView, Contact\n'), ((1168, 1197), 'app.models.MyModelView', 'MyModelView', (['Blog', 'db.session'], {}), '(Blog, db.session)\n', (1179, 1197), False, 'from app.models import Blog, User, MyModelView, Contact\n'), ((1218, 1250), 'app.models.MyModelView', 'MyModelView', (['Contact', 'db.session'], {}), '(Contact, db.session)\n', (1229, 1250), False, 'from app.models import Blog, User, MyModelView, Contact\n'), ((1757, 1793), 'pytz.utc.localize', 'pytz.utc.localize', (['date'], {'is_dst': 'None'}), '(date, is_dst=None)\n', (1774, 1793), False, 'import pytz\n'), ((2868, 2924), 'flask_login.login_required', 'login_required', (['dashapp.server.view_functions[view_func]'], {}), '(dashapp.server.view_functions[view_func])\n', (2882, 2924), False, 'from flask_login import login_required\n'), ((2375, 2398), 'flask.helpers.get_root_path', 'get_root_path', (['__name__'], {}), '(__name__)\n', (2388, 2398), False, 'from flask.helpers import get_root_path\n')] |
kevinbfry/selective-inference | selectinf/randomized/approx_reference_grouplasso.py | 4e846877b5c23969fc420b452f20cc3b16b6cb78 | from __future__ import print_function
from scipy.linalg import block_diag
from scipy.stats import norm as ndist
from scipy.interpolate import interp1d
import collections
import numpy as np
from numpy import log
from numpy.linalg import norm, qr, inv, eig
import pandas as pd
import regreg.api as rr
from .randomization import randomization
from ..base import restricted_estimator
from ..algorithms.barrier_affine import solve_barrier_affine_py as solver
from ..distributions.discrete_family import discrete_family
class group_lasso(object):
def __init__(self,
loglike,
groups,
weights,
ridge_term,
randomizer,
use_lasso=True, # should lasso solver be used where applicable - defaults to True
perturb=None):
_check_groups(groups) # make sure groups looks sensible
# log likelihood : quadratic loss
self.loglike = loglike
self.nfeature = self.loglike.shape[0]
# ridge parameter
self.ridge_term = ridge_term
# group lasso penalty (from regreg)
# use regular lasso penalty if all groups are size 1
if use_lasso and groups.size == np.unique(groups).size:
# need to provide weights an an np.array rather than a dictionary
weights_np = np.array([w[1] for w in sorted(weights.items())])
self.penalty = rr.weighted_l1norm(weights=weights_np,
lagrange=1.)
else:
self.penalty = rr.group_lasso(groups,
weights=weights,
lagrange=1.)
# store groups as a class variable since the non-group lasso doesn't
self.groups = groups
self._initial_omega = perturb
# gaussian randomization
self.randomizer = randomizer
def fit(self,
solve_args={'tol': 1.e-12, 'min_its': 50},
perturb=None):
# solve the randomized version of group lasso
(self.initial_soln,
self.initial_subgrad) = self._solve_randomized_problem(perturb=perturb,
solve_args=solve_args)
# initialize variables
active_groups = [] # active group labels
active_dirs = {} # dictionary: keys are group labels, values are unit-norm coefficients
unpenalized = [] # selected groups with no penalty
        overall = np.ones(self.nfeature, dtype=bool)  # mask of active features
ordered_groups = [] # active group labels sorted by label
ordered_opt = [] # gamma's ordered by group labels
ordered_vars = [] # indices "ordered" by sorting group labels
tol = 1.e-20
_, self.randomizer_prec = self.randomizer.cov_prec
# now we are collecting the directions and norms of the active groups
for g in sorted(np.unique(self.groups)): # g is group label
group_mask = self.groups == g
soln = self.initial_soln # do not need to keep setting this
if norm(soln[group_mask]) > tol * norm(soln): # is group g appreciably nonzero
ordered_groups.append(g)
# variables in active group
ordered_vars.extend(np.flatnonzero(group_mask))
if self.penalty.weights[g] == 0:
unpenalized.append(g)
else:
active_groups.append(g)
active_dirs[g] = soln[group_mask] / norm(soln[group_mask])
ordered_opt.append(norm(soln[group_mask]))
else:
overall[group_mask] = False
self.selection_variable = {'directions': active_dirs,
'active_groups': active_groups} # kind of redundant with keys of active_dirs
self._ordered_groups = ordered_groups
        # if no groups are selected, return early (nothing to condition on)
if len(self.selection_variable['active_groups']) == 0:
return np.sign(soln), soln
# otherwise continue as before
self.observed_opt_state = np.hstack(ordered_opt) # gammas as array
_beta_unpenalized = restricted_estimator(self.loglike, # refit OLS on E
overall,
solve_args=solve_args)
beta_bar = np.zeros(self.nfeature)
beta_bar[overall] = _beta_unpenalized # refit OLS beta with zeros
self._beta_full = beta_bar
X, y = self.loglike.data
W = self._W = self.loglike.saturated_loss.hessian(X.dot(beta_bar)) # all 1's for LS
opt_linearNoU = np.dot(X.T, X[:, ordered_vars] * W[:, np.newaxis])
for i, var in enumerate(ordered_vars):
opt_linearNoU[var, i] += self.ridge_term
opt_offset = self.initial_subgrad
self.observed_score_state = -opt_linearNoU.dot(_beta_unpenalized)
self.observed_score_state[~overall] += self.loglike.smooth_objective(beta_bar, 'grad')[~overall]
active_signs = np.sign(self.initial_soln)
active = np.flatnonzero(active_signs)
self.active = active
def compute_Vg(ug):
pg = ug.size # figure out size of g'th group
if pg > 1:
Z = np.column_stack((ug, np.eye(pg, pg - 1)))
Q, _ = qr(Z)
Vg = Q[:, 1:] # drop the first column
else:
Vg = np.zeros((1, 0)) # if the group is size one, the orthogonal complement is empty
return Vg
def compute_Lg(g):
pg = active_dirs[g].size
Lg = self.penalty.weights[g] * np.eye(pg)
return Lg
sorted_active_dirs = collections.OrderedDict(sorted(active_dirs.items()))
Vs = [compute_Vg(ug) for ug in sorted_active_dirs.values()]
V = block_diag(*Vs) # unpack the list
Ls = [compute_Lg(g) for g in sorted_active_dirs]
L = block_diag(*Ls) # unpack the list
XE = X[:, ordered_vars] # changed to ordered_vars
Q = XE.T.dot(self._W[:, None] * XE)
QI = inv(Q)
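        # C = V^T Q^{-1} Lambda V, where V spans the within-group directions
        # orthogonal to each active direction and Lambda carries the group
        # weights; C feeds the Jacobian term later (see jacobian_grad_hess).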
C = V.T.dot(QI).dot(L).dot(V)
self.XE = XE
self.Q = Q
self.QI = QI
self.C = C
U = block_diag(*[ug for ug in sorted_active_dirs.values()]).T
self.opt_linear = opt_linearNoU.dot(U)
self.active_dirs = active_dirs
self.opt_offset = opt_offset
self.ordered_vars = ordered_vars
self.linear_part = -np.eye(self.observed_opt_state.shape[0])
self.offset = np.zeros(self.observed_opt_state.shape[0])
return active_signs, soln
def _solve_randomized_problem(self,
perturb=None,
solve_args={'tol': 1.e-15, 'min_its': 100}):
# take a new perturbation if supplied
if perturb is not None:
self._initial_omega = perturb
if self._initial_omega is None:
self._initial_omega = self.randomizer.sample()
quad = rr.identity_quadratic(self.ridge_term,
0,
-self._initial_omega,
0)
problem = rr.simple_problem(self.loglike, self.penalty)
# if all groups are size 1, set up lasso penalty and run usual lasso solver... (see existing code)...
initial_soln = problem.solve(quad, **solve_args)
initial_subgrad = -(self.loglike.smooth_objective(initial_soln,
'grad') +
quad.objective(initial_soln, 'grad'))
return initial_soln, initial_subgrad
@staticmethod
def gaussian(X,
Y,
groups,
weights,
sigma=1.,
quadratic=None,
ridge_term=0.,
perturb=None,
use_lasso=True, # should lasso solver be used when applicable - defaults to True
randomizer_scale=None):
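        # Illustrative usage (names assumed):
        #     conv = group_lasso.gaussian(X, Y, groups, weights)
        #     signs, soln = conv.fit()
        #     result, _, _ = conv.selective_MLE(dispersion=1.)
        # where `weights` maps each group label to its penalty weight.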
loglike = rr.glm.gaussian(X, Y, coef=1. / sigma ** 2, quadratic=quadratic)
n, p = X.shape
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
ridge_term = np.std(Y) * np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
randomizer_scale = np.sqrt(mean_diag) * 0.5 * np.std(Y) * np.sqrt(n / (n - 1.))
randomizer = randomization.isotropic_gaussian((p,), randomizer_scale)
return group_lasso(loglike,
groups,
weights,
ridge_term,
randomizer,
use_lasso,
perturb)
def _setup_implied_gaussian(self):
_, prec = self.randomizer.cov_prec
if np.asarray(prec).shape in [(), (0,)]:
cond_precision = self.opt_linear.T.dot(self.opt_linear) * prec
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T) * prec
else:
cond_precision = self.opt_linear.T.dot(prec.dot(self.opt_linear))
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T).dot(prec)
cond_mean = -logdens_linear.dot(self.observed_score_state + self.opt_offset)
self.cond_mean = cond_mean
self.cond_cov = cond_cov
self.cond_precision = cond_precision
self.logdens_linear = logdens_linear
return cond_mean, cond_cov, cond_precision, logdens_linear
def selective_MLE(self,
solve_args={'tol': 1.e-12},
level=0.9,
useJacobian=True,
dispersion=None):
"""Do selective_MLE for group_lasso
Note: this masks the selective_MLE inherited from query
because that is not adapted for the group_lasso. Also, assumes
you have already run the fit method since this uses results
from that method.
Parameters
----------
observed_target: from selected_targets
target_cov: from selected_targets
target_cov_score: from selected_targets
init_soln: (opt_state) initial (observed) value of optimization variables
cond_mean: conditional mean of optimization variables (model on _setup_implied_gaussian)
cond_cov: conditional variance of optimization variables (model on _setup_implied_gaussian)
logdens_linear: (model on _setup_implied_gaussian)
linear_part: like A_scaling (from lasso)
offset: like b_scaling (from lasso)
solve_args: passed on to solver
level: level of confidence intervals
useC: whether to use python or C solver
JacobianPieces: (use self.C defined in fitting)
"""
self._setup_implied_gaussian() # Calculate useful quantities
(observed_target, target_cov, target_score_cov, alternatives) = self.selected_targets(dispersion)
init_soln = self.observed_opt_state # just the gammas
cond_mean = self.cond_mean
cond_cov = self.cond_cov
logdens_linear = self.logdens_linear
linear_part = self.linear_part
offset = self.offset
if np.asarray(observed_target).shape in [(), (0,)]:
raise ValueError('no target specified')
observed_target = np.atleast_1d(observed_target)
prec_target = inv(target_cov)
prec_opt = self.cond_precision
score_offset = self.observed_score_state + self.opt_offset
# target_lin determines how the conditional mean of optimization variables
# vary with target
# logdens_linear determines how the argument of the optimization density
# depends on the score, not how the mean depends on score, hence the minus sign
target_linear = target_score_cov.T.dot(prec_target)
target_offset = score_offset - target_linear.dot(observed_target)
target_lin = - logdens_linear.dot(target_linear)
target_off = cond_mean - target_lin.dot(observed_target)
if np.asarray(self.randomizer_prec).shape in [(), (0,)]:
_P = target_linear.T.dot(target_offset) * self.randomizer_prec
_prec = prec_target + (target_linear.T.dot(target_linear) * self.randomizer_prec) - target_lin.T.dot(
prec_opt).dot(
target_lin)
else:
_P = target_linear.T.dot(self.randomizer_prec).dot(target_offset)
_prec = prec_target + (target_linear.T.dot(self.randomizer_prec).dot(target_linear)) - target_lin.T.dot(
prec_opt).dot(target_lin)
C = target_cov.dot(_P - target_lin.T.dot(prec_opt).dot(target_off))
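        # C is an additive adjustment term carried into the selective MLE below.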
conjugate_arg = prec_opt.dot(cond_mean)
val, soln, hess = solve_barrier_affine_jacobian_py(conjugate_arg,
prec_opt,
init_soln,
linear_part,
offset,
self.C,
self.active_dirs,
useJacobian,
**solve_args)
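        # val, soln, hess: optimal value, optimizer (gammas) and inverse Hessian
        # of the barrier problem; hess feeds the observed information below.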
final_estimator = target_cov.dot(_prec).dot(observed_target) \
+ target_cov.dot(target_lin.T.dot(prec_opt.dot(cond_mean - soln))) + C
unbiased_estimator = target_cov.dot(_prec).dot(observed_target) + target_cov.dot(
_P - target_lin.T.dot(prec_opt).dot(target_off))
L = target_lin.T.dot(prec_opt)
observed_info_natural = _prec + L.dot(target_lin) - L.dot(hess.dot(L.T))
observed_info_mean = target_cov.dot(observed_info_natural.dot(target_cov))
Z_scores = final_estimator / np.sqrt(np.diag(observed_info_mean))
pvalues = ndist.cdf(Z_scores)
pvalues = 2 * np.minimum(pvalues, 1 - pvalues)
alpha = 1 - level
quantile = ndist.ppf(1 - alpha / 2.)
intervals = np.vstack([final_estimator -
quantile * np.sqrt(np.diag(observed_info_mean)),
final_estimator +
quantile * np.sqrt(np.diag(observed_info_mean))]).T
log_ref = val + conjugate_arg.T.dot(cond_cov).dot(conjugate_arg) / 2.
result = pd.DataFrame({'MLE': final_estimator,
'SE': np.sqrt(np.diag(observed_info_mean)),
'Zvalue': Z_scores,
'pvalue': pvalues,
'lower_confidence': intervals[:, 0],
'upper_confidence': intervals[:, 1],
'unbiased': unbiased_estimator})
return result, observed_info_mean, log_ref
def selected_targets(self,
dispersion=None,
solve_args={'tol': 1.e-12, 'min_its': 50}):
X, y = self.loglike.data
n, p = X.shape
XE = self.XE
Q = self.Q
observed_target = restricted_estimator(self.loglike, self.ordered_vars, solve_args=solve_args)
_score_linear = -XE.T.dot(self._W[:, None] * X).T
alternatives = ['twosided'] * len(self.active)
if dispersion is None: # use Pearson's X^2
dispersion = ((y - self.loglike.saturated_loss.mean_function(
XE.dot(observed_target))) ** 2 / self._W).sum() / (n - XE.shape[1])
cov_target = self.QI * dispersion
crosscov_target_score = _score_linear.dot(self.QI).T * dispersion
return (observed_target,
cov_target,
crosscov_target_score,
alternatives)
class approximate_grid_inference(object):
def __init__(self,
query,
dispersion,
solve_args={'tol': 1.e-12},
useIP=True):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
query : `gaussian_query`
A Gaussian query which has information
to describe implied Gaussian.
observed_target : ndarray
Observed estimate of target.
target_cov : ndarray
Estimated covaraince of target.
target_score_cov : ndarray
Estimated covariance of target and score of randomized query.
solve_args : dict, optional
Arguments passed to solver.
"""
self.solve_args = solve_args
result, inverse_info = query.selective_MLE(dispersion=dispersion)[:2]
self.linear_part = query.linear_part
self.offset = query.offset
self.logdens_linear = query.logdens_linear
self.cond_mean = query.cond_mean
self.prec_opt = np.linalg.inv(query.cond_cov)
self.cond_cov = query.cond_cov
self.C = query.C
self.active_dirs = query.active_dirs
(observed_target, target_cov, target_score_cov, alternatives) = query.selected_targets(dispersion)
self.observed_target = observed_target
self.target_score_cov = target_score_cov
self.target_cov = target_cov
self.init_soln = query.observed_opt_state
self.randomizer_prec = query.randomizer_prec
self.score_offset = query.observed_score_state + query.opt_offset
self.ntarget = ntarget = target_cov.shape[0]
_scale = 4 * np.sqrt(np.diag(inverse_info))
if useIP == False:
ngrid = 1000
self.stat_grid = np.zeros((ntarget, ngrid))
for j in range(ntarget):
self.stat_grid[j, :] = np.linspace(observed_target[j] - 1.5 * _scale[j],
observed_target[j] + 1.5 * _scale[j],
num=ngrid)
else:
ngrid = 100
self.stat_grid = np.zeros((ntarget, ngrid))
for j in range(ntarget):
self.stat_grid[j, :] = np.linspace(observed_target[j] - 1.5 * _scale[j],
observed_target[j] + 1.5 * _scale[j],
num=ngrid)
self.opt_linear = query.opt_linear
self.useIP = useIP
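        # Illustrative usage (names assumed): with a fitted group_lasso object `conv`,
        #     approx = approximate_grid_inference(conv, dispersion)
        #     result = approx.summary(level=0.9)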
def summary(self,
alternatives=None,
parameter=None,
level=0.9):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
alternatives : [str], optional
Sequence of strings describing the alternatives,
should be values of ['twosided', 'less', 'greater']
parameter : np.array
Hypothesized value for parameter -- defaults to 0.
level : float
Confidence level.
"""
if parameter is not None:
pivots = self._approx_pivots(parameter,
alternatives=alternatives)
else:
pivots = None
pvalues = self._approx_pivots(np.zeros_like(self.observed_target),
alternatives=alternatives)
lower, upper = self._approx_intervals(level=level)
result = pd.DataFrame({'target': self.observed_target,
'pvalue': pvalues,
'lower_confidence': lower,
'upper_confidence': upper})
if not np.all(parameter == 0):
result.insert(4, 'pivot', pivots)
result.insert(5, 'parameter', parameter)
return result
def log_reference(self,
observed_target,
target_cov,
target_score_cov,
grid):
"""
Approximate the log of the reference density on a grid.
"""
if np.asarray(observed_target).shape in [(), (0,)]:
raise ValueError('no target specified')
prec_target = np.linalg.inv(target_cov)
target_lin = - self.logdens_linear.dot(target_score_cov.T.dot(prec_target))
ref_hat = []
for k in range(grid.shape[0]):
# in the usual D = N + Gamma theta.hat,
# target_lin is "something" times Gamma,
# where "something" comes from implied Gaussian
# cond_mean is "something" times D
# Gamma is target_score_cov.T.dot(prec_target)
num_opt = self.prec_opt.shape[0]
num_con = self.linear_part.shape[0]
cond_mean_grid = (target_lin.dot(np.atleast_1d(grid[k] - observed_target)) +
self.cond_mean)
            # direction (eta) along which the optimization variables o are decomposed
            eta = -self.prec_opt.dot(self.logdens_linear.dot(target_score_cov.T))
            implied_mean = eta.T.dot(cond_mean_grid).item()
            implied_cov = eta.T.dot(self.cond_cov).dot(eta).item()
implied_prec = 1./implied_cov
_A = self.cond_cov.dot(eta) * implied_prec
R = np.identity(num_opt) - _A.dot(eta.T)
A = self.linear_part.dot(_A).reshape((-1,))
b = self.offset-self.linear_part.dot(R).dot(self.init_soln)
conjugate_arg = implied_mean * implied_prec
val, soln, _ = solver(np.asarray([conjugate_arg]),
np.reshape(implied_prec, (1,1)),
eta.T.dot(self.init_soln),
A.reshape((A.shape[0],1)),
b,
**self.solve_args)
gamma_ = _A.dot(soln) + R.dot(self.init_soln)
log_jacob = jacobian_grad_hess(gamma_, self.C, self.active_dirs)
ref_hat.append(-val - ((conjugate_arg ** 2) * implied_cov)/ 2. + log_jacob[0])
return np.asarray(ref_hat)
def _construct_families(self):
self._construct_density()
self._families = []
for m in range(self.ntarget):
p = self.target_score_cov.shape[1]
observed_target_uni = (self.observed_target[m]).reshape((1,))
target_cov_uni = (np.diag(self.target_cov)[m]).reshape((1, 1))
target_score_cov_uni = self.target_score_cov[m, :].reshape((1, p))
var_target = 1. / ((self.precs[m])[0, 0])
log_ref = self.log_reference(observed_target_uni,
target_cov_uni,
target_score_cov_uni,
self.stat_grid[m])
if self.useIP == False:
logW = (log_ref - 0.5 * (self.stat_grid[m] - self.observed_target[m]) ** 2 / var_target)
logW -= logW.max()
self._families.append(discrete_family(self.stat_grid[m],
np.exp(logW)))
else:
approx_fn = interp1d(self.stat_grid[m],
log_ref,
kind='quadratic',
bounds_error=False,
fill_value='extrapolate')
grid = np.linspace(self.stat_grid[m].min(), self.stat_grid[m].max(), 1000)
logW = (approx_fn(grid) -
0.5 * (grid - self.observed_target[m]) ** 2 / var_target)
logW -= logW.max()
self._families.append(discrete_family(grid,
np.exp(logW)))
def _approx_pivots(self,
mean_parameter,
alternatives=None):
if not hasattr(self, "_families"):
self._construct_families()
if alternatives is None:
alternatives = ['twosided'] * self.ntarget
pivot = []
for m in range(self.ntarget):
family = self._families[m]
var_target = 1. / ((self.precs[m])[0, 0])
mean = self.S[m].dot(mean_parameter[m].reshape((1,))) + self.r[m]
_cdf = family.cdf((mean[0] - self.observed_target[m]) / var_target, x=self.observed_target[m])
print("variable completed ", m)
if alternatives[m] == 'twosided':
pivot.append(2 * min(_cdf, 1 - _cdf))
elif alternatives[m] == 'greater':
pivot.append(1 - _cdf)
elif alternatives[m] == 'less':
pivot.append(_cdf)
else:
raise ValueError('alternative should be in ["twosided", "less", "greater"]')
return pivot
def _approx_intervals(self,
level=0.9):
if not hasattr(self, "_families"):
self._construct_families()
lower, upper = [], []
for m in range(self.ntarget):
# construction of intervals from families follows `selectinf.learning.core`
family = self._families[m]
observed_target = self.observed_target[m]
l, u = family.equal_tailed_interval(observed_target,
alpha=1 - level)
var_target = 1. / ((self.precs[m])[0, 0])
lower.append(l * var_target + observed_target)
upper.append(u * var_target + observed_target)
return np.asarray(lower), np.asarray(upper)
### Private method
def _construct_density(self):
precs = {}
S = {}
r = {}
p = self.target_score_cov.shape[1]
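        # For each target coordinate store the implied precision (precs) and the
        # linear map S and offset r that translate a hypothesized parameter value
        # into the mean used by the pivots in _approx_pivots.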
for m in range(self.ntarget):
observed_target_uni = (self.observed_target[m]).reshape((1,))
target_cov_uni = (np.diag(self.target_cov)[m]).reshape((1, 1))
prec_target = 1. / target_cov_uni
target_score_cov_uni = self.target_score_cov[m, :].reshape((1, p))
target_linear = target_score_cov_uni.T.dot(prec_target)
target_offset = (self.score_offset - target_linear.dot(observed_target_uni)).reshape(
(target_linear.shape[0],))
target_lin = -self.logdens_linear.dot(target_linear)
target_off = (self.cond_mean - target_lin.dot(observed_target_uni)).reshape((target_lin.shape[0],))
_prec = prec_target + (target_linear.T.dot(target_linear) * self.randomizer_prec) - target_lin.T.dot(
self.prec_opt).dot(target_lin)
_P = target_linear.T.dot(target_offset) * self.randomizer_prec
_r = (1. / _prec).dot(target_lin.T.dot(self.prec_opt).dot(target_off) - _P)
_S = np.linalg.inv(_prec).dot(prec_target)
S[m] = _S
r[m] = _r
precs[m] = _prec
self.precs = precs
self.S = S
self.r = r
def solve_barrier_affine_jacobian_py(conjugate_arg,
precision,
feasible_point,
con_linear,
con_offset,
C,
active_dirs,
useJacobian=True,
step=1,
nstep=2000,
min_its=500,
tol=1.e-12):
"""
This needs to be updated to actually use the Jacobian information (in self.C)
arguments
conjugate_arg: \\bar{\\Sigma}^{-1} \bar{\\mu}
precision: \\bar{\\Sigma}^{-1}
feasible_point: gamma's from fitting
con_linear: linear part of affine constraint used for barrier function
con_offset: offset part of affine constraint used for barrier function
C: V^T Q^{-1} \\Lambda V
active_dirs:
"""
scaling = np.sqrt(np.diag(con_linear.dot(precision).dot(con_linear.T)))
if feasible_point is None:
feasible_point = 1. / scaling
def objective(gs):
p1 = -gs.T.dot(conjugate_arg)
p2 = gs.T.dot(precision).dot(gs) / 2.
if useJacobian:
p3 = - jacobian_grad_hess(gs, C, active_dirs)[0]
else:
p3 = 0
p4 = log(1. + 1. / ((con_offset - con_linear.dot(gs)) / scaling)).sum()
return p1 + p2 + p3 + p4
def grad(gs):
p1 = -conjugate_arg + precision.dot(gs)
p2 = -con_linear.T.dot(1. / (scaling + con_offset - con_linear.dot(gs)))
if useJacobian:
p3 = - jacobian_grad_hess(gs, C, active_dirs)[1]
else:
p3 = 0
p4 = 1. / (con_offset - con_linear.dot(gs))
return p1 + p2 + p3 + p4
def barrier_hessian(gs): # contribution of barrier and jacobian to hessian
p1 = con_linear.T.dot(np.diag(-1. / ((scaling + con_offset - con_linear.dot(gs)) ** 2.)
+ 1. / ((con_offset - con_linear.dot(gs)) ** 2.))).dot(con_linear)
if useJacobian:
p2 = - jacobian_grad_hess(gs, C, active_dirs)[2]
else:
p2 = 0
return p1 + p2
current = feasible_point
current_value = np.inf
for itercount in range(nstep):
cur_grad = grad(current)
# make sure proposal is feasible
count = 0
while True:
count += 1
proposal = current - step * cur_grad
if np.all(con_offset - con_linear.dot(proposal) > 0):
break
step *= 0.5
if count >= 40:
raise ValueError('not finding a feasible point')
# make sure proposal is a descent
count = 0
while True:
count += 1
proposal = current - step * cur_grad
proposed_value = objective(proposal)
if proposed_value <= current_value:
break
step *= 0.5
if count >= 20:
if not (np.isnan(proposed_value) or np.isnan(current_value)):
break
else:
raise ValueError('value is NaN: %f, %f' % (proposed_value, current_value))
# stop if relative decrease is small
if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value) and itercount >= min_its:
current = proposal
current_value = proposed_value
break
current = proposal
current_value = proposed_value
if itercount % 4 == 0:
step *= 2
hess = inv(precision + barrier_hessian(current))
return current_value, current, hess
# Jacobian calculations
def calc_GammaMinus(gamma, active_dirs):
"""Calculate Gamma^minus (as a function of gamma vector, active directions)
"""
to_diag = [[g] * (ug.size - 1) for (g, ug) in zip(gamma, active_dirs.values())]
return block_diag(*[i for gp in to_diag for i in gp])
def jacobian_grad_hess(gamma, C, active_dirs):
""" Calculate the log-Jacobian (scalar), gradient (gamma.size vector) and hessian (gamma.size square matrix)
"""
if C.shape == (0, 0): # when all groups are size one, C will be an empty array
return 0, 0, 0
else:
GammaMinus = calc_GammaMinus(gamma, active_dirs)
# eigendecomposition
#evalues, evectors = eig(GammaMinus + C)
# log Jacobian
#J = log(evalues).sum()
J = np.log(np.linalg.det(GammaMinus + C))
# inverse
#GpC_inv = evectors.dot(np.diag(1 / evalues).dot(evectors.T))
GpC_inv = np.linalg.inv(GammaMinus + C)
# summing matrix (gamma.size by C.shape[0])
S = block_diag(*[np.ones((1, ug.size - 1)) for ug in active_dirs.values()])
# gradient
grad_J = S.dot(GpC_inv.diagonal())
# hessian
hess_J = -S.dot(np.multiply(GpC_inv, GpC_inv.T).dot(S.T))
return J, grad_J, hess_J
def _check_groups(groups):
"""Make sure that the user-specific groups are ok
There are a number of assumptions that group_lasso makes about
how groups are specified. Specifically, we assume that
`groups` is a 1-d array_like of integers that are sorted in
increasing order, start at 0, and have no gaps (e.g., if there
is a group 2 and a group 4, there must also be at least one
feature in group 3).
This function checks the user-specified group scheme and
raises an exception if it finds any problems.
Sorting feature groups is potentially tedious for the user and
in future we might do this for them.
"""
# check array_like
agroups = np.array(groups)
# check dimension
if len(agroups.shape) != 1:
raise ValueError("Groups are not a 1D array_like")
# check sorted
    if np.any(agroups[:-1] > agroups[1:]):
raise ValueError("Groups are not sorted")
# check integers
if not np.issubdtype(agroups.dtype, np.integer):
raise TypeError("Groups are not integers")
# check starts with 0
if not np.amin(agroups) == 0:
raise ValueError("First group is not 0")
# check for no skipped groups
if not np.all(np.diff(np.unique(agroups)) == 1):
raise ValueError("Some group is skipped")
| [((31360, 31406), 'scipy.linalg.block_diag', 'block_diag', (['*[i for gp in to_diag for i in gp]'], {}), '(*[i for gp in to_diag for i in gp])\n', (31370, 31406), False, 'from scipy.linalg import block_diag\n'), ((33085, 33101), 'numpy.array', 'np.array', (['groups'], {}), '(groups)\n', (33093, 33101), True, 'import numpy as np\n'), ((2537, 2568), 'numpy.ones', 'np.ones', (['self.nfeature', 'np.bool'], {}), '(self.nfeature, np.bool)\n', (2544, 2568), True, 'import numpy as np\n'), ((4189, 4211), 'numpy.hstack', 'np.hstack', (['ordered_opt'], {}), '(ordered_opt)\n', (4198, 4211), True, 'import numpy as np\n'), ((4463, 4486), 'numpy.zeros', 'np.zeros', (['self.nfeature'], {}), '(self.nfeature)\n', (4471, 4486), True, 'import numpy as np\n'), ((4748, 4802), 'numpy.dot', 'np.dot', (['X.T', '(X[:, (ordered_vars)] * W[:, (np.newaxis)])'], {}), '(X.T, X[:, (ordered_vars)] * W[:, (np.newaxis)])\n', (4754, 4802), True, 'import numpy as np\n'), ((5147, 5173), 'numpy.sign', 'np.sign', (['self.initial_soln'], {}), '(self.initial_soln)\n', (5154, 5173), True, 'import numpy as np\n'), ((5191, 5219), 'numpy.flatnonzero', 'np.flatnonzero', (['active_signs'], {}), '(active_signs)\n', (5205, 5219), True, 'import numpy as np\n'), ((5952, 5967), 'scipy.linalg.block_diag', 'block_diag', (['*Vs'], {}), '(*Vs)\n', (5962, 5967), False, 'from scipy.linalg import block_diag\n'), ((6056, 6071), 'scipy.linalg.block_diag', 'block_diag', (['*Ls'], {}), '(*Ls)\n', (6066, 6071), False, 'from scipy.linalg import block_diag\n'), ((6207, 6213), 'numpy.linalg.inv', 'inv', (['Q'], {}), '(Q)\n', (6210, 6213), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((6661, 6703), 'numpy.zeros', 'np.zeros', (['self.observed_opt_state.shape[0]'], {}), '(self.observed_opt_state.shape[0])\n', (6669, 6703), True, 'import numpy as np\n'), ((7143, 7209), 'regreg.api.identity_quadratic', 'rr.identity_quadratic', (['self.ridge_term', '(0)', '(-self._initial_omega)', '(0)'], {}), '(self.ridge_term, 0, -self._initial_omega, 0)\n', (7164, 7209), True, 'import regreg.api as rr\n'), ((7340, 7385), 'regreg.api.simple_problem', 'rr.simple_problem', (['self.loglike', 'self.penalty'], {}), '(self.loglike, self.penalty)\n', (7357, 7385), True, 'import regreg.api as rr\n'), ((8199, 8264), 'regreg.api.glm.gaussian', 'rr.glm.gaussian', (['X', 'Y'], {'coef': '(1.0 / sigma ** 2)', 'quadratic': 'quadratic'}), '(X, Y, coef=1.0 / sigma ** 2, quadratic=quadratic)\n', (8214, 8264), True, 'import regreg.api as rr\n'), ((11580, 11610), 'numpy.atleast_1d', 'np.atleast_1d', (['observed_target'], {}), '(observed_target)\n', (11593, 11610), True, 'import numpy as np\n'), ((11633, 11648), 'numpy.linalg.inv', 'inv', (['target_cov'], {}), '(target_cov)\n', (11636, 11648), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((14248, 14267), 'scipy.stats.norm.cdf', 'ndist.cdf', (['Z_scores'], {}), '(Z_scores)\n', (14257, 14267), True, 'from scipy.stats import norm as ndist\n'), ((14370, 14396), 'scipy.stats.norm.ppf', 'ndist.ppf', (['(1 - alpha / 2.0)'], {}), '(1 - alpha / 2.0)\n', (14379, 14396), True, 'from scipy.stats import norm as ndist\n'), ((17277, 17306), 'numpy.linalg.inv', 'np.linalg.inv', (['query.cond_cov'], {}), '(query.cond_cov)\n', (17290, 17306), True, 'import numpy as np\n'), ((19777, 19900), 'pandas.DataFrame', 'pd.DataFrame', (["{'target': self.observed_target, 'pvalue': pvalues, 'lower_confidence':\n lower, 'upper_confidence': upper}"], {}), "({'target': self.observed_target, 'pvalue': pvalues,\n 'lower_confidence': lower, 
'upper_confidence': upper})\n", (19789, 19900), True, 'import pandas as pd\n'), ((20548, 20573), 'numpy.linalg.inv', 'np.linalg.inv', (['target_cov'], {}), '(target_cov)\n', (20561, 20573), True, 'import numpy as np\n'), ((22410, 22429), 'numpy.asarray', 'np.asarray', (['ref_hat'], {}), '(ref_hat)\n', (22420, 22429), True, 'import numpy as np\n'), ((32043, 32072), 'numpy.linalg.inv', 'np.linalg.inv', (['(GammaMinus + C)'], {}), '(GammaMinus + C)\n', (32056, 32072), True, 'import numpy as np\n'), ((33243, 33277), 'numpy.any', 'np.any', (['(agroups[:-1] > agroups[1:])'], {}), '(agroups[:-1] > agroups[1:])\n', (33249, 33277), True, 'import numpy as np\n'), ((33366, 33406), 'numpy.issubdtype', 'np.issubdtype', (['agroups.dtype', 'np.integer'], {}), '(agroups.dtype, np.integer)\n', (33379, 33406), True, 'import numpy as np\n'), ((1435, 1487), 'regreg.api.weighted_l1norm', 'rr.weighted_l1norm', ([], {'weights': 'weights_np', 'lagrange': '(1.0)'}), '(weights=weights_np, lagrange=1.0)\n', (1453, 1487), True, 'import regreg.api as rr\n'), ((1574, 1627), 'regreg.api.group_lasso', 'rr.group_lasso', (['groups'], {'weights': 'weights', 'lagrange': '(1.0)'}), '(groups, weights=weights, lagrange=1.0)\n', (1588, 1627), True, 'import regreg.api as rr\n'), ((2979, 3001), 'numpy.unique', 'np.unique', (['self.groups'], {}), '(self.groups)\n', (2988, 3001), True, 'import numpy as np\n'), ((6598, 6638), 'numpy.eye', 'np.eye', (['self.observed_opt_state.shape[0]'], {}), '(self.observed_opt_state.shape[0])\n', (6604, 6638), True, 'import numpy as np\n'), ((9138, 9157), 'numpy.linalg.inv', 'inv', (['cond_precision'], {}), '(cond_precision)\n', (9141, 9157), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((9341, 9360), 'numpy.linalg.inv', 'inv', (['cond_precision'], {}), '(cond_precision)\n', (9344, 9360), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((14291, 14323), 'numpy.minimum', 'np.minimum', (['pvalues', '(1 - pvalues)'], {}), '(pvalues, 1 - pvalues)\n', (14301, 14323), True, 'import numpy as np\n'), ((18024, 18050), 'numpy.zeros', 'np.zeros', (['(ntarget, ngrid)'], {}), '((ntarget, ngrid))\n', (18032, 18050), True, 'import numpy as np\n'), ((18395, 18421), 'numpy.zeros', 'np.zeros', (['(ntarget, ngrid)'], {}), '((ntarget, ngrid))\n', (18403, 18421), True, 'import numpy as np\n'), ((19598, 19633), 'numpy.zeros_like', 'np.zeros_like', (['self.observed_target'], {}), '(self.observed_target)\n', (19611, 19633), True, 'import numpy as np\n'), ((20006, 20028), 'numpy.all', 'np.all', (['(parameter == 0)'], {}), '(parameter == 0)\n', (20012, 20028), True, 'import numpy as np\n'), ((25922, 25939), 'numpy.asarray', 'np.asarray', (['lower'], {}), '(lower)\n', (25932, 25939), True, 'import numpy as np\n'), ((25941, 25958), 'numpy.asarray', 'np.asarray', (['upper'], {}), '(upper)\n', (25951, 25958), True, 'import numpy as np\n'), ((31905, 31934), 'numpy.linalg.det', 'np.linalg.det', (['(GammaMinus + C)'], {}), '(GammaMinus + C)\n', (31918, 31934), True, 'import numpy as np\n'), ((33497, 33513), 'numpy.amin', 'np.amin', (['agroups'], {}), '(agroups)\n', (33504, 33513), True, 'import numpy as np\n'), ((3156, 3178), 'numpy.linalg.norm', 'norm', (['soln[group_mask]'], {}), '(soln[group_mask])\n', (3160, 3178), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((4095, 4108), 'numpy.sign', 'np.sign', (['soln'], {}), '(soln)\n', (4102, 4108), True, 'import numpy as np\n'), ((5444, 5449), 'numpy.linalg.qr', 'qr', (['Z'], {}), '(Z)\n', (5446, 5449), False, 'from numpy.linalg import norm, qr, inv, 
eig\n'), ((5544, 5560), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {}), '((1, 0))\n', (5552, 5560), True, 'import numpy as np\n'), ((5755, 5765), 'numpy.eye', 'np.eye', (['pg'], {}), '(pg)\n', (5761, 5765), True, 'import numpy as np\n'), ((8422, 8436), 'numpy.sqrt', 'np.sqrt', (['(n - 1)'], {}), '(n - 1)\n', (8429, 8436), True, 'import numpy as np\n'), ((8545, 8567), 'numpy.sqrt', 'np.sqrt', (['(n / (n - 1.0))'], {}), '(n / (n - 1.0))\n', (8552, 8567), True, 'import numpy as np\n'), ((9002, 9018), 'numpy.asarray', 'np.asarray', (['prec'], {}), '(prec)\n', (9012, 9018), True, 'import numpy as np\n'), ((11452, 11479), 'numpy.asarray', 'np.asarray', (['observed_target'], {}), '(observed_target)\n', (11462, 11479), True, 'import numpy as np\n'), ((12307, 12339), 'numpy.asarray', 'np.asarray', (['self.randomizer_prec'], {}), '(self.randomizer_prec)\n', (12317, 12339), True, 'import numpy as np\n'), ((14200, 14227), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14207, 14227), True, 'import numpy as np\n'), ((17919, 17940), 'numpy.diag', 'np.diag', (['inverse_info'], {}), '(inverse_info)\n', (17926, 17940), True, 'import numpy as np\n'), ((18127, 18229), 'numpy.linspace', 'np.linspace', (['(observed_target[j] - 1.5 * _scale[j])', '(observed_target[j] + 1.5 * _scale[j])'], {'num': 'ngrid'}), '(observed_target[j] - 1.5 * _scale[j], observed_target[j] + 1.5 *\n _scale[j], num=ngrid)\n', (18138, 18229), True, 'import numpy as np\n'), ((18498, 18600), 'numpy.linspace', 'np.linspace', (['(observed_target[j] - 1.5 * _scale[j])', '(observed_target[j] + 1.5 * _scale[j])'], {'num': 'ngrid'}), '(observed_target[j] - 1.5 * _scale[j], observed_target[j] + 1.5 *\n _scale[j], num=ngrid)\n', (18509, 18600), True, 'import numpy as np\n'), ((20424, 20451), 'numpy.asarray', 'np.asarray', (['observed_target'], {}), '(observed_target)\n', (20434, 20451), True, 'import numpy as np\n'), ((21600, 21620), 'numpy.identity', 'np.identity', (['num_opt'], {}), '(num_opt)\n', (21611, 21620), True, 'import numpy as np\n'), ((21858, 21885), 'numpy.asarray', 'np.asarray', (['[conjugate_arg]'], {}), '([conjugate_arg])\n', (21868, 21885), True, 'import numpy as np\n'), ((21921, 21953), 'numpy.reshape', 'np.reshape', (['implied_prec', '(1, 1)'], {}), '(implied_prec, (1, 1))\n', (21931, 21953), True, 'import numpy as np\n'), ((23507, 23611), 'scipy.interpolate.interp1d', 'interp1d', (['self.stat_grid[m]', 'log_ref'], {'kind': '"""quadratic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(self.stat_grid[m], log_ref, kind='quadratic', bounds_error=False,\n fill_value='extrapolate')\n", (23515, 23611), False, 'from scipy.interpolate import interp1d\n'), ((30706, 30745), 'numpy.fabs', 'np.fabs', (['(current_value - proposed_value)'], {}), '(current_value - proposed_value)\n', (30713, 30745), True, 'import numpy as np\n'), ((1231, 1248), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (1240, 1248), True, 'import numpy as np\n'), ((3187, 3197), 'numpy.linalg.norm', 'norm', (['soln'], {}), '(soln)\n', (3191, 3197), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((3355, 3381), 'numpy.flatnonzero', 'np.flatnonzero', (['group_mask'], {}), '(group_mask)\n', (3369, 3381), True, 'import numpy as np\n'), ((3657, 3679), 'numpy.linalg.norm', 'norm', (['soln[group_mask]'], {}), '(soln[group_mask])\n', (3661, 3679), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((8389, 8398), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (8395, 8398), True, 'import numpy as 
np\n'), ((8401, 8419), 'numpy.sqrt', 'np.sqrt', (['mean_diag'], {}), '(mean_diag)\n', (8408, 8419), True, 'import numpy as np\n'), ((8533, 8542), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (8539, 8542), True, 'import numpy as np\n'), ((14838, 14865), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14845, 14865), True, 'import numpy as np\n'), ((21131, 21171), 'numpy.atleast_1d', 'np.atleast_1d', (['(grid[k] - observed_target)'], {}), '(grid[k] - observed_target)\n', (21144, 21171), True, 'import numpy as np\n'), ((27155, 27175), 'numpy.linalg.inv', 'np.linalg.inv', (['_prec'], {}), '(_prec)\n', (27168, 27175), True, 'import numpy as np\n'), ((30754, 30776), 'numpy.fabs', 'np.fabs', (['current_value'], {}), '(current_value)\n', (30761, 30776), True, 'import numpy as np\n'), ((32151, 32176), 'numpy.ones', 'np.ones', (['(1, ug.size - 1)'], {}), '((1, ug.size - 1))\n', (32158, 32176), True, 'import numpy as np\n'), ((33630, 33648), 'numpy.unique', 'np.unique', (['agroups'], {}), '(agroups)\n', (33639, 33648), True, 'import numpy as np\n'), ((3598, 3620), 'numpy.linalg.norm', 'norm', (['soln[group_mask]'], {}), '(soln[group_mask])\n', (3602, 3620), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((5400, 5418), 'numpy.eye', 'np.eye', (['pg', '(pg - 1)'], {}), '(pg, pg - 1)\n', (5406, 5418), True, 'import numpy as np\n'), ((8506, 8524), 'numpy.sqrt', 'np.sqrt', (['mean_diag'], {}), '(mean_diag)\n', (8513, 8524), True, 'import numpy as np\n'), ((22721, 22745), 'numpy.diag', 'np.diag', (['self.target_cov'], {}), '(self.target_cov)\n', (22728, 22745), True, 'import numpy as np\n'), ((23446, 23458), 'numpy.exp', 'np.exp', (['logW'], {}), '(logW)\n', (23452, 23458), True, 'import numpy as np\n'), ((24122, 24134), 'numpy.exp', 'np.exp', (['logW'], {}), '(logW)\n', (24128, 24134), True, 'import numpy as np\n'), ((26254, 26278), 'numpy.diag', 'np.diag', (['self.target_cov'], {}), '(self.target_cov)\n', (26261, 26278), True, 'import numpy as np\n'), ((30451, 30475), 'numpy.isnan', 'np.isnan', (['proposed_value'], {}), '(proposed_value)\n', (30459, 30475), True, 'import numpy as np\n'), ((30479, 30502), 'numpy.isnan', 'np.isnan', (['current_value'], {}), '(current_value)\n', (30487, 30502), True, 'import numpy as np\n'), ((32316, 32347), 'numpy.multiply', 'np.multiply', (['GpC_inv', 'GpC_inv.T'], {}), '(GpC_inv, GpC_inv.T)\n', (32327, 32347), True, 'import numpy as np\n'), ((14496, 14523), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14503, 14523), True, 'import numpy as np\n'), ((14625, 14652), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14632, 14652), True, 'import numpy as np\n')] |
mattjj/pyhsmm-collapsedinfinite | internals/states.py | 81a60c025beec6fb065bc9f4e23cea43b6f6725c | from __future__ import division
import numpy as np
na = np.newaxis
import collections, itertools
import abc
from pyhsmm.util.stats import sample_discrete, sample_discrete_from_log, combinedata
from pyhsmm.util.general import rle as rle
# NOTE: assumes censoring. can make no censoring by adding to score of last
# segment
SAMPLING = -1 # special constant for indicating a state or state range that is being resampled
NEW = -2 # special constant indicating a potentially new label
ABIGNUMBER = 10000 # state labels are sampled uniformly from 0 to abignumber exclusive
####################
# States Classes #
####################
# TODO an array class that maintains its own rle
# must override set methods
# type(x).__setitem__(x,i) classmethod
# also has members norep and lens (or something)
# that are either read-only or also override setters
# for now, i'll just make sure outside that anything that sets self.stateseq
# also sets self.stateseq_norep and self.durations
# it should also call beta updates...
class collapsed_states(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def resample(self):
pass
@abc.abstractmethod
def _counts_from(self,k):
pass
@abc.abstractmethod
def _counts_to(self,k):
pass
@abc.abstractmethod
def _counts_fromto(self,k):
pass
def _new_label(self,ks):
assert SAMPLING not in ks
newlabel = np.random.randint(ABIGNUMBER)
while newlabel in ks:
newlabel = np.random.randint(ABIGNUMBER)
newweight = self.beta.betavec[newlabel] # instantiate, needed if new state at beginning of seq
return newlabel
def _data_withlabel(self,k):
assert k != SAMPLING
return self.data[self.stateseq == k]
def _occupied(self):
return set(self.stateseq) - set((SAMPLING,))
def plot(self,colors_dict):
from matplotlib import pyplot as plt
stateseq_norep, durations = rle(self.stateseq)
X,Y = np.meshgrid(np.hstack((0,durations.cumsum())),(0,1))
if colors_dict is not None:
C = np.array([[colors_dict[state] for state in stateseq_norep]])
else:
C = stateseq_norep[na,:]
plt.pcolor(X,Y,C,vmin=0,vmax=1)
plt.ylim((0,1))
plt.xlim((0,len(self.stateseq)))
plt.yticks([])
class collapsed_stickyhdphmm_states(collapsed_states):
def __init__(self,model,beta,alpha_0,kappa,obs,data=None,T=None,stateseq=None):
self.alpha_0 = alpha_0
self.kappa = kappa
self.model = model
self.beta = beta
self.obs = obs
self.data = data
if (data,stateseq) == (None,None):
# generating
assert T is not None, 'must pass in T when generating'
self._generate(T)
elif data is None:
self.T = stateseq.shape[0]
self.stateseq = stateseq
elif stateseq is None:
self.data = data
self._generate(data.shape[0])
else:
assert data.shape[0] == stateseq.shape[0]
self.stateseq = stateseq
self.data = data
self.T = data.shape[0]
def _generate(self,T):
self.T = T
alpha, kappa = self.alpha_0, self.kappa
betavec = self.beta.betavec
stateseq = np.zeros(T,dtype=np.int)
model = self.model
self.stateseq = stateseq[:0]
# NOTE: we have a choice of what state to start in; it's just a
# definition choice that isn't specified in the HDP-HMM
# Here, we choose just to sample from beta. Note that if this is the
# first chain being sampled in this model, this will always sample
# zero, since no states will be occupied.
ks = list(model._occupied()) + [None]
firststate = sample_discrete(np.arange(len(ks)))
if firststate == len(ks)-1:
stateseq[0] = self._new_label(ks)
else:
stateseq[0] = ks[firststate]
# runs a CRF with fixed weights beta forwards
for t in range(1,T):
self.stateseq = stateseq[:t]
ks = list(model._occupied() | self._occupied())
betarest = 1-sum(betavec[k] for k in ks)
# get the counts of new states coming out of our current state
# going to all other states
fromto_counts = np.array([model._counts_fromto(stateseq[t-1],k)
+ self._counts_fromto(stateseq[t-1],k)
for k in ks])
# for those states plus a new one, sample proportional to
            scores = np.array([(alpha*betavec[k] + (kappa if k == stateseq[t-1] else 0) + ft)
for k,ft in zip(ks,fromto_counts)] + [alpha*betarest])
nextstateidx = sample_discrete(scores)
if nextstateidx == scores.shape[0]-1:
stateseq[t] = self._new_label(ks)
else:
stateseq[t] = ks[nextstateidx]
self.stateseq = stateseq
def resample(self):
model = self.model
for t in np.random.permutation(self.T):
# throw out old value
self.stateseq[t] = SAMPLING
ks = list(model._occupied())
self.beta.housekeeping(ks)
# form the scores and sample from them
scores = np.array([self._score(k,t) for k in ks]+[self._new_score(ks,t)])
idx = sample_discrete_from_log(scores)
# set the state
if idx == scores.shape[0]-1:
self.stateseq[t] = self._new_label(ks)
else:
self.stateseq[t] = ks[idx]
def _score(self,k,t):
alpha, kappa = self.alpha_0, self.kappa
betavec, model, o = self.beta.betavec, self.model, self.obs
data, stateseq = self.data, self.stateseq
score = 0
# left transition score
if t > 0:
score += np.log( (alpha*betavec[k] + (kappa if k == stateseq[t-1] else 0)
+ model._counts_fromto(stateseq[t-1],k))
/ (alpha+kappa+model._counts_from(stateseq[t-1])) )
# right transition score
if t < self.T - 1:
# indicators since we may need to include the left transition in
# counts (since we are scoring exchangeably, not independently)
another_from = 1 if t > 0 and stateseq[t-1] == k else 0
another_fromto = 1 if (t > 0 and stateseq[t-1] == k and stateseq[t+1] == k) else 0
score += np.log( (alpha*betavec[stateseq[t+1]] + (kappa if k == stateseq[t+1] else 0)
+ model._counts_fromto(k,stateseq[t+1]) + another_fromto)
/ (alpha+kappa+model._counts_from(k) + another_from) )
# observation score
score += o.log_predictive(data[t],model._data_withlabel(k))
return score
def _new_score(self,ks,t):
alpha, kappa = self.alpha_0, self.kappa
betavec, model, o = self.beta.betavec, self.model, self.obs
data, stateseq = self.data, self.stateseq
score = 0
# left transition score
if t > 0:
betarest = 1-sum(betavec[k] for k in ks)
score += np.log(alpha*betarest/(alpha+kappa+model._counts_from(stateseq[t-1])))
# right transition score
if t < self.T-1:
score += np.log(betavec[stateseq[t+1]])
# observation score
score += o.log_marginal_likelihood(data[t])
return score
def _counts_from(self,k):
assert k != SAMPLING
assert np.sum(self.stateseq == SAMPLING) in (0,1)
temp = np.sum(self.stateseq[:-1] == k)
if SAMPLING in self.stateseq[1:] and \
self.stateseq[np.where(self.stateseq == SAMPLING)[0]-1] == k:
temp -= 1
return temp
def _counts_to(self,k):
assert k != SAMPLING
assert np.sum(self.stateseq == SAMPLING) in (0,1)
temp = np.sum(self.stateseq[1:] == k)
if SAMPLING in self.stateseq[:-1] and \
self.stateseq[np.where(self.stateseq == SAMPLING)[0]+1] == k:
temp -= 1
return temp
def _counts_fromto(self,k1,k2):
assert k1 != SAMPLING and k2 != SAMPLING
if k1 not in self.stateseq or k2 not in self.stateseq:
return 0
else:
from_indices, = np.where(self.stateseq[:-1] == k1) # EXCEPT last
return np.sum(self.stateseq[from_indices+1] == k2)
class collapsed_hdphsmm_states(collapsed_states):
def __init__(self,model,beta,alpha_0,obs,dur,data=None,T=None,stateseq=None):
self.alpha_0 = alpha_0
self.model = model
self.beta = beta
self.obs = obs
self.dur = dur
self.data = data
if (data,stateseq) == (None,None):
# generating
assert T is not None, 'must pass in T when generating'
self._generate(T)
elif data is None:
self.T = stateseq.shape[0]
self.stateseq = stateseq
elif stateseq is None:
self.data = data
# self._generate(data.shape[0]) # initialized from the prior
# self.stateseq = self.stateseq[:self.T]
self.stateseq = np.random.randint(25,size=data.shape[0])
self.T = data.shape[0]
else:
assert data.shape[0] == stateseq.shape[0]
self.stateseq = stateseq
self.stateseq_norep, self.durations = rle(stateseq)
self.data = data
self.T = data.shape[0]
def _generate(self,T):
alpha = self.alpha_0
betavec = self.beta.betavec
model = self.model
self.stateseq = np.array([])
ks = list(model._occupied()) + [None]
firststateidx = sample_discrete(np.arange(len(ks)))
if firststateidx == len(ks)-1:
firststate = self._new_label(ks)
else:
firststate = ks[firststateidx]
self.dur.resample(combinedata((model._durs_withlabel(firststate),self._durs_withlabel(firststate))))
firststate_dur = self.dur.rvs()
self.stateseq = np.ones(firststate_dur,dtype=int)*firststate
t = firststate_dur
# run a family-CRF (CRF with durations) forwards
while t < T:
ks = list(model._occupied() | self._occupied())
betarest = 1-sum(betavec[k] for k in ks)
fromto_counts = np.array([model._counts_fromto(self.stateseq[t-1],k)
+ self._counts_fromto(self.stateseq[t-1],k)
for k in ks])
scores = np.array([(alpha*betavec[k] + ft if k != self.stateseq[t-1] else 0)
for k,ft in zip(ks,fromto_counts)]
+ [alpha*(1-betavec[self.stateseq[t-1]])*betarest])
nextstateidx = sample_discrete(scores)
if nextstateidx == scores.shape[0]-1:
nextstate = self._new_label(ks)
else:
nextstate = ks[nextstateidx]
# now get the duration of nextstate!
self.dur.resample(combinedata((model._durs_withlabel(nextstate),self._durs_withlabel(nextstate))))
nextstate_dur = self.dur.rvs()
self.stateseq = np.concatenate((self.stateseq,np.ones(nextstate_dur,dtype=int)*nextstate))
t += nextstate_dur
self.T = len(self.stateseq)
def resample(self):
self.resample_label_version()
def _durs_withlabel(self,k):
assert k != SAMPLING
if len(self.stateseq) > 0:
stateseq_norep, durations = rle(self.stateseq)
return durations[stateseq_norep == k]
else:
return []
def _counts_fromto(self,k1,k2):
assert k1 != SAMPLING and k2 != SAMPLING
if k1 not in self.stateseq or k2 not in self.stateseq or k1 == k2:
return 0
else:
stateseq_norep, _ = rle(self.stateseq)
from_indices, = np.where(stateseq_norep[:-1] == k1) # EXCEPT last
return np.sum(stateseq_norep[from_indices+1] == k2)
def _counts_from(self,k):
assert k != SAMPLING
stateseq_norep, _ = rle(self.stateseq)
temp = np.sum(stateseq_norep[:-1] == k)
if SAMPLING in stateseq_norep[1:] and \
stateseq_norep[np.where(stateseq_norep == SAMPLING)[0]-1] == k:
temp -= 1
return temp
def _counts_to(self,k):
assert k != SAMPLING
stateseq_norep, _ = rle(self.stateseq)
temp = np.sum(stateseq_norep[1:] == k)
if SAMPLING in stateseq_norep[:-1] and \
stateseq_norep[np.where(stateseq_norep == SAMPLING)[0]+1] == k:
temp -= 1
return temp
### label sampler stuff
def resample_label_version(self):
# NOTE never changes first label: we assume the initial state
# distribution is a delta at that label
for t in (np.random.permutation(self.T-1)+1):
self.stateseq[t] = SAMPLING
ks = self.model._occupied()
self.beta.housekeeping(ks)
ks = list(ks)
# sample a new value
scores = np.array([self._label_score(t,k) for k in ks] + [self._new_label_score(t,ks)])
newlabelidx = sample_discrete_from_log(scores)
if newlabelidx == scores.shape[0]-1:
self.stateseq[t] = self._new_label(ks)
else:
self.stateseq[t] = ks[newlabelidx]
def _label_score(self,t,k):
assert t > 0
score = 0.
# unpack variables
model = self.model
alpha = self.alpha_0
beta = self.beta.betavec
stateseq = self.stateseq
obs, durs = self.obs, self.dur
# left transition (if there is one)
if stateseq[t-1] != k:
score += np.log(alpha * beta[k] + model._counts_fromto(stateseq[t-1],k)) \
- np.log(alpha * (1-beta[stateseq[t-1]]) + model._counts_from(stateseq[t-1]))
# right transition (if there is one)
if t < self.T-1 and stateseq[t+1] != k:
score += np.log(alpha * beta[stateseq[t+1]] + model._counts_fromto(k,stateseq[t+1])) \
- np.log(alpha * (1-beta[k]) + model._counts_from(k))
# predictive likelihoods
for (data,otherdata), (dur,otherdurs) in self._local_group(t,k):
score += obs.log_predictive(data,otherdata) + durs.log_predictive(dur,otherdurs)
return score
def _new_label_score(self,t,ks):
assert t > 0
score = 0.
# unpack
model = self.model
alpha = self.alpha_0
beta = self.beta.betavec
stateseq = self.stateseq
obs, durs = self.obs, self.dur
# left transition (only from counts, no to counts)
score += np.log(alpha) - np.log(alpha*(1.-beta[stateseq[t-1]])
+ model._counts_from(stateseq[t-1]))
# add in right transition (no counts)
if t < self.T-1:
score += np.log(beta[stateseq[t+1]])
# add in sum over k factor
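        # approximated by drawing 200 stick-breaking (GEM(gamma_0)) weights
        # from the remaining mass to account for unrepresented states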
if t < self.T-1:
betas = np.random.beta(1,self.beta.gamma_0,size=200)
betas[1:] *= (1-betas[:-1]).cumprod()
score += np.log(self.beta.remaining*(betas/(1-betas)).sum())
else:
score += np.log(self.beta.remaining)
# add in obs/dur scores of local pieces
for (data,otherdata), (dur,otherdurs) in self._local_group(t,NEW):
score += obs.log_predictive(data,otherdata) + durs.log_predictive(dur,otherdurs)
return score
def _local_group(self,t,k):
'''
returns a sequence of length between 1 and 3, where each sequence element is
((data,otherdata), (dur,otherdurs))
'''
# temporarily modifies members, like self.stateseq and maybe self.data
assert self.stateseq[t] == SAMPLING
orig_stateseq = self.stateseq.copy()
# temporarily set stateseq to hypothetical stateseq
# so that we can get the indicator sequence
# TODO if i write the special stateseq class, this will need fixing
self.stateseq[t] = k
wholegroup, pieces = self._local_slices(self.stateseq,t)
self.stateseq[t] = SAMPLING
# build local group of statistics
localgroup = []
self.stateseq[wholegroup] = SAMPLING
for piece, val in pieces:
# get all the other data
otherdata, otherdurs = self.model._data_withlabel(val), self.model._durs_withlabel(val)
# add a piece to our localgroup
localgroup.append(((self.data[piece],otherdata),(piece.stop-piece.start,otherdurs)))
# remove the used piece from the exclusion
self.stateseq[piece] = orig_stateseq[piece]
# restore original views
self.stateseq = orig_stateseq
# return
return localgroup
@classmethod
def _local_slices(cls,stateseq,t):
'''
returns slices: wholegroup, (piece1, ...)
'''
A,B = fill(stateseq,t-1), fill(stateseq,t+1)
if A == B:
return A, ((A,stateseq[A.start]),)
elif A.start <= t < A.stop or B.start <= t < B.stop:
return slice(A.start,B.stop), [(x,stateseq[x.start]) for x in (A,B) if x.stop - x.start > 0]
else:
It = slice(t,t+1)
return slice(A.start,B.stop), [(x,stateseq[x.start]) for x in (A,It,B) if x.stop - x.start > 0]
#######################
# Utility Functions #
#######################
def fill(seq,t):
if t < 0:
return slice(0,0)
elif t > seq.shape[0]-1:
return slice(seq.shape[0],seq.shape[0])
else:
endindices, = np.where(np.diff(seq) != 0) # internal end indices (not incl -1 and T-1)
startindices = np.concatenate(((0,),endindices+1,(seq.shape[0],))) # incl 0 and T
idx = np.where(startindices <= t)[0][-1]
return slice(startindices[idx],startindices[idx+1])
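# A quick worked example of fill (added for illustration; not part of the original module):
#   fill(np.array([1, 1, 2, 2, 2, 3]), 3)  -> the contiguous run of 2s covering index t=3, i.e. slice(2, 5)
#   fill(np.array([1, 1, 2, 2, 2, 3]), -1) -> slice(0, 0), an empty slice for an out-of-range index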
def canonize(seq):
seq = seq.copy()
canondict = collections.defaultdict(itertools.count().next)
for idx,s in enumerate(seq):
seq[idx] = canondict[s]
reversedict = {}
for k,v in canondict.iteritems():
reversedict[v] = k
return seq, canondict, reversedict
class dummytrans(object):
def __init__(self,A):
self.A = A
def resample(self,*args,**kwargs):
pass
| [((1433, 1462), 'numpy.random.randint', 'np.random.randint', (['ABIGNUMBER'], {}), '(ABIGNUMBER)\n', (1450, 1462), True, 'import numpy as np\n'), ((1974, 1992), 'pyhsmm.util.general.rle', 'rle', (['self.stateseq'], {}), '(self.stateseq)\n', (1977, 1992), True, 'from pyhsmm.util.general import rle as rle\n'), ((2234, 2269), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['X', 'Y', 'C'], {'vmin': '(0)', 'vmax': '(1)'}), '(X, Y, C, vmin=0, vmax=1)\n', (2244, 2269), True, 'from matplotlib import pyplot as plt\n'), ((2274, 2290), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (2282, 2290), True, 'from matplotlib import pyplot as plt\n'), ((2339, 2353), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2349, 2353), True, 'from matplotlib import pyplot as plt\n'), ((3345, 3370), 'numpy.zeros', 'np.zeros', (['T'], {'dtype': 'np.int'}), '(T, dtype=np.int)\n', (3353, 3370), True, 'import numpy as np\n'), ((5141, 5170), 'numpy.random.permutation', 'np.random.permutation', (['self.T'], {}), '(self.T)\n', (5162, 5170), True, 'import numpy as np\n'), ((7739, 7770), 'numpy.sum', 'np.sum', (['(self.stateseq[:-1] == k)'], {}), '(self.stateseq[:-1] == k)\n', (7745, 7770), True, 'import numpy as np\n'), ((8069, 8099), 'numpy.sum', 'np.sum', (['(self.stateseq[1:] == k)'], {}), '(self.stateseq[1:] == k)\n', (8075, 8099), True, 'import numpy as np\n'), ((9818, 9830), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9826, 9830), True, 'import numpy as np\n'), ((12333, 12351), 'pyhsmm.util.general.rle', 'rle', (['self.stateseq'], {}), '(self.stateseq)\n', (12336, 12351), True, 'from pyhsmm.util.general import rle as rle\n'), ((12367, 12399), 'numpy.sum', 'np.sum', (['(stateseq_norep[:-1] == k)'], {}), '(stateseq_norep[:-1] == k)\n', (12373, 12399), True, 'import numpy as np\n'), ((12656, 12674), 'pyhsmm.util.general.rle', 'rle', (['self.stateseq'], {}), '(self.stateseq)\n', (12659, 12674), True, 'from pyhsmm.util.general import rle as rle\n'), ((12690, 12721), 'numpy.sum', 'np.sum', (['(stateseq_norep[1:] == k)'], {}), '(stateseq_norep[1:] == k)\n', (12696, 12721), True, 'import numpy as np\n'), ((1516, 1545), 'numpy.random.randint', 'np.random.randint', (['ABIGNUMBER'], {}), '(ABIGNUMBER)\n', (1533, 1545), True, 'import numpy as np\n'), ((2113, 2173), 'numpy.array', 'np.array', (['[[colors_dict[state] for state in stateseq_norep]]'], {}), '([[colors_dict[state] for state in stateseq_norep]])\n', (2121, 2173), True, 'import numpy as np\n'), ((4849, 4872), 'pyhsmm.util.stats.sample_discrete', 'sample_discrete', (['scores'], {}), '(scores)\n', (4864, 4872), False, 'from pyhsmm.util.stats import sample_discrete, sample_discrete_from_log, combinedata\n'), ((5482, 5514), 'pyhsmm.util.stats.sample_discrete_from_log', 'sample_discrete_from_log', (['scores'], {}), '(scores)\n', (5506, 5514), False, 'from pyhsmm.util.stats import sample_discrete, sample_discrete_from_log, combinedata\n'), ((7472, 7504), 'numpy.log', 'np.log', (['betavec[stateseq[t + 1]]'], {}), '(betavec[stateseq[t + 1]])\n', (7478, 7504), True, 'import numpy as np\n'), ((7681, 7714), 'numpy.sum', 'np.sum', (['(self.stateseq == SAMPLING)'], {}), '(self.stateseq == SAMPLING)\n', (7687, 7714), True, 'import numpy as np\n'), ((8011, 8044), 'numpy.sum', 'np.sum', (['(self.stateseq == SAMPLING)'], {}), '(self.stateseq == SAMPLING)\n', (8017, 8044), True, 'import numpy as np\n'), ((8480, 8514), 'numpy.where', 'np.where', (['(self.stateseq[:-1] == k1)'], {}), '(self.stateseq[:-1] == k1)\n', (8488, 8514), True, 'import 
numpy as np\n'), ((8548, 8593), 'numpy.sum', 'np.sum', (['(self.stateseq[from_indices + 1] == k2)'], {}), '(self.stateseq[from_indices + 1] == k2)\n', (8554, 8593), True, 'import numpy as np\n'), ((10254, 10288), 'numpy.ones', 'np.ones', (['firststate_dur'], {'dtype': 'int'}), '(firststate_dur, dtype=int)\n', (10261, 10288), True, 'import numpy as np\n'), ((10988, 11011), 'pyhsmm.util.stats.sample_discrete', 'sample_discrete', (['scores'], {}), '(scores)\n', (11003, 11011), False, 'from pyhsmm.util.stats import sample_discrete, sample_discrete_from_log, combinedata\n'), ((11751, 11769), 'pyhsmm.util.general.rle', 'rle', (['self.stateseq'], {}), '(self.stateseq)\n', (11754, 11769), True, 'from pyhsmm.util.general import rle as rle\n'), ((12084, 12102), 'pyhsmm.util.general.rle', 'rle', (['self.stateseq'], {}), '(self.stateseq)\n', (12087, 12102), True, 'from pyhsmm.util.general import rle as rle\n'), ((12131, 12166), 'numpy.where', 'np.where', (['(stateseq_norep[:-1] == k1)'], {}), '(stateseq_norep[:-1] == k1)\n', (12139, 12166), True, 'import numpy as np\n'), ((12200, 12246), 'numpy.sum', 'np.sum', (['(stateseq_norep[from_indices + 1] == k2)'], {}), '(stateseq_norep[from_indices + 1] == k2)\n', (12206, 12246), True, 'import numpy as np\n'), ((13097, 13130), 'numpy.random.permutation', 'np.random.permutation', (['(self.T - 1)'], {}), '(self.T - 1)\n', (13118, 13130), True, 'import numpy as np\n'), ((13438, 13470), 'pyhsmm.util.stats.sample_discrete_from_log', 'sample_discrete_from_log', (['scores'], {}), '(scores)\n', (13462, 13470), False, 'from pyhsmm.util.stats import sample_discrete, sample_discrete_from_log, combinedata\n'), ((14992, 15005), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (14998, 15005), True, 'import numpy as np\n'), ((15200, 15229), 'numpy.log', 'np.log', (['beta[stateseq[t + 1]]'], {}), '(beta[stateseq[t + 1]])\n', (15206, 15229), True, 'import numpy as np\n'), ((15309, 15355), 'numpy.random.beta', 'np.random.beta', (['(1)', 'self.beta.gamma_0'], {'size': '(200)'}), '(1, self.beta.gamma_0, size=200)\n', (15323, 15355), True, 'import numpy as np\n'), ((15512, 15539), 'numpy.log', 'np.log', (['self.beta.remaining'], {}), '(self.beta.remaining)\n', (15518, 15539), True, 'import numpy as np\n'), ((18010, 18065), 'numpy.concatenate', 'np.concatenate', (['((0,), endindices + 1, (seq.shape[0],))'], {}), '(((0,), endindices + 1, (seq.shape[0],)))\n', (18024, 18065), True, 'import numpy as np\n'), ((18267, 18284), 'itertools.count', 'itertools.count', ([], {}), '()\n', (18282, 18284), False, 'import collections, itertools\n'), ((9365, 9406), 'numpy.random.randint', 'np.random.randint', (['(25)'], {'size': 'data.shape[0]'}), '(25, size=data.shape[0])\n', (9382, 9406), True, 'import numpy as np\n'), ((9596, 9609), 'pyhsmm.util.general.rle', 'rle', (['stateseq'], {}), '(stateseq)\n', (9599, 9609), True, 'from pyhsmm.util.general import rle as rle\n'), ((17923, 17935), 'numpy.diff', 'np.diff', (['seq'], {}), '(seq)\n', (17930, 17935), True, 'import numpy as np\n'), ((18091, 18118), 'numpy.where', 'np.where', (['(startindices <= t)'], {}), '(startindices <= t)\n', (18099, 18118), True, 'import numpy as np\n'), ((11436, 11469), 'numpy.ones', 'np.ones', (['nextstate_dur'], {'dtype': 'int'}), '(nextstate_dur, dtype=int)\n', (11443, 11469), True, 'import numpy as np\n'), ((7848, 7883), 'numpy.where', 'np.where', (['(self.stateseq == SAMPLING)'], {}), '(self.stateseq == SAMPLING)\n', (7856, 7883), True, 'import numpy as np\n'), ((8178, 8213), 'numpy.where', 'np.where', 
(['(self.stateseq == SAMPLING)'], {}), '(self.stateseq == SAMPLING)\n', (8186, 8213), True, 'import numpy as np\n'), ((12479, 12515), 'numpy.where', 'np.where', (['(stateseq_norep == SAMPLING)'], {}), '(stateseq_norep == SAMPLING)\n', (12487, 12515), True, 'import numpy as np\n'), ((12802, 12838), 'numpy.where', 'np.where', (['(stateseq_norep == SAMPLING)'], {}), '(stateseq_norep == SAMPLING)\n', (12810, 12838), True, 'import numpy as np\n')] |
scrapinghub/exporters | exporters/contrib/writers/odo_writer.py | b14f70530826bbbd6163d9e56e74345e762a9189 | import six
import json
import gzip
from exporters.default_retries import retry_long
from exporters.writers.base_writer import BaseWriter
class ODOWriter(BaseWriter):
"""
Writes items to a odo destination. https://odo.readthedocs.org/en/latest/
Needed parameters:
- schema (object)
schema object.
- odo_uri (str)
ODO valid destination uri.
"""
requirements = {
'schema': {'type': object, 'required': True},
'odo_uri': {'type': six.string_types, 'required': True}
}
def __init__(self, options):
super(ODOWriter, self).__init__(options)
from flatson import Flatson
schema = self.read_option('schema', None)
self.odo_uri = self.read_option('odo_uri', None)
self.flatson = Flatson(schema)
self.logger.info('ODOWriter has been initiated. Writing to: {}'.format(self.odo_uri))
@retry_long
def write(self, dump_path, group_key=''):
from odo import odo, resource, discover
import pandas as pd
with gzip.open(dump_path) as f:
lines = [json.loads(line.replace('\n', '')) for line in f.readlines()]
flattened_lines = (self.flatson.flatten(line) for line in lines)
pf = pd.DataFrame(flattened_lines, columns=self.flatson.fieldnames)
dshape = discover(pf)
odo(pf, resource(self.odo_uri), dshape=dshape)
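# Rough usage sketch (illustrative only; the option keys and URI below are assumptions made for
# the example, not values defined by this module):
#   writer = ODOWriter({'options': {'schema': <flatson JSON schema>,
#                                   'odo_uri': 'sqlite:///items.db::items'}})
#   writer.write('/tmp/items.jl.gz')
# write() expects a gzipped file of newline-delimited JSON items, flattens each item with
# Flatson according to the schema, and hands the resulting DataFrame to odo for loading.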
| [((800, 815), 'flatson.Flatson', 'Flatson', (['schema'], {}), '(schema)\n', (807, 815), False, 'from flatson import Flatson\n'), ((1258, 1320), 'pandas.DataFrame', 'pd.DataFrame', (['flattened_lines'], {'columns': 'self.flatson.fieldnames'}), '(flattened_lines, columns=self.flatson.fieldnames)\n', (1270, 1320), True, 'import pandas as pd\n'), ((1338, 1350), 'odo.discover', 'discover', (['pf'], {}), '(pf)\n', (1346, 1350), False, 'from odo import odo, resource, discover\n'), ((1062, 1082), 'gzip.open', 'gzip.open', (['dump_path'], {}), '(dump_path)\n', (1071, 1082), False, 'import gzip\n'), ((1367, 1389), 'odo.resource', 'resource', (['self.odo_uri'], {}), '(self.odo_uri)\n', (1375, 1389), False, 'from odo import odo, resource, discover\n')] |
gribbg/x7-geom | x7/geom/needs_test.py | a01ef29dc47f1587e3390b552decf92db0bbaa20 | """
Simple file to validate that maketests is working. Call maketests via:
>>> from x7.shell import *; maketests('x7.sample.needs_tests')
"""
def needs_a_test(a, b):
return a+b
| [] |
feketebv/SCA_proof_SHA3-512 | python/SHA3_hashlib_based_concept.py | 5a7689ea307463d5b797e49142c349b02cdcda03 | '''
Written by: Balazs Valer Fekete [email protected] [email protected]
Last updated: 29.01.2021
'''
# The concept is to generate a side-channel-resistant initialisation of the hashing function based on
# one secret key and several openly known initialisation vectors (IVs), in a manner that the same input
# is not hashed more than two times, which is hopefully not sufficient for side-channel-measurement
# based computations: a successful attack on the CHI function in a practically noiseless computer
# simulation (see "chi_cpa.py") takes around 100 consecutive measurements.
# This is achieved by taking a counter of a certain bit length and twice as many IVs as there are bits
# in the counter: "IV0s" and "IV1s". A series of hashes is computed, starting with the secret key and
# continuing with the corresponding IV from set 0 or set 1 depending on whether the counter's
# corresponding bit - starting at the MSB - is 0 or 1. This way every hash output is used exactly twice,
# provided the intermediate values are STORED and the series of initial hashes is NOT fully recomputed:
# only the levels whose corresponding counter bits have changed are redone, down to the LSB of the counter.
# The working solution is going to be based on the algorithms presented here, although the algorithm in
# this file does the full padding, so the results won't equal a scheme where the rate is fully filled
# with IVs and the data comes only afterwards...
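# A small worked example of the IV-selection rule above (illustrative only): with a 3-bit counter
# and counter = 5 = 0b101, the loops below walk the bits MSB->LSB and absorb IV1s[2], then IV0s[1],
# then IV1s[0]. Incrementing the counter to 6 = 0b110 flips only bits 1 and 0, so only the two lowest
# hash levels have to be recomputed from the stored copy at level 2.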
import hashlib
# KEY DATA STRUCTURES' INTERPRETATION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
IV0s = [658678, 6785697, 254376, 67856, 1432543, 786, 124345, 5443654]
IV1s = [2565, 256658, 985, 218996, 255, 685652, 28552, 3256565]
# LSB ... MSB
hash_copies = [None for i in range(len(IV0s))]
# LSB ... MSB
# counter
# MSB ... LSB
# COMPUTING HASHES FOR EVERY COUNTER VALUE INDIVIDUALLY
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for counter in range(11):
hash = hashlib.sha3_512()
# looping from MSB to LSB in counter too
for i in range(len(IV0s)-1, -1, -1):
if (counter>>i) & 1 == 1:
IV = bytes(IV1s[i])
else:
IV = bytes(IV0s[i])
hash.update(IV)
print(hash.hexdigest())
print()
# COMPUTING HASHES BASED ON THE NATURE OF BINARY INCREMENTATION:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# only fewer values need to be recomputed, those whose corresponding
# bits have changed, down until LSB
# initialize
hash = hashlib.sha3_512()
# looping from MSB to LSB
for i in range(len(IV0s)-1, -1, -1):
# addressing "MSB" of IVs at first, "LSB" at last!
IV = bytes(IV0s[i])
hash.update(IV)
# index 0 of hash_copies changes the most frequently ie. according to counter's LSB
hash_copies[i] = hash.copy()
# compute
last_counter = 0
for counter in range(11):
IV_mask = last_counter ^ counter
last_counter = counter
# determine the highest non-zero bit of IV_mask, LSB is 1, 0 means there was no change
nz = 0
while IV_mask > 0:
IV_mask >>= 1
nz += 1
# initialize hash to the last value whose corresponding counter bit didn't switch
# have to copy object otherwise the originally pointed version gets updated!
hash = hash_copies[nz].copy() # LSB is index 0
# compute only the remaining hashes
while nz != 0: # nz=0 is the initial condition, nothing needs to be done
nz -= 1
if (counter>>nz) & 1 == 1:
IV = bytes(IV1s[nz])
else:
IV = bytes(IV0s[nz])
hash.update(IV)
# needs to be copied again because of object orientation
hash_copies[nz] = hash.copy()
# showing the hash copies' entire table after each computation
#for hashes in hash_copies:
# print(hashes.hexdigest())
print(hash_copies[0].hexdigest())
| [((2560, 2578), 'hashlib.sha3_512', 'hashlib.sha3_512', ([], {}), '()\n', (2576, 2578), False, 'import hashlib\n'), ((2007, 2025), 'hashlib.sha3_512', 'hashlib.sha3_512', ([], {}), '()\n', (2023, 2025), False, 'import hashlib\n')] |
pengboomouch/graphstar | graphstar/utils.py | f7f3537aa92118765b358dd3a47b4fa5cea8587c | """
graphstar.utils
~~~~~~~~~~~~~~~
Cristian Cornea
A simple bidirectional graph with A* and breadth-first pathfinding.
Utils are either used by the search algorithm, or when needed :)
Pretty self explanatory (I hope)
For more information see the examples and tests folder
"""
def smooth_path(p):
# If the path is only two nodes long, then
# we can’t smooth it, so return
if len(p) == 2:
return p
# Compile an output path
output = [p[0]]
# Keep track of where we are in the input path
# We start at 2, because we assume two adjacent
# nodes will pass the ray cast
i = 2
# Loop until we find the last item in the input
while i < len(p)-1:
# Do the ray cast
if not ray_clear(output[len(output)-1], p[i]):
            # The ray test failed, add the last node that
            # passed to the output list
            output.append(p[i-1])
# Consider the next node
i += 1
# We’ve reached the end of the input path, add the
# end node to the output and return it
    output.append(p[len(p)-1])
return output
def clean_route_list(route_stack: list, goal_node_id: int):
"""
Creates an ordered route list from start to finish
with all node ids needed to traverse to the goal.
:param route_stack: All routes found until goal
:param goal_node: int ID of the goal node
:return: list A ordered list from start to goal
"""
r = []
next_node = goal_node_id
reversed_stack = reversed(route_stack)
for c in reversed_stack:
if c.to_node.id == next_node:
r.append(c.to_node.id)
r.append(c.from_node.id)
next_node = c.from_node.id
    # de-duplicate while preserving the start-to-goal order promised above
    return list(dict.fromkeys(reversed(r)))
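# Worked example (hypothetical node ids, for illustration): with connections 1 -> 2 and 2 -> 3
# in the route stack and goal_node_id = 3, the loop above walks the stack backwards and collects
# [3, 2, 2, 1], which is then de-duplicated into the ids of the path from start to goal.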
| [] |
JASTYN/pythonmaster | design_patterns/pubsub/simple_events/__init__.py | 46638ab09d28b65ce5431cd0759fe6df272fb85d | class Event:
def __init__(self):
self.handlers = set()
def subscribe(self, func):
self.handlers.add(func)
def unsubscribe(self, func):
self.handlers.remove(func)
def emit(self, *args):
for func in self.handlers:
func(*args)
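# Minimal usage sketch (illustrative only):
#   on_message = Event()
#   on_message.subscribe(print)           # any callable can subscribe
#   on_message.emit('hello', 'world')     # -> print('hello', 'world')
#   on_message.unsubscribe(print)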
| [] |
jiz148/py-test | jinchi/demo/foobar.py | d976265d065c760f2e8b55302dedbfebd01bec28 | import os
def check_env(env_var_name):
"""
Check and return the type of an environment variable.
supported types:
None
Integer
String
@param env_var_name: environment variable name
@return: string of the type name.
"""
try:
val = os.getenv(env_var_name)
if val is None:
return 'None'
    except Exception:
        return 'None'

    try:
        int(val)
return 'Integer'
except ValueError:
return 'String'
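# Examples (illustrative; assumes these variables are exported in the environment):
#   PORT=8080          -> check_env('PORT') returns 'Integer'
#   DEBUG=true         -> check_env('DEBUG') returns 'String'
#   an unset variable  -> check_env('MISSING') returns 'None'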
| [((294, 317), 'os.getenv', 'os.getenv', (['env_var_name'], {}), '(env_var_name)\n', (303, 317), False, 'import os\n')] |
Anirudhchoudhary/ApnaGanna__backend | sound/serializers.py | 52e6c3100fdb289e8bf64a1a4007eeb2eb66a022 | from .models import Sound , Album
from rest_framework import serializers
class SoundSerializer(serializers.ModelSerializer):
class Meta:
model = Sound
fields = ["name" , "song_image" , "pk" , "like" , "played" , "tag" , "singer" , "upload_date"]
class SoundDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Sound
fields = "__all__"
class AlbumSerializer(serializers.ModelSerializer):
sound = serializers.SerializerMethodField()
class Meta:
model = Album
fields = ["name" , "datepublish" , "category" , "sound"]
depth = 1
def get_sound(self , obj):
print("WORKING")
return SoundSerializer(instance=obj.sound , many=True).data
| [((474, 509), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (507, 509), False, 'from rest_framework import serializers\n')] |
WGBH/django-tracking | tracking/utils.py | 80e8bc44521820eab956d2264d6df0b6987429e0 | from datetime import datetime
from django.conf import settings
import pytz
def check_tracker(obj, simple=True):
if simple:
if obj.status > 0:
return True
return False
# we have a gatekeeper
now = datetime.now(pytz.utc)
if obj.tracker_publish_status < 0:
return False
if obj.tracker_publish_status > 0:
return True
# Checking live_as_of ...
# is live_as_of set?
if not obj.tracker_live_as_of: # No live_as_of --- bail
return False
# has it happened yet?
if now < obj.tracker_live_as_of: # live_as_of --- not yet!
return False
# is there an expiration date?
if obj.tracker_expires and now > obj.tracker_expires: # EXPIRED!
return False
# it's OK then
return True
DEFAULT_TRACKER_POSITIONS = [
('tracker-head-top', 'Head - near top'),
('tracker-head-bottom', 'Head - near bottom'),
('tracker-body-top', 'Body - near top'),
('tracker-body-bottom', 'Body - near bottom')
]
def get_tracker_position_options():
"""
This creates the dropdown in the Admin for where to put each tracker.
It defaults to the obvious 4 location (top/bottom of the head/body);
however the user can create more by adding a list of 3-ples in the settings
file under ADDITIONAL_TRACKER_POSITIONS.
(2-letter-code, description, block name), e.g.
('HN', 'Header Navigation', 'header-navigation-trackers')
would allow for the user to have tracking code in a navbar (no, I don't know
why they'd want this) if they put
{% block header-navigation-trackers %}{% generate_trackers 'HN' %}{% endblock %}
in their template.
"""
tracker_position_list = DEFAULT_TRACKER_POSITIONS
additional_tracker_positions = getattr(settings, "ADDITIONAL_TRACKER_POSITIONS", [])
full_list = list()
for x in (tracker_position_list + additional_tracker_positions):
full_list.append((x[0], x[1]))
return full_list | [((246, 268), 'datetime.datetime.now', 'datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (258, 268), False, 'from datetime import datetime\n')] |
ankeshkhemani/devtools | devtools/api/health.py | beb9a46c27b6b4c02a2e8729af0c971cc175f134 | import datetime
from fastapi import APIRouter
router = APIRouter()
@router.get("", tags=["health"])
async def get_health():
return {
"results": [],
"status": "success",
"timestamp": datetime.datetime.now().timestamp()
}
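# Usage sketch (illustrative; the actual app wiring lives elsewhere in the project):
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router, prefix="/health")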
| [((57, 68), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (66, 68), False, 'from fastapi import APIRouter\n'), ((214, 237), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (235, 237), False, 'import datetime\n')] |
y-x-c/Heliot | computation/Tests/Jetson/TF_model.py | b98646966fd1d437e308abeed59668df640932de | import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import json
import time
import cv2
PATH_TO_FROZEN_GRAPH = '../data/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224_frozen.pb'
info='Time taken to load Model into memory:'
start_time=time.time()
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
end_time=time.time()
time_taken=end_time-start_time
print(info,time_taken)
# Load the labels
#Load categories
categories = []
with open('../data/' + 'categories.txt', 'r') as f:
for line in f:
cat = line.split('\n')[0]
if cat != 'classes':
categories.append(cat)
f.close()
print('Number of categories:', len(categories))
# Load image size
with open('../data/' + 'inputsize.txt', 'r') as f:
reqsize = int(f.readline().split('\n')[0])
#print(reqsize)
#image_filename = '../data/' + 'image1.jpg'
def Load_and_process_img(image_filename):
img = cv2.imread(image_filename)#.astype(numpy.float32)
img = cv2.resize(img, (reqsize, reqsize))
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img = img.astype(float)
#img values are scaled from -1 to 1
img /= 255.0
img -= 0.5
img *= 2.0
return img
sess=tf.Session(graph=detection_graph)
def run_inference_b1(key_name,image, graph,no_of_run):
#model output layer name
ops = graph.get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
#print(all_tensor_names)
tensor_dict = {}
for key in [key_name]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = graph.get_tensor_by_name(tensor_name)
image=image.reshape(1,image.shape[0],image.shape[1],image.shape[2])
image_tensor = graph.get_tensor_by_name('input:0')
#Demo run, so that graph is loaded into TF memory
sess.run(tensor_dict,feed_dict={image_tensor: image})
# Run inference
info='Time taken to run inference: run_inference_b1:'+str(no_of_run)+' Times: '
start_time=time.time()
for i in range(no_of_run):
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: image})
end_time=time.time()
time_taken=end_time-start_time
print(info,time_taken)
#print(output_dict)
top_inds = output_dict[key_name][0].argsort()[::-1][:5]
result=[]
for i in range(5):
result.append([top_inds[i], categories[top_inds[i]], output_dict[key_name][0][top_inds[i]]])
return result, time_taken
image_filename = '../data/' + 'Tiger.jpg'
img = Load_and_process_img(image_filename)
key_name='MobilenetV2/Predictions/Reshape_1'
result,time_taken=run_inference_b1(key_name,img,detection_graph,1000)
print('Time Taken to run Inference is:',time_taken)
print(result)
| [((471, 482), 'time.time', 'time.time', ([], {}), '()\n', (480, 482), False, 'import time\n'), ((502, 512), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (510, 512), True, 'import tensorflow as tf\n'), ((779, 790), 'time.time', 'time.time', ([], {}), '()\n', (788, 790), False, 'import time\n'), ((1643, 1676), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'detection_graph'}), '(graph=detection_graph)\n', (1653, 1676), True, 'import tensorflow as tf\n'), ((565, 578), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (576, 578), True, 'import tensorflow as tf\n'), ((1366, 1392), 'cv2.imread', 'cv2.imread', (['image_filename'], {}), '(image_filename)\n', (1376, 1392), False, 'import cv2\n'), ((1426, 1461), 'cv2.resize', 'cv2.resize', (['img', '(reqsize, reqsize)'], {}), '(img, (reqsize, reqsize))\n', (1436, 1461), False, 'import cv2\n'), ((1470, 1506), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1482, 1506), False, 'import cv2\n'), ((2459, 2470), 'time.time', 'time.time', ([], {}), '()\n', (2468, 2470), False, 'import time\n'), ((2620, 2631), 'time.time', 'time.time', ([], {}), '()\n', (2629, 2631), False, 'import time\n'), ((586, 628), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_FROZEN_GRAPH', '"""rb"""'], {}), "(PATH_TO_FROZEN_GRAPH, 'rb')\n", (600, 628), True, 'import tensorflow as tf\n'), ((726, 768), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (745, 768), True, 'import tensorflow as tf\n')] |
chentaoz/frappe | frappe/patches/v13_0/update_date_filters_in_user_settings.py | ee3c4943bf6177ad3b410cdb0d802af486751a65 | from __future__ import unicode_literals
import frappe, json
from frappe.model.utils.user_settings import update_user_settings, sync_user_settings
def execute():
users = frappe.db.sql("select distinct(user) from `__UserSettings`", as_dict=True)
for user in users:
user_settings = frappe.db.sql('''
select
* from `__UserSettings`
where
user="{user}"
'''.format(user = user.user), as_dict=True)
for setting in user_settings:
data = frappe.parse_json(setting.get('data'))
if data:
for key in data:
update_user_setting_filters(data, key, setting)
sync_user_settings()
def update_user_setting_filters(data, key, user_setting):
timespan_map = {
'1 week': 'week',
'1 month': 'month',
'3 months': 'quarter',
'6 months': '6 months',
'1 year': 'year',
}
period_map = {
'Previous': 'last',
'Next': 'next'
}
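	# Illustrative transformation (hypothetical saved filter): given the maps above, a filter such as
	#   ["Task", "creation", "Previous", "1 month"]
	# is rewritten below to
	#   ["Task", "creation", "Timespan", "last month"]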
if data.get(key):
update = False
if isinstance(data.get(key), dict):
filters = data.get(key).get('filters')
if filters and isinstance(filters, list):
for f in filters:
if f[2] == 'Next' or f[2] == 'Previous':
update = True
f[3] = period_map[f[2]] + ' ' + timespan_map[f[3]]
f[2] = 'Timespan'
if update:
data[key]['filters'] = filters
update_user_settings(user_setting['doctype'], json.dumps(data), for_update=True)
| [((171, 245), 'frappe.db.sql', 'frappe.db.sql', (['"""select distinct(user) from `__UserSettings`"""'], {'as_dict': '(True)'}), "('select distinct(user) from `__UserSettings`', as_dict=True)\n", (184, 245), False, 'import frappe, json\n'), ((584, 604), 'frappe.model.utils.user_settings.sync_user_settings', 'sync_user_settings', ([], {}), '()\n', (602, 604), False, 'from frappe.model.utils.user_settings import update_user_settings, sync_user_settings\n'), ((1289, 1305), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1299, 1305), False, 'import frappe, json\n')] |
peguerosdc/ml4phy-quantum-oscillators | miniproject/train.py | 5ce2cc8ea9ad00e23dab45d898e51f484fca5934 | import BoltzmannMachine as bm
import QHO as qho
import numpy as np
import datetime
# Visualization imports
from IPython.display import clear_output
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi']=300
def sigmoid(x):
return .5 * (1 + np.tanh(x / 2.))
# Set the quantum gas with N particles, a limit of 10 for the
# quantum numbers and default temperature and frequency
N = 10*10
gas = qho.QHOGas(N=N)
n_max = 10
training_size = 100000
# the amount of hidden units was set by trial and error
hidden_units = 70
# the recipe suggests to set the batchsize to 10, though it can range
# from 10 to 100
batchsize = 10
# the recipe suggests a learning rate that makes the weight updates about
# 1e-3 times the weights (to within an order of magnitude)
eta = 0.005
# the amount of steps was set by trial and error
nsteps = 300000
# define the validation set to be used in training_visualization
validation_set = gas.generate(amount=20)
def training_visualization(machine, current_step, total_steps, eta, a, b, w, da, db, dw):
# Every now and then (every 50k steps), let us know that the training
# is still running
if current_step%50000 == 0:
print("{:08d} / {:08d}".format(current_step, total_steps), end=" \r")
# After 'checkpoint_steps', show the suggested plots
checkpoint_steps = 10000
if current_step%checkpoint_steps == 0 or current_step == total_steps-1:
print(f"Showing at step {current_step}.")
# Produce a sample starting from the validation set after 100 steps
v_prime = machine.generate(validation_set, 100, a=a, b=b, w=w)
# print useful plots for training
plot_training(validation_set, v_prime, eta, a, b, w, da, db, dw)
def plot_training(v, v_prime, eta, a, b, w, da, db, dw):
clear_output(wait=True)
# Show how the weights light up for the state v
hMean = sigmoid(np.dot(v, w) + b)
image = Image.fromarray(hMean * 256).show()
# Create the grid for all the other plots we want
plt.rcParams.update({'font.size': 2})
# plot histogram of initial vs generated
n = np.arange(0,10)
generated_quantum_numbers = np.rint(v_prime*10)
plt.hist( generated_quantum_numbers.flatten(), bins=np.arange(0,10), density=True, label="Sampled" )
plt.plot( n, gas.p_n(n), label="Theor." )
plt.xlabel('n')
plt.ylabel('P(n)')
plt.legend()
# plot histogram of visible, hidden, weights
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(ncols=3, nrows=2)
def plotit(axis, values, title):
axis.hist(values)
axis.set_title(f"{title}: mm = {np.mean(np.fabs(values))}")
plotit(fig.add_subplot(gs[0,0]), a, 'a')
plotit(fig.add_subplot(gs[0,1]), w.flatten(), 'w')
plotit(fig.add_subplot(gs[0,2]), b, 'b')
# plot histogram of d_visible, d_hidden, d_weights
plotit(fig.add_subplot(gs[1,0]), eta*da, 'da')
plotit(fig.add_subplot(gs[1,1]), eta*dw.flatten(), 'dw')
plotit(fig.add_subplot(gs[1,2]), eta*db, 'db')
# show free energies of the average of samples
x = lambda vv : b + np.dot(vv, w)
free_training = -np.dot(v, a) - np.sum( np.log(1 + np.exp(x(v))), axis=1)
free_valdation = -np.dot(v_prime, a) - np.sum( np.log(1 + np.exp(x(v_prime))), axis=1)
print(f"\nF_training={np.average(free_training)} vs F_validation={np.average(free_valdation)}\n")
# Show.
# CAUTION! This will freeze the execution
plt.show()
# Init the boltzmann machine and train it while visualizing the suggested plots
training_set = gas.generate(amount=training_size, n_max=n_max)
m = bm.BoltzmannMachine(num_hidden=hidden_units)
a,b,w = m.train(training_set, batchsize=batchsize, eta=eta, nsteps=nsteps, do_while_training=None)
# Store in a file
run_id = int(datetime.datetime.now().timestamp())
np.savetxt(f"a_{run_id}.csv", a, delimiter=',')
np.savetxt(f"b_{run_id}.csv", b, delimiter=',')
np.savetxt(f"w_{run_id}.csv", w, delimiter=',')
| [((447, 462), 'QHO.QHOGas', 'qho.QHOGas', ([], {'N': 'N'}), '(N=N)\n', (457, 462), True, 'import QHO as qho\n'), ((3637, 3681), 'BoltzmannMachine.BoltzmannMachine', 'bm.BoltzmannMachine', ([], {'num_hidden': 'hidden_units'}), '(num_hidden=hidden_units)\n', (3656, 3681), True, 'import BoltzmannMachine as bm\n'), ((3849, 3896), 'numpy.savetxt', 'np.savetxt', (['f"""a_{run_id}.csv"""', 'a'], {'delimiter': '""","""'}), "(f'a_{run_id}.csv', a, delimiter=',')\n", (3859, 3896), True, 'import numpy as np\n'), ((3897, 3944), 'numpy.savetxt', 'np.savetxt', (['f"""b_{run_id}.csv"""', 'b'], {'delimiter': '""","""'}), "(f'b_{run_id}.csv', b, delimiter=',')\n", (3907, 3944), True, 'import numpy as np\n'), ((3945, 3992), 'numpy.savetxt', 'np.savetxt', (['f"""w_{run_id}.csv"""', 'w'], {'delimiter': '""","""'}), "(f'w_{run_id}.csv', w, delimiter=',')\n", (3955, 3992), True, 'import numpy as np\n'), ((1829, 1852), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (1841, 1852), False, 'from IPython.display import clear_output\n'), ((2050, 2087), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 2}"], {}), "({'font.size': 2})\n", (2069, 2087), True, 'import matplotlib.pyplot as plt\n'), ((2142, 2158), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (2151, 2158), True, 'import numpy as np\n'), ((2190, 2211), 'numpy.rint', 'np.rint', (['(v_prime * 10)'], {}), '(v_prime * 10)\n', (2197, 2211), True, 'import numpy as np\n'), ((2365, 2380), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""n"""'], {}), "('n')\n", (2375, 2380), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2403), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P(n)"""'], {}), "('P(n)')\n", (2395, 2403), True, 'import matplotlib.pyplot as plt\n'), ((2408, 2420), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2418, 2420), True, 'import matplotlib.pyplot as plt\n'), ((2481, 2516), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (2491, 2516), True, 'import matplotlib.pyplot as plt\n'), ((3478, 3488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3486, 3488), True, 'import matplotlib.pyplot as plt\n'), ((295, 311), 'numpy.tanh', 'np.tanh', (['(x / 2.0)'], {}), '(x / 2.0)\n', (302, 311), True, 'import numpy as np\n'), ((1925, 1937), 'numpy.dot', 'np.dot', (['v', 'w'], {}), '(v, w)\n', (1931, 1937), True, 'import numpy as np\n'), ((1955, 1983), 'PIL.Image.fromarray', 'Image.fromarray', (['(hMean * 256)'], {}), '(hMean * 256)\n', (1970, 1983), False, 'from PIL import Image\n'), ((2266, 2282), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (2275, 2282), True, 'import numpy as np\n'), ((3130, 3143), 'numpy.dot', 'np.dot', (['vv', 'w'], {}), '(vv, w)\n', (3136, 3143), True, 'import numpy as np\n'), ((3166, 3178), 'numpy.dot', 'np.dot', (['v', 'a'], {}), '(v, a)\n', (3172, 3178), True, 'import numpy as np\n'), ((3245, 3263), 'numpy.dot', 'np.dot', (['v_prime', 'a'], {}), '(v_prime, a)\n', (3251, 3263), True, 'import numpy as np\n'), ((3812, 3835), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3833, 3835), False, 'import datetime\n'), ((3340, 3365), 'numpy.average', 'np.average', (['free_training'], {}), '(free_training)\n', (3350, 3365), True, 'import numpy as np\n'), ((3384, 3410), 'numpy.average', 'np.average', (['free_valdation'], {}), '(free_valdation)\n', (3394, 3410), True, 'import numpy as np\n'), ((2672, 2687), 'numpy.fabs', 
'np.fabs', (['values'], {}), '(values)\n', (2679, 2687), True, 'import numpy as np\n')] |
diCagri/content | Tests/Marketplace/prepare_public_index_for_private_testing.py | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | import time
import os
import sys
import shutil
import json
import argparse
from zipfile import ZipFile
from contextlib import contextmanager
from datetime import datetime
from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, \
extract_packs_artifacts
from Tests.Marketplace.marketplace_services import init_storage_client
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
MAX_SECONDS_TO_WAIT_FOR_LOCK = 600
LOCK_FILE_PATH = 'lock.txt'
@contextmanager
def lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
try:
acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
yield
except Exception:
logging.exception("Error in dummy index lock context manager.")
finally:
release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
def change_pack_price_to_zero(path_to_pack_metadata):
with open(path_to_pack_metadata, 'r') as pack_metadata_file:
pack_metadata = json.load(pack_metadata_file)
pack_metadata['price'] = 0
with open(path_to_pack_metadata, 'w') as pack_metadata_file:
json.dump(pack_metadata, pack_metadata_file, indent=4)
def change_packs_price_to_zero(public_index_folder_path):
paths_to_packs_in_merged_index = [pack_dir.path for pack_dir in os.scandir(public_index_folder_path) if
pack_dir.is_dir()]
for path_to_pack in paths_to_packs_in_merged_index:
path_to_pack_metadata = os.path.join(path_to_pack, 'metadata.json')
change_pack_price_to_zero(path_to_pack_metadata)
def merge_private_index_into_public_index(public_index_folder_path, private_index_folder_path):
packs_in_private_index = [pack_dir.name for pack_dir in os.scandir(private_index_folder_path) if pack_dir.is_dir()]
for pack_name in packs_in_private_index:
path_to_pack_in_private_index = os.path.join(private_index_folder_path, pack_name)
path_to_pack_in_public_index = os.path.join(public_index_folder_path, pack_name)
shutil.copy(path_to_pack_in_private_index, path_to_pack_in_public_index)
def upload_modified_index(public_index_folder_path, extract_destination_path, public_ci_dummy_index_blob, build_number,
private_packs):
"""Upload updated index zip to cloud storage.
Args:
public_index_folder_path (str): public index folder full path.
extract_destination_path (str): extract folder full path.
public_ci_dummy_index_blob (Blob): google cloud storage object that represents the dummy index.zip blob.
build_number (str): circleCI build number, used as an index revision.
private_packs (list): List of private packs and their price.
"""
with open(os.path.join(public_index_folder_path, "index.json"), "w+") as index_file:
for private_pack in private_packs:
private_pack['price'] = 0
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'packs': private_packs
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(public_index_folder_path)
index_zip_path = shutil.make_archive(base_name=public_index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
public_ci_dummy_index_blob.reload()
public_ci_dummy_index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
public_ci_dummy_index_blob.upload_from_filename(index_zip_path)
logging.success("Finished uploading index.zip to storage.")
except Exception:
logging.exception("Failed in uploading index. Mismatch in index file generation.")
sys.exit(1)
finally:
shutil.rmtree(public_index_folder_path)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-b', '--public_bucket_name', help="CI public bucket name", required=True)
parser.add_argument('-pb', '--private_bucket_name', help="CI private bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=True)
parser.add_argument('-e', '--extract_public_index_path', help="Full path of folder to extract the public index",
required=True)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-p', '--pack_name', help="Modified pack to upload to gcs.")
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-ea', '--extract_artifacts_path', help="Full path of folder to extract wanted packs",
required=True)
parser.add_argument('-di', '--dummy_index_dir_path', help="Full path to the dummy index in the private CI bucket",
required=True)
# disable-secrets-detection-end
return parser.parse_args()
def is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
return dummy_index_lock_blob.exists()
def lock_dummy_index(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
with open(LOCK_FILE_PATH, 'w') as lock_file:
lock_file.write('locked')
with open(LOCK_FILE_PATH, 'rb') as lock_file:
dummy_index_lock_blob.upload_from_file(lock_file)
def acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
total_seconds_waited = 0
while is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
if total_seconds_waited >= MAX_SECONDS_TO_WAIT_FOR_LOCK:
logging.critical("Error: Failed too long to acquire lock, exceeded max wait time.")
sys.exit(1)
if total_seconds_waited % 60 == 0:
# Printing a message every minute to keep the machine from dying due to no output
logging.info("Waiting to acquire lock.")
total_seconds_waited += 10
time.sleep(10)
lock_dummy_index(public_storage_bucket, dummy_index_lock_path)
def release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
dummy_index_lock_blob.delete()
os.remove(LOCK_FILE_PATH)
def add_private_packs_from_dummy_index(private_packs, dummy_index_blob):
downloaded_dummy_index_path = 'current_dummy_index.zip'
extracted_dummy_index_path = 'dummy_index'
dummy_index_json_path = os.path.join(extracted_dummy_index_path, 'index', 'index.json')
dummy_index_blob.download_to_filename(downloaded_dummy_index_path)
os.mkdir(extracted_dummy_index_path)
if os.path.exists(downloaded_dummy_index_path):
with ZipFile(downloaded_dummy_index_path, 'r') as index_zip:
index_zip.extractall(extracted_dummy_index_path)
with open(dummy_index_json_path) as index_file:
index_json = json.load(index_file)
packs_from_dummy_index = index_json.get('packs', [])
for pack in private_packs:
is_pack_in_dummy_index = any(
[pack['id'] == dummy_index_pack['id'] for dummy_index_pack in packs_from_dummy_index])
if not is_pack_in_dummy_index:
packs_from_dummy_index.append(pack)
os.remove(downloaded_dummy_index_path)
shutil.rmtree(extracted_dummy_index_path)
return packs_from_dummy_index
def main():
install_logging('prepare_public_index_for_private_testing.log', logger=logging)
upload_config = option_handler()
service_account = upload_config.service_account
build_number = upload_config.ci_build_number
public_bucket_name = upload_config.public_bucket_name
private_bucket_name = upload_config.private_bucket_name
storage_base_path = upload_config.storage_base_path
extract_public_index_path = upload_config.extract_public_index_path
changed_pack = upload_config.pack_name
extract_destination_path = upload_config.extract_artifacts_path
packs_artifacts_path = upload_config.artifacts_path
dummy_index_dir_path = upload_config.dummy_index_dir_path
dummy_index_path = os.path.join(dummy_index_dir_path, 'index.zip')
dummy_index_lock_path = os.path.join(dummy_index_dir_path, 'lock.txt')
storage_client = init_storage_client(service_account)
public_storage_bucket = storage_client.bucket(public_bucket_name)
private_storage_bucket = storage_client.bucket(private_bucket_name)
dummy_index_blob = public_storage_bucket.blob(dummy_index_path)
with lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
public_index_folder_path, public_index_blob, _ = download_and_extract_index(public_storage_bucket,
extract_public_index_path, storage_base_path)
# In order for the packs to be downloaded successfully, their price has to be 0
change_packs_price_to_zero(public_index_folder_path)
private_packs, private_index_path, private_index_blob = update_index_with_priced_packs(private_storage_bucket,
extract_destination_path,
public_index_folder_path,
changed_pack, True,
storage_base_path)
private_packs = add_private_packs_from_dummy_index(private_packs, dummy_index_blob)
upload_modified_index(public_index_folder_path, extract_public_index_path, dummy_index_blob, build_number,
private_packs)
if __name__ == '__main__':
main()
| [((3268, 3310), 'os.path.basename', 'os.path.basename', (['public_index_folder_path'], {}), '(public_index_folder_path)\n', (3284, 3310), False, 'import os\n'), ((3332, 3465), 'shutil.make_archive', 'shutil.make_archive', ([], {'base_name': 'public_index_folder_path', 'format': '"""zip"""', 'root_dir': 'extract_destination_path', 'base_dir': 'index_zip_name'}), "(base_name=public_index_folder_path, format='zip',\n root_dir=extract_destination_path, base_dir=index_zip_name)\n", (3351, 3465), False, 'import shutil\n'), ((4149, 4217), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Store packs in cloud storage."""'}), "(description='Store packs in cloud storage.')\n", (4172, 4217), False, 'import argparse\n'), ((7524, 7549), 'os.remove', 'os.remove', (['LOCK_FILE_PATH'], {}), '(LOCK_FILE_PATH)\n', (7533, 7549), False, 'import os\n'), ((7760, 7823), 'os.path.join', 'os.path.join', (['extracted_dummy_index_path', '"""index"""', '"""index.json"""'], {}), "(extracted_dummy_index_path, 'index', 'index.json')\n", (7772, 7823), False, 'import os\n'), ((7899, 7935), 'os.mkdir', 'os.mkdir', (['extracted_dummy_index_path'], {}), '(extracted_dummy_index_path)\n', (7907, 7935), False, 'import os\n'), ((7943, 7986), 'os.path.exists', 'os.path.exists', (['downloaded_dummy_index_path'], {}), '(downloaded_dummy_index_path)\n', (7957, 7986), False, 'import os\n'), ((8555, 8593), 'os.remove', 'os.remove', (['downloaded_dummy_index_path'], {}), '(downloaded_dummy_index_path)\n', (8564, 8593), False, 'import os\n'), ((8598, 8639), 'shutil.rmtree', 'shutil.rmtree', (['extracted_dummy_index_path'], {}), '(extracted_dummy_index_path)\n', (8611, 8639), False, 'import shutil\n'), ((8692, 8771), 'Tests.scripts.utils.log_util.install_logging', 'install_logging', (['"""prepare_public_index_for_private_testing.log"""'], {'logger': 'logging'}), "('prepare_public_index_for_private_testing.log', logger=logging)\n", (8707, 8771), False, 'from Tests.scripts.utils.log_util import install_logging\n'), ((9408, 9455), 'os.path.join', 'os.path.join', (['dummy_index_dir_path', '"""index.zip"""'], {}), "(dummy_index_dir_path, 'index.zip')\n", (9420, 9455), False, 'import os\n'), ((9484, 9530), 'os.path.join', 'os.path.join', (['dummy_index_dir_path', '"""lock.txt"""'], {}), "(dummy_index_dir_path, 'lock.txt')\n", (9496, 9530), False, 'import os\n'), ((9553, 9589), 'Tests.Marketplace.marketplace_services.init_storage_client', 'init_storage_client', (['service_account'], {}), '(service_account)\n', (9572, 9589), False, 'from Tests.Marketplace.marketplace_services import init_storage_client\n'), ((1095, 1124), 'json.load', 'json.load', (['pack_metadata_file'], {}), '(pack_metadata_file)\n', (1104, 1124), False, 'import json\n'), ((1230, 1284), 'json.dump', 'json.dump', (['pack_metadata', 'pack_metadata_file'], {'indent': '(4)'}), '(pack_metadata, pack_metadata_file, indent=4)\n', (1239, 1284), False, 'import json\n'), ((1598, 1641), 'os.path.join', 'os.path.join', (['path_to_pack', '"""metadata.json"""'], {}), "(path_to_pack, 'metadata.json')\n", (1610, 1641), False, 'import os\n'), ((2002, 2052), 'os.path.join', 'os.path.join', (['private_index_folder_path', 'pack_name'], {}), '(private_index_folder_path, pack_name)\n', (2014, 2052), False, 'import os\n'), ((2092, 2141), 'os.path.join', 'os.path.join', (['public_index_folder_path', 'pack_name'], {}), '(public_index_folder_path, pack_name)\n', (2104, 2141), False, 'import os\n'), ((2150, 2222), 'shutil.copy', 'shutil.copy', 
(['path_to_pack_in_private_index', 'path_to_pack_in_public_index'], {}), '(path_to_pack_in_private_index, path_to_pack_in_public_index)\n', (2161, 2222), False, 'import shutil\n'), ((3207, 3245), 'json.dump', 'json.dump', (['index', 'index_file'], {'indent': '(4)'}), '(index, index_file, indent=4)\n', (3216, 3245), False, 'import json\n'), ((3745, 3804), 'Tests.scripts.utils.logging_wrapper.success', 'logging.success', (['"""Finished uploading index.zip to storage."""'], {}), "('Finished uploading index.zip to storage.')\n", (3760, 3804), True, 'from Tests.scripts.utils import logging_wrapper as logging\n'), ((3959, 3998), 'shutil.rmtree', 'shutil.rmtree', (['public_index_folder_path'], {}), '(public_index_folder_path)\n', (3972, 3998), False, 'import shutil\n'), ((7246, 7260), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (7256, 7260), False, 'import time\n'), ((8192, 8213), 'json.load', 'json.load', (['index_file'], {}), '(index_file)\n', (8201, 8213), False, 'import json\n'), ((9895, 9966), 'Tests.private_build.upload_packs_private.extract_packs_artifacts', 'extract_packs_artifacts', (['packs_artifacts_path', 'extract_destination_path'], {}), '(packs_artifacts_path, extract_destination_path)\n', (9918, 9966), False, 'from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, extract_packs_artifacts\n'), ((10024, 10123), 'Tests.private_build.upload_packs_private.download_and_extract_index', 'download_and_extract_index', (['public_storage_bucket', 'extract_public_index_path', 'storage_base_path'], {}), '(public_storage_bucket, extract_public_index_path,\n storage_base_path)\n', (10050, 10123), False, 'from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, extract_packs_artifacts\n'), ((10419, 10572), 'Tests.private_build.upload_packs_private.update_index_with_priced_packs', 'update_index_with_priced_packs', (['private_storage_bucket', 'extract_destination_path', 'public_index_folder_path', 'changed_pack', '(True)', 'storage_base_path'], {}), '(private_storage_bucket,\n extract_destination_path, public_index_folder_path, changed_pack, True,\n storage_base_path)\n', (10449, 10572), False, 'from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, extract_packs_artifacts\n'), ((794, 857), 'Tests.scripts.utils.logging_wrapper.exception', 'logging.exception', (['"""Error in dummy index lock context manager."""'], {}), "('Error in dummy index lock context manager.')\n", (811, 857), True, 'from Tests.scripts.utils import logging_wrapper as logging\n'), ((1413, 1449), 'os.scandir', 'os.scandir', (['public_index_folder_path'], {}), '(public_index_folder_path)\n', (1423, 1449), False, 'import os\n'), ((1857, 1894), 'os.scandir', 'os.scandir', (['private_index_folder_path'], {}), '(private_index_folder_path)\n', (1867, 1894), False, 'import os\n'), ((2868, 2920), 'os.path.join', 'os.path.join', (['public_index_folder_path', '"""index.json"""'], {}), "(public_index_folder_path, 'index.json')\n", (2880, 2920), False, 'import os\n'), ((3835, 3922), 'Tests.scripts.utils.logging_wrapper.exception', 'logging.exception', (['"""Failed in uploading index. Mismatch in index file generation."""'], {}), "(\n 'Failed in uploading index. 
Mismatch in index file generation.')\n", (3852, 3922), True, 'from Tests.scripts.utils import logging_wrapper as logging\n'), ((3926, 3937), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3934, 3937), False, 'import sys\n'), ((6903, 6991), 'Tests.scripts.utils.logging_wrapper.critical', 'logging.critical', (['"""Error: Failed too long to acquire lock, exceeded max wait time."""'], {}), "(\n 'Error: Failed too long to acquire lock, exceeded max wait time.')\n", (6919, 6991), True, 'from Tests.scripts.utils import logging_wrapper as logging\n'), ((6999, 7010), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7007, 7010), False, 'import sys\n'), ((7161, 7201), 'Tests.scripts.utils.logging_wrapper.info', 'logging.info', (['"""Waiting to acquire lock."""'], {}), "('Waiting to acquire lock.')\n", (7173, 7201), True, 'from Tests.scripts.utils import logging_wrapper as logging\n'), ((8001, 8042), 'zipfile.ZipFile', 'ZipFile', (['downloaded_dummy_index_path', '"""r"""'], {}), "(downloaded_dummy_index_path, 'r')\n", (8008, 8042), False, 'from zipfile import ZipFile\n'), ((3104, 3121), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3119, 3121), False, 'from datetime import datetime\n')] |
Phantomxm2021/ARMOD-Dashboard | ARMODServers/Apps/ARExperiences/apps.py | 383cf0a5e72dc5a2651f43e693f06773d5b88bbd | from django.apps import AppConfig
class ArexperiencesConfig(AppConfig):
name = 'Apps.ARExperiences'
| [] |
zhouzaida/mmflow | configs/_base_/datasets/flyingchairs_320x448.py | b34f0801061469f04a83133d7f5652dead1f93ce | dataset_type = 'FlyingChairs'
data_root = 'data/FlyingChairs_release'
img_norm_cfg = dict(mean=[0., 0., 0.], std=[255., 255., 255.], to_rgb=False)
global_transform = dict(
translates=(0.05, 0.05),
zoom=(1.0, 1.5),
shear=(0.86, 1.16),
rotate=(-10., 10.))
relative_transform = dict(
translates=(0.00375, 0.00375),
zoom=(0.985, 1.015),
shear=(1.0, 1.0),
rotate=(-1.0, 1.0))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='ColorJitter',
brightness=0.5,
contrast=0.5,
saturation=0.5,
hue=0.5),
dict(type='RandomGamma', gamma_range=(0.7, 1.5)),
dict(type='Normalize', **img_norm_cfg),
dict(type='GaussianNoise', sigma_range=(0, 0.04), clamp_range=(0., 1.)),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='RandomFlip', prob=0.5, direction='vertical'),
dict(
type='RandomAffine',
global_transform=global_transform,
relative_transform=relative_transform),
dict(type='RandomCrop', crop_size=(320, 448)),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['imgs', 'flow_gt'],
meta_keys=[
'img_fields', 'ann_fields', 'filename1', 'filename2',
'ori_filename1', 'ori_filename2', 'filename_flow',
'ori_filename_flow', 'ori_shape', 'img_shape', 'img_norm_cfg'
]),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='InputResize', exponent=6),
dict(type='Normalize', **img_norm_cfg),
dict(type='TestFormatBundle'),
dict(
type='Collect',
keys=['imgs'],
meta_keys=[
'flow_gt', 'filename1', 'filename2', 'ori_filename1',
'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg',
'scale_factor', 'pad_shape'
])
]
flyingchairs_train = dict(
type=dataset_type,
pipeline=train_pipeline,
data_root=data_root,
split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt')
data = dict(
train_dataloader=dict(
samples_per_gpu=1,
workers_per_gpu=2,
drop_last=True,
persistent_workers=True),
val_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
train=flyingchairs_train,
val=dict(
type=dataset_type,
pipeline=test_pipeline,
data_root=data_root,
test_mode=True,
split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'),
test=dict(
type=dataset_type,
pipeline=test_pipeline,
data_root=data_root,
test_mode=True,
split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'))
| [] |
ZhouXiaolin/plaidml | plaidml2/edsl/__init__.py | dac460b6ae19a62299d15eeb17b402d8c26d0c2b | # Copyright 2019 Intel Corporation.
import logging
from collections import namedtuple
import numpy as np
import six
from plaidml2 import DType
from plaidml2.core import TensorShape, Buffer
from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib
logger = logging.getLogger(__name__)
def __init():
"""Docstring for function plaidml2.edsl.__init"""
ffi_call(lib.plaidml_edsl_init)
ffi.init_once(__init, 'plaidml_edsl_init')
class LogicalShape(ForeignObject):
"""Docstring for class LogicalShape"""
__ffi_del__ = lib.plaidml_logical_shape_free
__ffi_repr__ = lib.plaidml_logical_shape_repr
def __init__(self, dtype=None, dims=[], ptr=None):
if ptr:
ffi_obj = ptr
elif dtype is not None:
raw_dims = ffi.new('int64_t[]', [0 if x is None else x for x in dims])
ffi_obj = ffi_call(lib.plaidml_logical_shape_alloc, dtype, len(dims), raw_dims)
else:
raise ValueError('One of dtype= or ptr= must be specified.')
super(LogicalShape, self).__init__(ffi_obj)
@property
def dtype(self):
return DType(ffi_call(lib.plaidml_logical_shape_get_dtype, self.as_ptr()))
@property
def ndims(self):
return ffi_call(lib.plaidml_logical_shape_get_ndims, self.as_ptr())
@property
def int_dims(self):
"""Returns the dimensions of a LogicalShape as a list.
Args:
self (pointer): The object pointer for a LogicalShape
Returns:
list (int): Integer dimensions of the LogicalShape.
"""
return [
ffi_call(lib.plaidml_logical_shape_get_dim_int, self.as_ptr(), i)
for i in range(self.ndims)
]
def into_TensorShape(self):
return TensorShape(
ptr=ffi_call(lib.plaidml_logical_shape_into_tensor_shape, self.as_ptr()))
Constraint = namedtuple('Constraint', ['lhs', 'rhs'])
def wrap_dim(x):
if isinstance(x, six.integer_types):
return TensorDim(expr=ffi_call(lib.plaidml_dim_expr_int, x))
return x
def dim_op(op, *args):
args = [wrap_dim(x) for x in args]
raw_args = [x.as_ptr() for x in args]
return ffi_call(lib.plaidml_dim_expr_op, op, len(args), raw_args)
class TensorDim(ForeignObject):
"""Docstring for class TensorDim"""
__ffi_del__ = lib.plaidml_dim_expr_free
__ffi_repr__ = lib.plaidml_dim_expr_repr
def __init__(self, expr=None):
if expr is None:
expr = ffi_call(lib.plaidml_dim_expr_none)
super(TensorDim, self).__init__(expr)
def _bind(self, expr):
self.take_ptr(expr)
def __neg__(self):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_NEG, self))
def __add__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_ADD, self, other))
def __radd__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_ADD, other, self))
def __sub__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_SUB, self, other))
def __rsub__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_SUB, other, self))
def __mul__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_MUL, self, other))
def __rmul__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_MUL, other, self))
def __floordiv__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_DIV, self, other))
def __rfloordiv__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_DIV, other, self))
def wrap_poly(x):
if isinstance(x, six.integer_types):
return TensorIndex(expr=ffi_call(lib.plaidml_poly_expr_literal, x))
if isinstance(x, TensorDim):
return TensorIndex(expr=ffi_call(lib.plaidml_poly_expr_dim, x.as_ptr()))
return x
def poly_op(op, *args):
args = [wrap_poly(x) for x in args]
raw_args = [x.as_ptr() for x in args]
return ffi_call(lib.plaidml_poly_expr_op, op, len(args), raw_args)
class TensorIndex(ForeignObject):
"""Docstring for class TensorIndex"""
__ffi_del__ = lib.plaidml_poly_expr_free
__ffi_repr__ = lib.plaidml_poly_expr_repr
def __init__(self, expr=None, name=''):
if expr is None:
expr = ffi_call(lib.plaidml_poly_expr_index, name.encode())
super(TensorIndex, self).__init__(expr)
def __lt__(self, rhs):
return Constraint(self, wrap_dim(rhs))
def __neg__(self):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_NEG, self))
def __add__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_ADD, self, rhs))
def __radd__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_ADD, lhs, self))
def __sub__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_SUB, self, rhs))
def __rsub__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_SUB, lhs, self))
def __mul__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_MUL, self, rhs))
def __rmul__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_MUL, lhs, self))
def __floordiv__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_DIV, self, rhs))
def __rfloordiv__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_DIV, lhs, self))
class _IndexMap(ForeignObject):
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
def __init__(self, ref, key):
if isinstance(key, tuple) or isinstance(key, list):
idxs = key
else:
idxs = [key]
idxs = [wrap_poly(x) for x in idxs]
raw_idxs = [x.as_ptr() for x in idxs]
expr = ffi_call(lib.plaidml_expr_index_map, ref.as_ptr(), len(idxs), raw_idxs)
super(_IndexMap, self).__init__(expr)
class _SizeMap(ForeignObject):
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
def __init__(self, dims):
dims = [wrap_dim(x) for x in dims]
raw_dims = [x.as_ptr() for x in dims]
expr = ffi_call(lib.plaidml_expr_size_map, len(dims), raw_dims)
super(_SizeMap, self).__init__(expr)
class _Contraction(ForeignObject):
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
def __init__(self, agg_op, combo_op, src_idxs, sink_idxs, sink_sizes, name):
src_idxs = [x.as_ptr() for x in src_idxs]
expr = ffi_call(
lib.plaidml_expr_contraction,
agg_op,
combo_op,
sink_idxs.as_ptr(),
sink_sizes.as_ptr(),
len(src_idxs),
src_idxs,
name.encode(),
)
super(_Contraction, self).__init__(expr)
_ContractionPart = namedtuple('_ContractionPart', ['op', 'args'])
class IndexedTensor(object):
"""Docstring for class IndexedTensor"""
def __init__(self, impl, tensor=None):
self._impl = impl
self._tensor = tensor
def __repr__(self):
return repr(self._impl)
# Represents an aggregation_op of SUM in a contraction
def __iadd__(self, rhs):
return IndexedTensor(self._make_contraction(lib.PLAIDML_AGG_OP_SUM, rhs))
# Represents an aggregation_op of PROD in a contraction
def __imul__(self, rhs):
return IndexedTensor(self._make_contraction(lib.PLAIDML_AGG_OP_PROD, rhs))
# Represents an aggregation_op of MAX in a contraction
def __ge__(self, rhs):
self._tensor._set_contraction(self._make_contraction(lib.PLAIDML_AGG_OP_MAX, rhs))
# Represents an aggregation_op of MIN in a contraction
def __le__(self, rhs):
self._tensor._set_contraction(self._make_contraction(lib.PLAIDML_AGG_OP_MIN, rhs))
# Represents a combo_op of PLUS in a contraction
def __add__(self, rhs):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_ADD, (self, rhs)))
# Represents a combo_op of MULTIPLY in a contraction
def __mul__(self, rhs):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_MUL, (self, rhs)))
# Represents a combo_op of EQ in a contraction
def __eq__(self, rhs):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_EQ, (self, rhs)))
def _make_contraction(self, agg_op, rhs):
# Extract combo_op and inputs
if isinstance(rhs._impl, _IndexMap):
# Unary op
combo_op = lib.PLAIDML_COMBO_OP_NONE
inputs = [rhs._impl]
elif isinstance(rhs._impl, _ContractionPart):
# Binary/Ternary op
combo_op = rhs._impl.op
inputs = [x._impl for x in rhs._impl.args]
else:
raise ValueError('Invalid impl')
return _Contraction(
agg_op,
combo_op,
inputs,
self._impl,
_SizeMap(self._tensor._dims),
self._tensor._name,
)
class Tensor(ForeignObject):
"""Docstring for class Tensor"""
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
_dims = None
_is_contraction = False
def __init__(self, shape=None, dims=None, expr=None, value=None, name='', buffer=None):
self._name = name
self._buffer = buffer
if shape:
if buffer is None:
raw_buffer = ffi.NULL
else:
raw_buffer = buffer.as_ptr()
expr = ffi_call(lib.plaidml_expr_placeholder, shape.as_ptr(), raw_buffer,
name.encode())
elif dims is not None:
self._dims = dims
expr = None
elif value is not None:
if isinstance(value, six.integer_types):
expr = ffi_call(lib.plaidml_expr_int, value)
elif isinstance(value, float):
expr = ffi_call(lib.plaidml_expr_float, value)
else:
raise TypeError('Invalid type for value={}'.format(value))
elif expr is None:
raise ValueError('One of dims=, shape=, or expr= must be specified.')
super(Tensor, self).__init__(expr)
def set_param_value(self, buffer):
# Changes the value of a parameter tensor (i.e. one explicitly set to a buffer value)
# Illegal on other tensors
ffi_call(lib.plaidml_expr_param_reset, self.__ffi_obj__, buffer.as_ptr())
def __hash__(self):
return hash((self.as_ptr(), self._dims, self._is_contraction))
def __getitem__(self, key):
return IndexedTensor(_IndexMap(self, key), tensor=self)
def __setitem__(self, key, value):
if isinstance(value._impl, _Contraction):
# standard contraction
self._set_contraction(value._impl)
elif isinstance(value, Tensor):
pass
elif isinstance(value._impl, _IndexMap):
# Unary ASSIGN contraction
self._set_contraction(
_Contraction(
lib.PLAIDML_AGG_OP_ASSIGN,
lib.PLAIDML_COMBO_OP_NONE,
[value._impl],
_IndexMap(self, key),
_SizeMap(self._dims),
self._name,
))
elif isinstance(value._impl, _ContractionPart):
# Binary or ternary ASSIGN contraction
self._set_contraction(
_Contraction(
lib.PLAIDML_AGG_OP_ASSIGN,
value._impl.op,
[x._impl for x in value._impl.args],
_IndexMap(self, key),
_SizeMap(self._dims),
self._name,
))
else:
raise ValueError('Invalid impl when assigning to a Tensor (Type: {})'.format(
type(value._impl)))
def _set_contraction(self, cion):
self._is_contraction = True
self.take_ptr(cion)
# Represents an eltwise negation
def __neg__(self):
return call('neg', self)
# Represents an eltwise bit_not
def __invert__(self):
return call('bit_not', self)
# Represents an eltwise addition
def __add__(self, rhs):
return call('add', self, rhs)
def __radd__(self, lhs):
return call('add', lhs, self)
# Represents an eltwise subtraction
def __sub__(self, rhs):
return call('sub', self, rhs)
def __rsub__(self, lhs):
return call('sub', lhs, self)
# Represents an eltwise multiplication
def __mul__(self, rhs):
return call('mul', self, rhs)
def __rmul__(self, lhs):
return call('mul', lhs, self)
# Represents an eltwise division
def __div__(self, rhs):
return call('div', self, rhs)
def __rdiv__(self, lhs):
return call('div', lhs, self)
# Represents an eltwise division
def __truediv__(self, rhs):
return call('div', self, rhs)
def __rtruediv__(self, lhs):
return call('div', lhs, self)
# Represents an eltwise cmp_eq
def __eq__(self, rhs):
return call('cmp_eq', self, rhs)
# Represents an eltwise cmp_ne
def __ne__(self, rhs):
return call('cmp_ne', self, rhs)
# Represents an eltwise cmp_lt
def __lt__(self, rhs):
return call('cmp_lt', self, rhs)
# Represents an eltwise cmp_gt
def __gt__(self, rhs):
return call('cmp_gt', self, rhs)
# Represents an eltwise cmp_le
def __le__(self, rhs):
return call('cmp_le', self, rhs)
# Represents an eltwise cmp_ge
def __ge__(self, rhs):
return call('cmp_ge', self, rhs)
# Represents an eltwise bit_left
def __lshift__(self, rhs):
return call('bit_left', self, rhs)
def __rlshift__(self, lhs):
return call('bit_left', lhs, self)
# Represents an eltwise bit_right
def __rshift__(self, rhs):
return call('bit_right', self, rhs)
def __rrshift__(self, lhs):
return call('bit_right', lhs, self)
# Represents an eltwise bit_and
def __and__(self, rhs):
return call('bit_and', self, rhs)
def __rand__(self, lhs):
return call('bit_and', lhs, self)
# Represents an eltwise bit_or
def __or__(self, rhs):
return call('bit_or', self, rhs)
def __ror__(self, lhs):
return call('bit_or', lhs, self)
# Represents an eltwise bit_xor
def __xor__(self, rhs):
return call('bit_xor', self, rhs)
def __rxor__(self, lhs):
return call('bit_xor', lhs, self)
# Enable no_reduce on a contraction
def no_reduce(self):
if not self._is_contraction:
raise TypeError('no_reduce can only be specified on a contraction.')
ffi_call(lib.plaidml_expr_contraction_set_no_reduce, self.as_ptr(), True)
return self
# Set use_default on a contraction
def use_default(self, rhs):
if not self._is_contraction:
raise TypeError('use_default can only be specified on a contraction.')
ffi_call(lib.plaidml_expr_contraction_set_use_default, self.as_ptr(), rhs.as_ptr())
return self
def add_constraint(self, constraint):
ffi_call(
lib.plaidml_expr_contraction_add_constraint,
self.as_ptr(),
constraint.lhs.as_ptr(),
constraint.rhs.as_ptr(),
)
def add_constraints(self, constraints):
for constraint in constraints:
self.add_constraint(constraint)
# Return the tensor's shape
@property
def shape(self):
return LogicalShape(ptr=ffi_call(lib.plaidml_expr_get_shape, self.as_ptr()))
# Verify that the specified dims match the dims of this tensor.
def bind_dims(self, *dims):
raw_dims = [x.as_ptr() for x in dims]
ffi_call(lib.plaidml_expr_bind_dims, self.as_ptr(), len(raw_dims), raw_dims)
# bind a concrete shape to this tensor
def bind(self, shape):
ffi_call(lib.plaidml_expr_bind_shape, self.as_ptr(), shape.as_ptr())
class TensorRef:
"""Docstring for class TensorRef"""
def __init__(self, tensor):
self.tensor = tensor
def __hash__(self):
return hash(ffi_call(lib.plaidml_expr_ptr, self.tensor.as_ptr()))
def __eq__(self, other):
if isinstance(other, Tensor):
return self.__hash__() == TensorRef(other).__hash__()
return self.__hash__() == other.__hash__()
class Value(ForeignObject):
"""Docstring for class Value"""
__ffi_del__ = lib.plaidml_value_free
__ffi_repr__ = lib.plaidml_value_repr
def __init__(self, value):
# logger.debug('Value({})'.format(value))
if isinstance(value, np.ndarray):
if value.ndim == 0:
value = value.item()
else:
value = value.tolist()
if value is None:
ffi_obj = ffi_call(lib.plaidml_value_none)
elif isinstance(value, (six.integer_types, bool)):
ffi_obj = ffi_call(lib.plaidml_value_int, value)
elif isinstance(value, float):
ffi_obj = ffi_call(lib.plaidml_value_float, value)
elif isinstance(value, TensorDim):
ffi_obj = ffi_call(lib.plaidml_value_dim, value.as_ptr())
elif isinstance(value, Tensor):
ffi_obj = ffi_call(lib.plaidml_value_expr, value.as_ptr())
elif isinstance(value, (list, tuple)):
self._elts = [Value(x) for x in value]
raw_elts = [x.as_ptr() for x in self._elts]
ffi_obj = ffi_call(lib.plaidml_value_tuple, len(raw_elts), raw_elts)
elif isinstance(value, six.string_types):
ffi_obj = ffi_call(lib.plaidml_value_str, value.encode('utf-8'))
elif isinstance(value, ffi.CData) and ffi.typeof(value) is ffi.typeof('plaidml_value*'):
ffi_obj = value
else:
raise TypeError('Unsupported type {} for value={}'.format(type(value), value))
super(Value, self).__init__(ffi_obj)
def as_tensor(self):
return Tensor(expr=ffi_call(lib.plaidml_value_expr_get, self.as_ptr()))
def TensorOutput(*args):
return Tensor(dims=args)
def TensorDims(count):
return [TensorDim() for i in range(count)]
def TensorIndexes(count):
return [TensorIndex() for i in range(count)]
class ProgramArgument:
"""Docstring for class ProgramArgument"""
def __init__(self, arg):
self.is_input = arg.is_input
self.ref = TensorRef(Tensor(expr=ffi_call(lib.plaidml_expr_clone, arg.tensor)))
self.shape = LogicalShape(ptr=ffi_call(lib.plaidml_logical_shape_clone, arg.shape))
if arg.buffer:
tensor_shape = self.shape.into_TensorShape()
self.buffer = Buffer(tensor_shape, ptr=ffi_call(lib.plaidml_buffer_clone, arg.buffer))
else:
self.buffer = None
class Program(ForeignObject):
"""Docstring for class Program"""
__ffi_del__ = lib.plaidml_program_free
__ffi_repr__ = lib.plaidml_program_repr
def __init__(self, name, outputs, updates=[]):
raw_outputs = [x.as_ptr() for x in outputs]
dst_updates = [x[0].as_ptr() for x in updates]
src_updates = [x[1].as_ptr() for x in updates]
raw_args = ffi.new('plaidml_program_args**')
ffi_obj = ffi_call(
lib.plaidml_program_evaluate,
name.encode(),
len(raw_outputs),
raw_outputs,
len(updates),
src_updates,
dst_updates,
raw_args,
)
self.args = [ProgramArgument(raw_args[0].args[i]) for i in range(raw_args[0].nargs)]
ffi_call(lib.plaidml_program_args_free, raw_args[0])
super(Program, self).__init__(ffi_obj)
@property
def inputs(self):
return [x for x in self.args if x.is_input]
@property
def outputs(self):
return [x for x in self.args if not x.is_input]
def wrap_tensor(x):
if isinstance(x, six.integer_types):
return Tensor(expr=ffi_call(lib.plaidml_expr_int, x))
if np.issubdtype(type(x), np.integer):
return Tensor(expr=ffi_call(lib.plaidml_expr_int, x.item()))
if isinstance(x, float):
return Tensor(expr=ffi_call(lib.plaidml_expr_float, x))
if isinstance(x, TensorDim):
return Tensor(expr=ffi_call(lib.plaidml_expr_dim, x.as_ptr()))
if isinstance(x, Tensor):
return x
    raise TypeError('Unexpected type for call argument: {} (value: {!r})'.format(type(x), x))
def call(fn, *args):
args = [wrap_tensor(x) for x in args]
raw_args = [x.as_ptr() for x in args]
return Tensor(expr=ffi_call(lib.plaidml_expr_call, fn.encode(), len(args), raw_args))
def cast(x, dtype):
return Tensor(expr=ffi_call(lib.plaidml_expr_cast, wrap_tensor(x).as_ptr(), dtype))
def as_bool(x):
return cast(x, DType.BOOLEAN)
def as_float(x, bit_size):
map = {
16: DType.FLOAT16,
32: DType.FLOAT32,
64: DType.FLOAT64,
}
dtype = map.get(bit_size)
if not dtype:
        raise ValueError('Unsupported bit_size for as_float')
return cast(x, dtype)
def as_int(x, bit_size):
map = {
8: DType.INT8,
16: DType.INT16,
32: DType.INT32,
64: DType.INT64,
}
dtype = map.get(bit_size)
if not dtype:
        raise ValueError('Unsupported bit_size for as_int')
return cast(x, dtype)
def as_uint(x, bit_size):
map = {
8: DType.UINT8,
16: DType.UINT16,
32: DType.UINT32,
64: DType.UINT64,
}
dtype = map.get(bit_size)
if not dtype:
        raise ValueError('Unsupported bit_size for as_uint')
return cast(x, dtype)
def ceil(x):
return call('ceil', x)
def cond(lhs, rhs, true_case):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_COND, (lhs, rhs, true_case)))
def cos(x):
return call('cos', x)
def exp(x):
return call('exp', x)
def floor(x):
return call('floor', x)
def gather(x, y):
return call('gather', x, y)
def gradients(loss, variables):
wrts = [x.as_ptr() for x in variables]
raw_grads = ffi.new('plaidml_expr*[]', len(wrts))
ffi_call(
lib.plaidml_expr_gradient,
len(wrts),
wrts,
loss.as_ptr(),
raw_grads,
)
return [Tensor(expr=x) for x in raw_grads]
def ident(x):
return call('ident', x)
def index(x, axis):
return call('index', x, axis)
def jacobian(loss, variables):
wrts = [x.as_ptr() for x in variables]
raw_grads = ffi.new('plaidml_expr*[]', len(wrts))
ffi_call(
lib.plaidml_expr_jacobian,
len(wrts),
wrts,
loss.as_ptr(),
raw_grads,
)
return [Tensor(expr=x) for x in raw_grads]
def log(x):
return call('log', x)
def max(x, y):
return call('max', x, y)
def min(x, y):
return call('min', x, y)
def pow(x, y):
return call('pow', x, y)
def prng(state, shape):
return call('prng', state, *shape)
def reshape(x, dims):
return call('reshape', x, *dims)
def round(x):
return call('round', x)
def scatter(x, y, z):
return call('scatter', x, y, z)
def select(cond, true_case, false_case):
return call('cond', cond, true_case, false_case)
def shape(x):
return call('shape', x)
def sin(x):
return call('sin', x)
def sqrt(x):
return call('sqrt', x)
def tan(x):
return call('tan', x)
def tanh(x):
return call('tanh', x)
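# Example: a minimal matrix-multiplication contraction built with this EDSL.
# `A` and `B` are assumed to be rank-2 placeholder Tensors (e.g. created via
# Tensor(shape=LogicalShape(DType.FLOAT32, [8, 16]))); evaluating the result
# additionally requires wrapping it in a Program and running it on a device.
def example_matmul(A, B):
    I, J, K = TensorDims(3)
    i, j, k = TensorIndexes(3)
    # Bind the symbolic dimensions to the shapes of the inputs.
    A.bind_dims(I, K)
    B.bind_dims(K, J)
    # Declare the output shape and sum-aggregate the products over k.
    C = TensorOutput(I, J)
    C[i, j] += A[i, k] * B[k, j]
    return C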
| [((261, 288), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (278, 288), False, 'import logging\n'), ((397, 439), 'plaidml2.ffi.ffi.init_once', 'ffi.init_once', (['__init', '"""plaidml_edsl_init"""'], {}), "(__init, 'plaidml_edsl_init')\n", (410, 439), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((1878, 1918), 'collections.namedtuple', 'namedtuple', (['"""Constraint"""', "['lhs', 'rhs']"], {}), "('Constraint', ['lhs', 'rhs'])\n", (1888, 1918), False, 'from collections import namedtuple\n'), ((6734, 6780), 'collections.namedtuple', 'namedtuple', (['"""_ContractionPart"""', "['op', 'args']"], {}), "('_ContractionPart', ['op', 'args'])\n", (6744, 6780), False, 'from collections import namedtuple\n'), ((363, 394), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_edsl_init'], {}), '(lib.plaidml_edsl_init)\n', (371, 394), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((19169, 19202), 'plaidml2.ffi.ffi.new', 'ffi.new', (['"""plaidml_program_args**"""'], {}), "('plaidml_program_args**')\n", (19176, 19202), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((19564, 19616), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_program_args_free', 'raw_args[0]'], {}), '(lib.plaidml_program_args_free, raw_args[0])\n', (19572, 19616), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((2480, 2515), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_dim_expr_none'], {}), '(lib.plaidml_dim_expr_none)\n', (2488, 2515), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((16810, 16842), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_value_none'], {}), '(lib.plaidml_value_none)\n', (16818, 16842), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((772, 833), 'plaidml2.ffi.ffi.new', 'ffi.new', (['"""int64_t[]"""', '[(0 if x is None else x) for x in dims]'], {}), "('int64_t[]', [(0 if x is None else x) for x in dims])\n", (779, 833), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((2009, 2046), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_dim_expr_int', 'x'], {}), '(lib.plaidml_dim_expr_int, x)\n', (2017, 2046), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((3620, 3662), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_poly_expr_literal', 'x'], {}), '(lib.plaidml_poly_expr_literal, x)\n', (3628, 3662), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((16924, 16962), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_value_int', 'value'], {}), '(lib.plaidml_value_int, value)\n', (16932, 16962), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((18501, 18553), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_logical_shape_clone', 'arg.shape'], {}), '(lib.plaidml_logical_shape_clone, arg.shape)\n', (18509, 18553), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((19937, 19970), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_expr_int', 'x'], {}), '(lib.plaidml_expr_int, x)\n', (19945, 19970), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((20140, 20175), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_expr_float', 'x'], {}), '(lib.plaidml_expr_float, x)\n', (20148, 20175), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((17024, 17064), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_value_float', 'value'], {}), '(lib.plaidml_value_float, 
value)\n', (17032, 17064), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((18416, 18460), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_expr_clone', 'arg.tensor'], {}), '(lib.plaidml_expr_clone, arg.tensor)\n', (18424, 18460), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((18686, 18732), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_buffer_clone', 'arg.buffer'], {}), '(lib.plaidml_buffer_clone, arg.buffer)\n', (18694, 18732), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((9703, 9740), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_expr_int', 'value'], {}), '(lib.plaidml_expr_int, value)\n', (9711, 9740), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((9807, 9846), 'plaidml2.ffi.ffi_call', 'ffi_call', (['lib.plaidml_expr_float', 'value'], {}), '(lib.plaidml_expr_float, value)\n', (9815, 9846), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((17697, 17714), 'plaidml2.ffi.ffi.typeof', 'ffi.typeof', (['value'], {}), '(value)\n', (17707, 17714), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n'), ((17718, 17746), 'plaidml2.ffi.ffi.typeof', 'ffi.typeof', (['"""plaidml_value*"""'], {}), "('plaidml_value*')\n", (17728, 17746), False, 'from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib\n')] |
AnastasiaaSenina/openvino_training_extensions | pytorch_toolkit/face_recognition/model/common.py | 267425d64372dff5b9083dc0ca6abfc305a71449 | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
from functools import partial
import torch.nn as nn
class ModelInterface(nn.Module):
"""Abstract class for models"""
@abstractmethod
def set_dropout_ratio(self, ratio):
"""Sets dropout ratio of the model"""
@abstractmethod
def get_input_res(self):
"""Returns input resolution"""
from .rmnet_angular import RMNetAngular
from .mobilefacenet import MobileFaceNet
from .landnet import LandmarksNet
from .se_resnet_angular import SEResNetAngular
from .shufflenet_v2_angular import ShuffleNetV2Angular
from .backbones.se_resnet import se_resnet50, se_resnet101, se_resnet152
from .backbones.resnet import resnet50
from .backbones.se_resnext import se_resnext50, se_resnext101, se_resnext152
models_backbones = {'rmnet': RMNetAngular,
'mobilenetv2': MobileFaceNet,
'mobilenetv2_2x': partial(MobileFaceNet, width_multiplier=2.0),
'mobilenetv2_1_5x': partial(MobileFaceNet, width_multiplier=1.5),
'resnet50': partial(SEResNetAngular, base=resnet50),
'se_resnet50': partial(SEResNetAngular, base=se_resnet50),
'se_resnet101': partial(SEResNetAngular, base=se_resnet101),
'se_resnet152': partial(SEResNetAngular, base=se_resnet152),
'se_resnext50': partial(SEResNetAngular, base=se_resnext50),
'se_resnext101': partial(SEResNetAngular, base=se_resnext101),
'se_resnext152': partial(SEResNetAngular, base=se_resnext152),
'shufflenetv2': ShuffleNetV2Angular}
models_landmarks = {'landnet': LandmarksNet}
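# Example: each registry entry is a constructor, so a hypothetical training
# script could build a backbone with `models_backbones['se_resnet50'](...)`;
# the required constructor arguments are defined by the model classes above.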
| [((1469, 1513), 'functools.partial', 'partial', (['MobileFaceNet'], {'width_multiplier': '(2.0)'}), '(MobileFaceNet, width_multiplier=2.0)\n', (1476, 1513), False, 'from functools import partial\n'), ((1555, 1599), 'functools.partial', 'partial', (['MobileFaceNet'], {'width_multiplier': '(1.5)'}), '(MobileFaceNet, width_multiplier=1.5)\n', (1562, 1599), False, 'from functools import partial\n'), ((1633, 1672), 'functools.partial', 'partial', (['SEResNetAngular'], {'base': 'resnet50'}), '(SEResNetAngular, base=resnet50)\n', (1640, 1672), False, 'from functools import partial\n'), ((1709, 1751), 'functools.partial', 'partial', (['SEResNetAngular'], {'base': 'se_resnet50'}), '(SEResNetAngular, base=se_resnet50)\n', (1716, 1751), False, 'from functools import partial\n'), ((1789, 1832), 'functools.partial', 'partial', (['SEResNetAngular'], {'base': 'se_resnet101'}), '(SEResNetAngular, base=se_resnet101)\n', (1796, 1832), False, 'from functools import partial\n'), ((1870, 1913), 'functools.partial', 'partial', (['SEResNetAngular'], {'base': 'se_resnet152'}), '(SEResNetAngular, base=se_resnet152)\n', (1877, 1913), False, 'from functools import partial\n'), ((1951, 1994), 'functools.partial', 'partial', (['SEResNetAngular'], {'base': 'se_resnext50'}), '(SEResNetAngular, base=se_resnext50)\n', (1958, 1994), False, 'from functools import partial\n'), ((2033, 2077), 'functools.partial', 'partial', (['SEResNetAngular'], {'base': 'se_resnext101'}), '(SEResNetAngular, base=se_resnext101)\n', (2040, 2077), False, 'from functools import partial\n'), ((2116, 2160), 'functools.partial', 'partial', (['SEResNetAngular'], {'base': 'se_resnext152'}), '(SEResNetAngular, base=se_resnext152)\n', (2123, 2160), False, 'from functools import partial\n')] |
kpavel/pyclay | src/dataclay/util/logs.py | 275bc8af5c57301231a20cca1cc88556a9c84c79 |
""" Class description goes here. """
import json
import logging
class JSONFormatter(logging.Formatter):
"""Simple JSON formatter for the logging facility."""
def format(self, obj):
"""Note that obj is a LogRecord instance."""
# Copy the dictionary
ret = dict(obj.__dict__)
# Perform the message substitution
args = ret.pop("args")
msg = ret.pop("msg")
ret["message"] = msg % args
        # Exceptions must be formatted (they are not JSON-serializable)
try:
ei = ret.pop("exc_info")
except KeyError:
pass
else:
if ei is not None:
ret["exc_info"] = self.formatException(ei)
# Dump the dictionary in JSON form
return json.dumps(ret, skipkeys=True)
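if __name__ == "__main__":
    # Minimal usage sketch: emit one JSON-formatted record to stderr.
    # Any logger/handler combination can use the formatter the same way.
    handler = logging.StreamHandler()
    handler.setFormatter(JSONFormatter())
    example_logger = logging.getLogger("jsonformatter-example")
    example_logger.addHandler(handler)
    example_logger.warning("value is %s", 42)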
| [((776, 806), 'json.dumps', 'json.dumps', (['ret'], {'skipkeys': '(True)'}), '(ret, skipkeys=True)\n', (786, 806), False, 'import json\n')] |
davxy/numeric | python/orthogonal_test.py | 1e8b44a72e1d570433a5ba81ae0795a750ce5921 | # Orthogonal linear system solver tests
from math import sqrt
import numpy as np
from orthogonal import orthogonal
################################################################################
# 2x2 orthogonal matrix
A = np.matrix('1 1;'
'1 -1', float)
A = A*1.0/sqrt(2.0)
# Known terms vector
b = np.matrix('2; 3')
# Solve the system
x = orthogonal(A, b, 1)
# Check
if np.allclose(b, A*x) == False:
raise Exception('Orthogonal test failure')
################################################################################
# 3x3 orthogonal matrix
A = np.matrix('2 -2 1;'
'1 2 2;'
'2 1 -2', float)
A = A*1.0/3.0
# Known terms vector
b = np.matrix('2; 3; 4')
# Solve the system
x = orthogonal(A, b)
# Check
if np.allclose(b, A*x) == False:
raise Exception('Orthogonal test failure') | [((228, 257), 'numpy.matrix', 'np.matrix', (['"""1 1;1 -1"""', 'float'], {}), "('1 1;1 -1', float)\n", (237, 257), True, 'import numpy as np\n'), ((320, 337), 'numpy.matrix', 'np.matrix', (['"""2; 3"""'], {}), "('2; 3')\n", (329, 337), True, 'import numpy as np\n'), ((361, 380), 'orthogonal.orthogonal', 'orthogonal', (['A', 'b', '(1)'], {}), '(A, b, 1)\n', (371, 380), False, 'from orthogonal import orthogonal\n'), ((580, 623), 'numpy.matrix', 'np.matrix', (['"""2 -2 1;1 2 2;2 1 -2"""', 'float'], {}), "('2 -2 1;1 2 2;2 1 -2', float)\n", (589, 623), True, 'import numpy as np\n'), ((697, 717), 'numpy.matrix', 'np.matrix', (['"""2; 3; 4"""'], {}), "('2; 3; 4')\n", (706, 717), True, 'import numpy as np\n'), ((741, 757), 'orthogonal.orthogonal', 'orthogonal', (['A', 'b'], {}), '(A, b)\n', (751, 757), False, 'from orthogonal import orthogonal\n'), ((285, 294), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (289, 294), False, 'from math import sqrt\n'), ((392, 413), 'numpy.allclose', 'np.allclose', (['b', '(A * x)'], {}), '(b, A * x)\n', (403, 413), True, 'import numpy as np\n'), ((769, 790), 'numpy.allclose', 'np.allclose', (['b', '(A * x)'], {}), '(b, A * x)\n', (780, 790), True, 'import numpy as np\n')] |
adbmd/autonlp | src/autonlp/project.py | 8f7b5559d88775850b6818a09f178dc3407b2ab8 | import os
import shutil
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional
from huggingface_hub import Repository
from loguru import logger
from prettytable import PrettyTable
from .splits import TEST_SPLIT, TRAIN_SPLIT, VALID_SPLIT
from .tasks import TASKS
from .utils import BOLD_TAG, CYAN_TAG, GREEN_TAG, PURPLE_TAG, RESET_TAG, YELLOW_TAG, http_get, http_post
from .validation import validate_file
FILE_STATUS = (
"☁ Uploaded",
"⌚ Queued",
"⚙ In Progress...",
"✅ Success!",
"❌ Failed: file not found",
"❌ Failed: unsupported file type",
"❌ Failed: server error",
"❌ Invalid column mapping, please fix it and re-upload the file.",
)
JOB_STATUS = (
("⌚", "queued"),
("🚀", "start"),
("⚙", "data_munging"),
("🏃", "model_training"),
("✅", "success"),
("❌", "failed"),
)
PROJECT_STATUS = (
("✨", "Created"),
("🚀", "Data processing started"),
("✅", "Data processing successful"),
("❌", "Failed to download data files from the huggingface hub"),
("❌", "Missing 'train' or 'valid' split in data files"),
("❌", "Failed to process data files"),
("❌", "Failed to upload processed data files to the huggingface hub"),
)
SPLITS = (TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT)
@dataclass
class TrainingJob:
"""A training job in AutoNLP"""
job_id: int
status: str
status_emoji: str
created_at: datetime
updated_at: datetime
@classmethod
def from_json_resp(cls, json_resp: dict):
return cls(
job_id=json_resp["id"],
status_emoji=JOB_STATUS[json_resp["status"] - 1][0],
status=JOB_STATUS[json_resp["status"] - 1][1],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
)
def __str__(self):
return "\n".join(
[
f"📚 Model # {self.job_id}",
f" • {BOLD_TAG}Status{RESET_TAG}: {self.status_emoji} {self.status}",
f" • {BOLD_TAG}Created at{RESET_TAG}: {self.created_at.strftime('%Y-%m-%d %H:%M Z')}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
]
)
@dataclass
class UploadedFile:
"""A file uploaded to an AutoNLP project"""
file_id: int
filename: str
processing_status: str
split: str
col_mapping: Dict[str, str]
created_at: datetime
updated_at: datetime
@classmethod
def from_json_resp(cls, json_resp: dict):
return cls(
file_id=json_resp["data_file_id"],
filename=json_resp["fname"],
processing_status=FILE_STATUS[json_resp["download_status"] - 1],
split=SPLITS[json_resp["split"] - 1],
col_mapping=json_resp["col_mapping"],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
)
def __str__(self):
return "\n".join(
[
f"📁 {CYAN_TAG}{self.filename}{RESET_TAG} (id # {self.file_id})",
f" • {BOLD_TAG}Split{RESET_TAG}: {self.split}",
f" • {BOLD_TAG}Processing status{RESET_TAG}: {self.processing_status}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
]
)
@dataclass
class Project:
"""An AutoNLP project"""
_token: str
proj_id: int
name: str
user: str
task: str
status_emoji: str
status: str
language: str
created_at: datetime
updated_at: datetime
dataset_id: str
files: Optional[List[UploadedFile]] = None
training_jobs: Optional[List] = None
@classmethod
def from_json_resp(cls, json_resp: dict, token: str):
"""Build a Project from the API response, JSON-encoded"""
return cls(
proj_id=json_resp["id"],
name=json_resp["proj_name"],
user=json_resp["username"],
task=list(filter(lambda key: TASKS[key] == json_resp["task"], TASKS.keys()))[0],
status_emoji=PROJECT_STATUS[json_resp["status"] - 1][0],
status=PROJECT_STATUS[json_resp["status"] - 1][1],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
dataset_id=json_resp["dataset_id"],
language=json_resp["config"]["language"],
_token=token,
)
def refresh(self):
"""Update information about uploaded files and models attached to the project"""
logger.info("🔄 Refreshing uploaded files information...")
resp = http_get(path=f"/projects/{self.proj_id}/data", token=self._token)
json_files = resp.json()
self.files = [UploadedFile.from_json_resp(file) for file in json_files]
logger.info("🔄 Refreshing models information...")
resp = http_get(path=f"/projects/{self.proj_id}/jobs", token=self._token)
json_jobs = resp.json()
self.training_jobs = [TrainingJob.from_json_resp(job) for job in json_jobs]
def upload(self, filepaths: List[str], split: str, col_mapping: Dict[str, str]):
"""Uploads files to the project"""
local_dataset_dir = os.path.expanduser(f"~/.huggingface/autonlp/projects/{self.dataset_id}")
if os.path.exists(local_dataset_dir):
            if os.path.isdir(os.path.join(local_dataset_dir, ".git")):
clone_from = None
else:
shutil.rmtree(local_dataset_dir)
clone_from = "https://huggingface.co/datasets/" + self.dataset_id
else:
clone_from = "https://huggingface.co/datasets/" + self.dataset_id
dataset_repo = Repository(
local_dir=local_dataset_dir,
clone_from=clone_from,
use_auth_token=self._token,
)
dataset_repo.git_pull()
for idx, file_path in enumerate(filepaths):
if not os.path.isfile(file_path):
logger.error(f"[{idx + 1}/{len(filepaths)}] ❌ '{file_path}' does not exist or is not a file!")
continue
file_name = os.path.basename(file_path)
file_extension = file_name.split(".")[-1]
src = os.path.expanduser(file_path)
dst = os.path.join(local_dataset_dir, "raw", file_name)
logger.info(f"[{idx + 1}/{len(filepaths)}] 📦 Copying {src} to {dst}...")
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copyfile(src, dst)
logger.info(f"[{idx + 1}/{len(filepaths)}] 🔎 Validating {dst} and column mapping...")
validate_file(path=dst, task=self.task, file_ext=file_extension, col_mapping=col_mapping)
dataset_repo.lfs_track(patterns=[f"raw/*.{file_extension}"])
dataset_repo.git_pull()
try:
logger.info("☁ Uploading files to the dataset hub...")
dataset_repo.push_to_hub(commit_message="Upload from AutoNLP CLI")
logger.info("✅ Successfully uploaded the files!")
except OSError as err:
if "nothing to commit, working tree clean" in err.args[0]:
logger.info("❔ Files did not change since last upload!")
dataset_repo.git_push()
return
logger.error("❌ Something went wrong when uploading the files!")
raise
for idx, file_path in enumerate(filepaths):
file_name = os.path.basename(file_path)
            logger.info(f"[{idx + 1}/{len(filepaths)}] 📁 Registering file {file_name} into project '{self.name}'...")
payload = {
"split": split,
"col_mapping": col_mapping,
"data_files": [{"fname": file_name, "username": self.user}],
}
http_post(path=f"/projects/{self.proj_id}/data/add", payload=payload, token=self._token)
logger.info(f"[{idx + 1}/{len(filepaths)}] ✅ Success!")
def train(self):
"""Starts training on the models"""
http_get(path=f"/projects/{self.proj_id}/data/start_process", token=self._token)
logger.info("🔥🔥 Training started!")
def __str__(self):
header = "\n".join(
[
f"AutoNLP Project (id # {self.proj_id})",
"~" * 35,
f" • {BOLD_TAG}Name{RESET_TAG}: {PURPLE_TAG}{self.name}{RESET_TAG}",
f" • {BOLD_TAG}Owner{RESET_TAG}: {GREEN_TAG}{self.user}{RESET_TAG}",
f" • {BOLD_TAG}Status{RESET_TAG}: {BOLD_TAG}{self.status_emoji} {self.status}{RESET_TAG}",
f" • {BOLD_TAG}Task{RESET_TAG}: {YELLOW_TAG}{self.task.title().replace('_', ' ')}{RESET_TAG}",
f" • {BOLD_TAG}Created at{RESET_TAG}: {self.created_at.strftime('%Y-%m-%d %H:%M Z')}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
"",
]
)
printout = [header]
# Uploaded files information
if self.files is None:
descriptions = ["❓ Files information unknown, update the project"]
else:
if len(self.files) == 0:
descriptions = ["🤷 No files uploaded yet!"]
else:
sorted_files = sorted(self.files, key=lambda file: file.split) # Sort by split
descriptions = [str(file) for file in sorted_files]
printout.append(
"\n".join(
[
"~" * 14 + f" {BOLD_TAG}Files{RESET_TAG} " + "~" * 14,
"",
"Dataset ID:",
f"{CYAN_TAG}{self.dataset_id}{RESET_TAG}",
"",
]
+ descriptions
)
)
# Training jobs information
if self.training_jobs is None:
jobs_str = "❓ Models information unknown, update the project"
else:
if len(self.training_jobs) == 0:
jobs_str = "🤷 No train jobs started yet!"
else:
model_table = PrettyTable(["", "ID", "Status", "Creation date", "Last update"])
for job in sorted(self.training_jobs, key=lambda job: job.job_id):
model_table.add_row(
[
job.status_emoji,
job.job_id,
job.status,
job.created_at.strftime("%Y-%m-%d %H:%M Z"),
job.updated_at.strftime("%Y-%m-%d %H:%M Z"),
]
)
jobs_str = str(model_table)
printout.append("\n".join(["", "~" * 12 + f" {BOLD_TAG}Models{RESET_TAG} " + "~" * 11, "", jobs_str]))
return "\n".join(printout)
| [((4759, 4816), 'loguru.logger.info', 'logger.info', (['"""🔄 Refreshing uploaded files information..."""'], {}), "('🔄 Refreshing uploaded files information...')\n", (4770, 4816), False, 'from loguru import logger\n'), ((5021, 5070), 'loguru.logger.info', 'logger.info', (['"""🔄 Refreshing models information..."""'], {}), "('🔄 Refreshing models information...')\n", (5032, 5070), False, 'from loguru import logger\n'), ((5426, 5498), 'os.path.expanduser', 'os.path.expanduser', (['f"""~/.huggingface/autonlp/projects/{self.dataset_id}"""'], {}), "(f'~/.huggingface/autonlp/projects/{self.dataset_id}')\n", (5444, 5498), False, 'import os\n'), ((5510, 5543), 'os.path.exists', 'os.path.exists', (['local_dataset_dir'], {}), '(local_dataset_dir)\n', (5524, 5543), False, 'import os\n'), ((5913, 6007), 'huggingface_hub.Repository', 'Repository', ([], {'local_dir': 'local_dataset_dir', 'clone_from': 'clone_from', 'use_auth_token': 'self._token'}), '(local_dir=local_dataset_dir, clone_from=clone_from,\n use_auth_token=self._token)\n', (5923, 6007), False, 'from huggingface_hub import Repository\n'), ((8334, 8369), 'loguru.logger.info', 'logger.info', (['"""🔥🔥 Training started!"""'], {}), "('🔥🔥 Training started!')\n", (8345, 8369), False, 'from loguru import logger\n'), ((6342, 6369), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (6358, 6369), False, 'import os\n'), ((6442, 6471), 'os.path.expanduser', 'os.path.expanduser', (['file_path'], {}), '(file_path)\n', (6460, 6471), False, 'import os\n'), ((6490, 6539), 'os.path.join', 'os.path.join', (['local_dataset_dir', '"""raw"""', 'file_name'], {}), "(local_dataset_dir, 'raw', file_name)\n", (6502, 6539), False, 'import os\n'), ((6698, 6723), 'shutil.copyfile', 'shutil.copyfile', (['src', 'dst'], {}), '(src, dst)\n', (6713, 6723), False, 'import shutil\n'), ((7058, 7112), 'loguru.logger.info', 'logger.info', (['"""☁ Uploading files to the dataset hub..."""'], {}), "('☁ Uploading files to the dataset hub...')\n", (7069, 7112), False, 'from loguru import logger\n'), ((7204, 7254), 'loguru.logger.info', 'logger.info', (['"""✅ Successfully uploaded the files!"""'], {}), "('✅ Successfully uploaded the files!')\n", (7215, 7254), False, 'from loguru import logger\n'), ((7665, 7692), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (7681, 7692), False, 'import os\n'), ((1743, 1790), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['created_at']"], {}), "(json_resp['created_at'])\n", (1765, 1790), False, 'from datetime import datetime\n'), ((1815, 1862), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['updated_at']"], {}), "(json_resp['updated_at'])\n", (1837, 1862), False, 'from datetime import datetime\n'), ((2924, 2971), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['created_at']"], {}), "(json_resp['created_at'])\n", (2946, 2971), False, 'from datetime import datetime\n'), ((2996, 3043), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['updated_at']"], {}), "(json_resp['updated_at'])\n", (3018, 3043), False, 'from datetime import datetime\n'), ((4379, 4426), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['created_at']"], {}), "(json_resp['created_at'])\n", (4401, 4426), False, 'from datetime import datetime\n'), ((4451, 4498), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['updated_at']"], {}), "(json_resp['updated_at'])\n", (4473, 4498), False, 
'from datetime import datetime\n'), ((5574, 5612), 'os.path.join', 'os.path.join', (['local_dataset_dir', '"""git"""'], {}), "(local_dataset_dir, 'git')\n", (5586, 5612), False, 'import os\n'), ((5683, 5715), 'shutil.rmtree', 'shutil.rmtree', (['local_dataset_dir'], {}), '(local_dataset_dir)\n', (5696, 5715), False, 'import shutil\n'), ((6155, 6180), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (6169, 6180), False, 'import os\n'), ((6649, 6669), 'os.path.dirname', 'os.path.dirname', (['dst'], {}), '(dst)\n', (6664, 6669), False, 'import os\n'), ((7505, 7569), 'loguru.logger.error', 'logger.error', (['"""❌ Something went wrong when uploading the files!"""'], {}), "('❌ Something went wrong when uploading the files!')\n", (7517, 7569), False, 'from loguru import logger\n'), ((10329, 10394), 'prettytable.PrettyTable', 'PrettyTable', (["['', 'ID', 'Status', 'Creation date', 'Last update']"], {}), "(['', 'ID', 'Status', 'Creation date', 'Last update'])\n", (10340, 10394), False, 'from prettytable import PrettyTable\n'), ((7373, 7429), 'loguru.logger.info', 'logger.info', (['"""❔ Files did not change since last upload!"""'], {}), "('❔ Files did not change since last upload!')\n", (7384, 7429), False, 'from loguru import logger\n')] |
xuantan/viewfinder | backend/services/apns_util.py | 992209086d01be0ef6506f325cf89b84d374f969 | # -*- coding: utf-8 -*-
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Apple Push Notification service utilities.
Original copyright for this code: https://github.com/jayridge/apnstornado
  TokenToBinary(): converts a base64-encoded token into a binary value
CreateMessage(): formats a binary APNs message from parameters
ParseResponse(): parses APNs binary response for status & identifier
ErrorStatusToString(): converts error status to error message
"""
__author__ = '[email protected] (Spencer Kimball)'
import base64
import json
import struct
import time
from tornado import escape
_MAX_PAYLOAD_BYTES = 256
"""Maximum number of bytes in the APNS payload."""
_ELLIPSIS_BYTES = escape.utf8(u'…')
"""UTF-8 encoding of the Unicode ellipsis character."""
def TokenToBinary(token):
return base64.b64decode(token)
def TokenFromBinary(bin_token):
return base64.b64encode(bin_token)
def CreateMessage(token, alert=None, badge=None, sound=None,
identifier=0, expiry=None, extra=None, allow_truncate=True):
token = TokenToBinary(token)
if len(token) != 32:
raise ValueError, u'Token must be a 32-byte binary string.'
if (alert is not None) and (not isinstance(alert, (basestring, dict))):
raise ValueError, u'Alert message must be a string or a dictionary.'
if expiry is None:
expiry = long(time.time() + 365 * 86400)
# Start by determining the length of the UTF-8 encoded JSON with no alert text. This allows us to
# determine how much space is left for the message.
# 'content-available': 1 is necessary to trigger iOS 7's background download processing.
aps = { 'alert' : '', 'content-available': 1 }
if badge is not None:
aps['badge'] = badge
if sound is not None:
aps['sound'] = sound
data = { 'aps' : aps }
if extra is not None:
data.update(extra)
# Create compact JSON representation with no extra space and no escaping of non-ascii chars (i.e. use
# direct UTF-8 representation rather than "\u1234" escaping). This maximizes the amount of space that's
# left for the alert text.
encoded = escape.utf8(json.dumps(escape.recursive_unicode(data), separators=(',', ':'), ensure_ascii=False))
bytes_left = _MAX_PAYLOAD_BYTES - len(encoded)
if allow_truncate and isinstance(alert, basestring):
alert = _TruncateAlert(alert, bytes_left)
elif alert and len(escape.utf8(alert)) > bytes_left:
raise ValueError, u'max payload(%d) exceeded: %d' % (_MAX_PAYLOAD_BYTES, len(escape.utf8(alert)))
# Now re-encode including the alert text.
aps['alert'] = alert
encoded = escape.utf8(json.dumps(escape.recursive_unicode(data), separators=(',', ':'), ensure_ascii=False))
length = len(encoded)
assert length <= _MAX_PAYLOAD_BYTES, (encoded, length)
return struct.pack('!bIIH32sH%(length)ds' % { 'length' : length },
1, identifier, expiry,
32, token, length, encoded)
def ParseResponse(bytes):
if len(bytes) != 6:
raise ValueError, u'response must be a 6-byte binary string.'
command, status, identifier = struct.unpack_from('!bbI', bytes, 0)
if command != 8:
raise ValueError, u'response command must equal 8.'
return status, identifier, ErrorStatusToString(status)
def ErrorStatusToString(status):
  if status == 0:
    return 'No errors encountered'
  elif status == 1:
    return 'Processing error'
  elif status == 2:
    return 'Missing device token'
  elif status == 3:
    return 'Missing topic'
  elif status == 4:
    return 'Missing payload'
  elif status == 5:
    return 'Invalid token size'
  elif status == 6:
    return 'Invalid topic size'
  elif status == 7:
    return 'Invalid payload size'
  elif status == 8:
    return 'Invalid token'
  elif status == 255:
    return 'None (unknown)'
else:
return ''
def _TruncateAlert(alert, max_bytes):
"""Converts the alert text to UTF-8 encoded JSON format, which is how
the alert will be stored in the APNS payload. If the number of
resulting bytes exceeds "max_bytes", then truncates the alert text
at a Unicode character boundary, taking care not to split JSON
escape sequences. Returns the truncated UTF-8 encoded alert text,
including a trailing ellipsis character.
"""
alert_json = escape.utf8(json.dumps(escape.recursive_unicode(alert), ensure_ascii=False))
# Strip quotes added by JSON.
alert_json = alert_json[1:-1]
# Check if alert fits with no truncation.
if len(alert_json) <= max_bytes:
return escape.utf8(alert)
# Make room for an appended ellipsis.
assert max_bytes >= len(_ELLIPSIS_BYTES), 'max_bytes must be at least %d' % len(_ELLIPSIS_BYTES)
max_bytes -= len(_ELLIPSIS_BYTES)
# Truncate the JSON UTF8 string at a Unicode character boundary.
truncated = alert_json[:max_bytes].decode('utf-8', errors='ignore')
# If JSON escape sequences were split, then the truncated string may not be valid JSON. Keep
# chopping trailing characters until the truncated string is valid JSON. It may take several
# tries, such as in the case where a "\u1234" sequence has been split.
while True:
try:
alert = json.loads(u'"%s"' % truncated)
break
except Exception:
truncated = truncated[:-1]
# Return the UTF-8 encoding of the alert with the ellipsis appended to it.
return escape.utf8(alert) + _ELLIPSIS_BYTES
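if __name__ == '__main__':
  # Minimal usage sketch with a made-up device token (base64 of 32 bytes);
  # real tokens are obtained from the device during APNs registration.
  fake_token = TokenFromBinary('\x01' * 32)
  msg = CreateMessage(fake_token, alert=u'Hello, world', badge=1, sound='default')
  print '%d-byte APNS message created' % len(msg)
  # Parse a fabricated success response (command=8, status=0, identifier=17).
  print ParseResponse(struct.pack('!bbI', 8, 0, 17))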
| [] |
jamesellis1999/qml | demonstrations/tutorial_kernels_module.py | 33c9d66712b36861dc098f9c789ba2c3ab897fdb | r"""Training and evaluating quantum kernels
===========================================
.. meta::
:property="og:description": Kernels and alignment training with Pennylane.
:property="og:image": https://pennylane.ai/qml/_images/QEK_thumbnail.png
.. related::
tutorial_kernel_based_training Kernel-based training with scikit-learn
tutorial_data_reuploading_classifier Classification with data reuploading
*Authors: Peter-Jan Derks, Paul Fährmann, Elies Gil-Fuster, Tom
Hubregtsen, Johannes Jakob Meyer and David Wierichs. Posted: 24 June 2021*
Kernel methods are one of the cornerstones of classical machine learning.
Here we are concerned with kernels that can be evaluated on quantum computers,
*quantum kernels* for short.
In this tutorial you will learn how to evaluate kernels, use them for classification
and train them with gradient-based optimization, and all that using the
functionality of PennyLane's
`kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__.
The demo is based on Ref. [#Training_QEKs]_, a project from Xanadu's own
`QHack <https://qhack.ai/>`__ hackathon.
What are kernel methods?
------------------------
To understand what a kernel method does, let's first revisit
one of the simplest methods to assign binary labels to datapoints:
linear classification.
Imagine we want to discern two different classes of points that lie in
different corners of the plane. A linear classifier corresponds to
drawing a line and assigning different labels to the regions on opposing
sides of the line:
.. figure:: ../demonstrations/kernels_module/linear_classification.png
:align: center
:width: 30%
We can mathematically formalize this by assigning the label :math:`y`
via
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \boldsymbol{x}\rangle + b).
The vector :math:`\boldsymbol{w}` points perpendicular to the line and
thus determines its slope. The independent term :math:`b` specifies the
position on the plane. In this form, linear classification can also be
extended to higher dimensional vectors :math:`\boldsymbol{x}`, where a
line does not divide the entire space into two regions anymore. Instead
one needs a *hyperplane*. It is immediately clear that this method is
not very powerful, as datasets that are not separable by a hyperplane
can't be classified without error.
We can actually sneak around this limitation by performing a neat trick:
if we define some map :math:`\phi(\boldsymbol{x})` that *embeds* our
datapoints into a larger *feature space* and then perform linear
classification there, we could actually realize non-linear
classification in our original space!
.. figure:: ../demonstrations/kernels_module/embedding_nonlinear_classification.png
:align: center
:width: 65%
If we go back to the expression for our prediction and include the
embedding, we get
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \phi(\boldsymbol{x})\rangle + b).
We will forgo one tiny step, but it can be shown that for the purpose
of optimal classification, we can choose the vector defining the
decision boundary as a linear combination of the embedded datapoints
:math:`\boldsymbol{w} = \sum_i \alpha_i \phi(\boldsymbol{x}_i)`. Putting
this into the formula yields
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}\left(\sum_i \alpha_i \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x})\rangle + b\right).
This rewriting might not seem useful at first, but notice the above
formula only contains inner products between vectors in the embedding
space:
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x}_j)\rangle.
We call this function the *kernel*. It provides the advantage that we can often
find an explicit formula for the kernel :math:`k` that makes it
superfluous to actually perform the (potentially expensive) embedding
:math:`\phi`. Consider for example the following embedding and the
associated kernel:
.. math::
\phi((x_1, x_2)) &= (x_1^2, \sqrt{2} x_1 x_2, x_2^2) \\
k(\boldsymbol{x}, \boldsymbol{y}) &= x_1^2 y_1^2 + 2 x_1 x_2 y_1 y_2 + x_2^2 y_2^2 = \langle \boldsymbol{x}, \boldsymbol{y} \rangle^2.
This means by just replacing the regular scalar product in our linear
classification with the map :math:`k`, we can actually express much more
intricate decision boundaries!
This is very important, because in many interesting cases the embedding :math:`\phi`
will be much costlier to compute than the kernel :math:`k`.
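As a quick sanity check, the explicit embedding and the kernel formula above
can be compared numerically (plain NumPy suffices, and the datapoints here are
arbitrary):

.. code-block:: python

    import numpy as np

    def phi(v):
        # explicit feature map (x_1^2, sqrt(2) x_1 x_2, x_2^2)
        return np.array([v[0] ** 2, np.sqrt(2) * v[0] * v[1], v[1] ** 2])

    x = np.array([0.3, -1.2])
    y = np.array([0.7, 0.5])

    print(np.dot(phi(x), phi(y)))  # 0.1521...
    print(np.dot(x, y) ** 2)       # 0.1521... -- same value, without the embedding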
In this demo, we will explore one particular kind of kernel
that can be realized on near-term quantum computers, namely *Quantum
Embedding Kernels (QEKs)*. These are kernels that arise from embedding
data into the space of quantum states. We formalize this by considering
a parameterised quantum circuit :math:`U(\boldsymbol{x})` that maps
a datapoint :math:`\boldsymbol{x}` to the state
.. math::
|\psi(\boldsymbol{x})\rangle = U(\boldsymbol{x}) |0 \rangle.
The kernel value is then given by the *overlap* of the associated
embedded quantum states
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = | \langle\psi(\boldsymbol{x}_i)|\psi(\boldsymbol{x}_j)\rangle|^2.
"""
##############################################################################
# A toy problem
# -------------
# In this demo, we will treat a toy problem that showcases the
# inner workings of classification with quantum embedding kernels,
# training variational embedding kernels and the available functionalities
# to do both in PennyLane. We of course need to start with some imports:
from pennylane import numpy as np
import matplotlib as mpl
np.random.seed(1359)
##############################################################################
# And we proceed right away to create a dataset to work with, the
# ``DoubleCake`` dataset. Firstly, we define two functions to enable us to
# generate the data.
# The details of these functions are not essential for understanding the demo,
# so feel free to skim over them if they look confusing.
def _make_circular_data(num_sectors):
"""Generate datapoints arranged in an even circle."""
center_indices = np.array(range(0, num_sectors))
sector_angle = 2 * np.pi / num_sectors
angles = (center_indices + 0.5) * sector_angle
x = 0.7 * np.cos(angles)
y = 0.7 * np.sin(angles)
labels = 2 * np.remainder(np.floor_divide(angles, sector_angle), 2) - 1
return x, y, labels
def make_double_cake_data(num_sectors):
x1, y1, labels1 = _make_circular_data(num_sectors)
x2, y2, labels2 = _make_circular_data(num_sectors)
# x and y coordinates of the datapoints
x = np.hstack([x1, 0.5 * x2])
y = np.hstack([y1, 0.5 * y2])
# Canonical form of dataset
X = np.vstack([x, y]).T
labels = np.hstack([labels1, -1 * labels2])
# Canonical form of labels
Y = labels.astype(int)
return X, Y
##############################################################################
# Next, we define a function to help plot the ``DoubleCake`` data:
def plot_double_cake_data(X, Y, ax, num_sectors=None):
"""Plot double cake data and corresponding sectors."""
x, y = X.T
cmap = mpl.colors.ListedColormap(["#FF0000", "#0000FF"])
ax.scatter(x, y, c=Y, cmap=cmap, s=25, marker="s")
if num_sectors is not None:
sector_angle = 360 / num_sectors
for i in range(num_sectors):
color = ["#FF0000", "#0000FF"][(i % 2)]
other_color = ["#FF0000", "#0000FF"][((i + 1) % 2)]
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
1,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=color,
alpha=0.1,
width=0.5,
)
)
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
0.5,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=other_color,
alpha=0.1,
)
)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_aspect("equal")
ax.axis("off")
return ax
##############################################################################
# Let's now have a look at our dataset. In our example, we will work with
# 3 sectors:
import matplotlib.pyplot as plt
num_sectors = 3
X, Y = make_double_cake_data(num_sectors)
ax = plot_double_cake_data(X, Y, plt.gca(), num_sectors=num_sectors)
##############################################################################
# Defining a Quantum Embedding Kernel
# -----------------------------------
# PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__
# allows for a particularly simple
# implementation of Quantum Embedding Kernels. The first ingredient we
# need for this is an *ansatz*, which we will construct by repeating a
# layer as a building block. Let's start by defining this layer:
import pennylane as qml
def layer(x, params, wires, i0=0, inc=1):
"""Building block of the embedding ansatz"""
i = i0
for j, wire in enumerate(wires):
qml.Hadamard(wires=[wire])
qml.RZ(x[i % len(x)], wires=[wire])
i += inc
qml.RY(params[0, j], wires=[wire])
qml.broadcast(unitary=qml.CRZ, pattern="ring", wires=wires, parameters=params[1])
##############################################################################
# To construct the ansatz, this layer is repeated multiple times, reusing
# the datapoint ``x`` but feeding different variational
# parameters ``params`` into each repetition.
# Together, the datapoint and the variational parameters fully determine
# the embedding ansatz :math:`U(\boldsymbol{x})`.
# In order to construct the full kernel circuit, we also require its adjoint
# :math:`U(\boldsymbol{x})^\dagger`, which we can obtain via ``qml.adjoint``.
def ansatz(x, params, wires):
"""The embedding ansatz"""
for j, layer_params in enumerate(params):
layer(x, layer_params, wires, i0=j * len(wires))
adjoint_ansatz = qml.adjoint(ansatz)
def random_params(num_wires, num_layers):
"""Generate random variational parameters in the shape for the ansatz."""
return np.random.uniform(0, 2 * np.pi, (num_layers, 2, num_wires), requires_grad=True)
##############################################################################
# Together with the ansatz we only need a device to run the quantum circuit on.
# For the purpose of this tutorial we will use PennyLane's ``default.qubit``
# device with 5 wires in analytic mode.
dev = qml.device("default.qubit", wires=5, shots=None)
wires = dev.wires.tolist()
##############################################################################
# Let us now define the quantum circuit that realizes the kernel. We will compute
# the overlap of the quantum states by first applying the embedding of the first
# datapoint and then the adjoint of the embedding of the second datapoint. We
# finally extract the probabilities of observing each basis state.
@qml.qnode(dev)
def kernel_circuit(x1, x2, params):
ansatz(x1, params, wires=wires)
adjoint_ansatz(x2, params, wires=wires)
return qml.probs(wires=wires)
##############################################################################
# The kernel function itself is now obtained by looking at the probability
# of observing the all-zero state at the end of the kernel circuit -- because
# of the ordering in ``qml.probs``, this is the first entry:
def kernel(x1, x2, params):
return kernel_circuit(x1, x2, params)[0]
##############################################################################
#
# .. note::
# An alternative way to set up the kernel circuit in PennyLane would be
# to use the observable type
# `Projector <https://pennylane.readthedocs.io/en/latest/code/api/pennylane.Projector.html>`__.
# This is shown in the
# `demo on kernel-based training of quantum models <https://pennylane.ai/qml/demos/tutorial_kernel_based_training.html>`__, where you will also find more
# background information on the kernel circuit structure itself.
#
# Before focusing on the kernel values we have to provide values for the
# variational parameters. At this point we fix the number of layers in the
# ansatz circuit to :math:`6`.
init_params = random_params(num_wires=5, num_layers=6)
##############################################################################
# Now we can have a look at the kernel value between the first and the
# second datapoint:
kernel_value = kernel(X[0], X[1], init_params)
print(f"The kernel value between the first and second datapoint is {kernel_value:.3f}")
##############################################################################
# The mutual kernel values between all elements of the dataset form the
# *kernel matrix*. We can inspect it via the ``qml.kernels.square_kernel_matrix``
# method, which makes use of the symmetry of the kernel,
# :math:`k(\boldsymbol{x}_i,\boldsymbol{x}_j) = k(\boldsymbol{x}_j, \boldsymbol{x}_i)`.
# In addition, the option ``assume_normalized_kernel=True`` ensures that we do not
# calculate the entries between the same datapoints, as we know them to be 1
# for our noiseless simulation. Overall this means that we compute
# :math:`\frac{1}{2}(N^2-N)` kernel values for :math:`N` datapoints.
# To include the variational parameters, we construct a ``lambda`` function that
# fixes them to the values we sampled above.
init_kernel = lambda x1, x2: kernel(x1, x2, init_params)
K_init = qml.kernels.square_kernel_matrix(X, init_kernel, assume_normalized_kernel=True)
with np.printoptions(precision=3, suppress=True):
print(K_init)
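##############################################################################
# As a small added illustration, we can confirm two properties relied on above:
# the kernel matrix is symmetric, and -- because of ``assume_normalized_kernel=True`` --
# its diagonal entries are simply filled with 1 instead of being computed.
print("Kernel matrix is symmetric:", np.allclose(K_init, K_init.T))
print("Diagonal entries are 1:", np.allclose(np.diag(K_init), 1.0))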
##############################################################################
# Using the Quantum Embedding Kernel for predictions
# --------------------------------------------------
# The quantum kernel alone cannot be used to make predictions on a
# dataset, because it is essentially just a tool to measure the similarity
# between two datapoints. To perform an actual prediction we will make use
# of scikit-learn's Support Vector Classifier (SVC).
from sklearn.svm import SVC
##############################################################################
# To construct the SVM, we need to supply ``sklearn.svm.SVC`` with a function
# that takes two sets of datapoints and returns the associated kernel matrix.
# We can make use of the function ``qml.kernels.kernel_matrix`` that provides
# this functionality. It expects the kernel to not have additional parameters
# besides the datapoints, which is why we again supply the variational
# parameters via the ``lambda`` function from above.
# Once we have this, we can let scikit-learn adjust the SVM from our Quantum
# Embedding Kernel.
#
# .. note::
# This step does *not* modify the variational parameters in our circuit
# ansatz. What it does is solve a different optimization task for the
# :math:`\alpha` and :math:`b` vectors we introduced in the beginning.
svm = SVC(kernel=lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, init_kernel)).fit(X, Y)
##############################################################################
# To see how well our classifier performs we will measure what percentage
# of the dataset it classifies correctly.
def accuracy(classifier, X, Y_target):
return 1 - np.count_nonzero(classifier.predict(X) - Y_target) / len(Y_target)
accuracy_init = accuracy(svm, X, Y)
print(f"The accuracy of the kernel with random parameters is {accuracy_init:.3f}")
##############################################################################
# We are also interested in seeing what the decision boundaries in this
# classification look like. This could help us spot overfitting issues
# visually in more complex data sets. To this end we will introduce a
# second helper method.
def plot_decision_boundaries(classifier, ax, N_gridpoints=14):
_xx, _yy = np.meshgrid(np.linspace(-1, 1, N_gridpoints), np.linspace(-1, 1, N_gridpoints))
_zz = np.zeros_like(_xx)
for idx in np.ndindex(*_xx.shape):
_zz[idx] = classifier.predict(np.array([_xx[idx], _yy[idx]])[np.newaxis, :])
plot_data = {"_xx": _xx, "_yy": _yy, "_zz": _zz}
ax.contourf(
_xx,
_yy,
_zz,
cmap=mpl.colors.ListedColormap(["#FF0000", "#0000FF"]),
alpha=0.2,
levels=[-1, 0, 1],
)
plot_double_cake_data(X, Y, ax)
return plot_data
##############################################################################
# With that done, let's have a look at the decision boundaries for our
# initial classifier:
init_plot_data = plot_decision_boundaries(svm, plt.gca())
##############################################################################
# We see that the outer points in the dataset can be correctly classified, but
# we still struggle with the inner circle. But remember we have a circuit
# with many free parameters! It is reasonable to believe we can give
# values to those variational parameters which improve the overall accuracy
# of our SVC.
#
# Training the Quantum Embedding Kernel
# -------------------------------------
#
# To be able to train the Quantum Embedding Kernel we need some measure of
# how well it fits the dataset in question. Performing an exhaustive
# search in parameter space is not a good solution because it is very
# resource intensive, and since the accuracy is a discrete quantity we
# would not be able to detect small improvements.
#
# We can, however, resort to a more specialized measure, the
# *kernel-target alignment* [#Alignment]_. The kernel-target alignment compares the
# similarity predicted by the quantum kernel to the actual labels of the
# training data. It is based on *kernel alignment*, a similarity measure
# between two kernels with given kernel matrices :math:`K_1` and
# :math:`K_2`:
#
# .. math::
# \operatorname{KA}(K_1, K_2) = \frac{\operatorname{Tr}(K_1 K_2)}{\sqrt{\operatorname{Tr}(K_1^2)\operatorname{Tr}(K_2^2)}}.
#
# .. note::
# Seen from a more theoretical side, :math:`\operatorname{KA}`
# is nothing else than the cosine of the angle between the kernel
# matrices :math:`K_1` and :math:`K_2` if we see them as vectors
# in the space of matrices with the Hilbert-Schmidt (or
# Frobenius) scalar product
# :math:`\langle A, B \rangle = \operatorname{Tr}(A^T B)`. This
# reinforces the geometric picture of how this measure relates
# to objects, namely two kernels, being aligned in a vector space.
#
# The training data enters the picture by defining an *ideal* kernel
# function that expresses the original labelling in the vector
# :math:`\boldsymbol{y}` by assigning to two datapoints the product
# of the corresponding labels:
#
# .. math::
# k_{\boldsymbol{y}}(\boldsymbol{x}_i, \boldsymbol{x}_j) = y_i y_j.
#
# The assigned kernel is thus :math:`+1` if both datapoints lie in the
# same class and :math:`-1` otherwise and its kernel matrix is simply
# given by the outer product :math:`\boldsymbol{y}\boldsymbol{y}^T`.
# The kernel-target alignment is then defined as the kernel alignment
# of the kernel matrix :math:`K` generated by the
# quantum kernel and :math:`\boldsymbol{y}\boldsymbol{y}^T`:
#
# .. math::
# \operatorname{KTA}_{\boldsymbol{y}}(K)
# = \frac{\operatorname{Tr}(K \boldsymbol{y}\boldsymbol{y}^T)}{\sqrt{\operatorname{Tr}(K^2)\operatorname{Tr}((\boldsymbol{y}\boldsymbol{y}^T)^2)}}
# = \frac{\boldsymbol{y}^T K \boldsymbol{y}}{\sqrt{\operatorname{Tr}(K^2)} N}
#
# where :math:`N` is the number of elements in :math:`\boldsymbol{y}`,
# that is the number of datapoints in the dataset.
#
# In summary, the kernel-target alignment effectively captures how well
# the kernel you chose reproduces the actual similarities of the data. It
# does have one drawback, however: having a high kernel-target alignment
# is only a necessary but not a sufficient condition for a good
# performance of the kernel [#Alignment]_. This means that good alignment is
# necessary for good performance, but optimal alignment will not always
# bring optimal training accuracy with it.
#
# Let's now come back to the actual implementation. PennyLane's
# ``kernels`` module allows you to easily evaluate the kernel
# target alignment:
kta_init = qml.kernels.target_alignment(X, Y, init_kernel, assume_normalized_kernel=True)
print(f"The kernel-target alignment for our dataset and random parameters is {kta_init:.3f}")
##############################################################################
# Now let's code up an optimization loop and improve the kernel-target alignment!
#
# We will make use of regular gradient descent optimization. To speed up
# the optimization we will not use the entire training set to compute
# :math:`\operatorname{KTA}` but rather
# sample smaller subsets of the data at each step; here we choose :math:`4`
# datapoints at random. Remember that PennyLane's built-in optimizer works
# to *minimize* the cost function that is given to it, which is why we
# have to multiply the kernel target alignment by :math:`-1` to actually
# *maximize* it in the process.
#
# .. note::
# Currently, the function ``qml.kernels.target_alignment`` is not
# differentiable yet, making it unfit for gradient descent optimization.
# We therefore first define a differentiable version of this function.
def target_alignment(
X,
Y,
kernel,
assume_normalized_kernel=False,
rescale_class_labels=True,
):
"""Kernel-target alignment between kernel and labels."""
K = qml.kernels.square_kernel_matrix(
X,
kernel,
assume_normalized_kernel=assume_normalized_kernel,
)
if rescale_class_labels:
nplus = np.count_nonzero(np.array(Y) == 1)
nminus = len(Y) - nplus
_Y = np.array([y / nplus if y == 1 else y / nminus for y in Y])
else:
_Y = np.array(Y)
T = np.outer(_Y, _Y)
inner_product = np.sum(K * T)
norm = np.sqrt(np.sum(K * K) * np.sum(T * T))
inner_product = inner_product / norm
return inner_product
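##############################################################################
# As a quick consistency check (added for illustration), this differentiable
# re-implementation should reproduce the value we obtained from
# ``qml.kernels.target_alignment`` above when evaluated with the initial parameters.
kta_check = target_alignment(X, Y, init_kernel, assume_normalized_kernel=True)
print(f"Differentiable KTA at the initial parameters: {kta_check:.3f}")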
params = init_params
opt = qml.GradientDescentOptimizer(0.2)
for i in range(500):
# Choose subset of datapoints to compute the KTA on.
subset = np.random.choice(list(range(len(X))), 4)
# Define the cost function for optimization
cost = lambda _params: -target_alignment(
X[subset],
Y[subset],
lambda x1, x2: kernel(x1, x2, _params),
assume_normalized_kernel=True,
)
# Optimization step
params = opt.step(cost, params)
# Report the alignment on the full dataset every 50 steps.
if (i + 1) % 50 == 0:
current_alignment = target_alignment(
X,
Y,
lambda x1, x2: kernel(x1, x2, params),
assume_normalized_kernel=True,
)
print(f"Step {i+1} - Alignment = {current_alignment:.3f}")
##############################################################################
# We want to assess the impact of training the parameters of the quantum
# kernel. Thus, let's build a second support vector classifier with the
# trained kernel:
# First create a kernel with the trained parameter baked into it.
trained_kernel = lambda x1, x2: kernel(x1, x2, params)
# Second create a kernel matrix function using the trained kernel.
trained_kernel_matrix = lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, trained_kernel)
# Note that SVC expects the kernel argument to be a kernel matrix function.
svm_trained = SVC(kernel=trained_kernel_matrix).fit(X, Y)
##############################################################################
# We expect to see an accuracy improvement vs. the SVM with random
# parameters:
accuracy_trained = accuracy(svm_trained, X, Y)
print(f"The accuracy of a kernel with trained parameters is {accuracy_trained:.3f}")
##############################################################################
# We have now achieved perfect classification! 🎆
#
# Given that SVMs have proven to have good generalisation
# behavior, it will be interesting to inspect the decision boundaries of
# our classifier:
trained_plot_data = plot_decision_boundaries(svm_trained, plt.gca())
##############################################################################
# Indeed, we see that now not only every data instance falls within the
# correct class, but also that there are no strong artifacts that would make us
# distrust the model. In this sense, our approach benefits from both: on
# one hand it can adjust itself to the dataset, and on the other hand
# it is not expected to suffer from bad generalisation.
#
# References
# ----------
#
# .. [#Training_QEKs]
#
# Thomas Hubregtsen, David Wierichs, Elies Gil-Fuster, Peter-Jan H. S. Derks,
# Paul K. Faehrmann, and Johannes Jakob Meyer.
# "Training Quantum Embedding Kernels on Near-Term Quantum Computers."
# `arXiv:2105.02276 <https://arxiv.org/abs/2105.02276>`__, 2021.
#
# .. [#Alignment]
#
# Wang, Tinghua, Dongyan Zhao, and Shengfeng Tian.
# "An overview of kernel alignment and its applications."
# `Artificial Intelligence Review 43.2: 179-192 <https://link.springer.com/article/10.1007/s10462-012-9369-4>`__, 2015.
| [((5681, 5701), 'pennylane.numpy.random.seed', 'np.random.seed', (['(1359)'], {}), '(1359)\n', (5695, 5701), True, 'from pennylane import numpy as np\n'), ((10266, 10285), 'pennylane.adjoint', 'qml.adjoint', (['ansatz'], {}), '(ansatz)\n', (10277, 10285), True, 'import pennylane as qml\n'), ((10784, 10832), 'pennylane.device', 'qml.device', (['"""default.qubit"""'], {'wires': '(5)', 'shots': 'None'}), "('default.qubit', wires=5, shots=None)\n", (10794, 10832), True, 'import pennylane as qml\n'), ((11251, 11265), 'pennylane.qnode', 'qml.qnode', (['dev'], {}), '(dev)\n', (11260, 11265), True, 'import pennylane as qml\n'), ((13752, 13831), 'pennylane.kernels.square_kernel_matrix', 'qml.kernels.square_kernel_matrix', (['X', 'init_kernel'], {'assume_normalized_kernel': '(True)'}), '(X, init_kernel, assume_normalized_kernel=True)\n', (13784, 13831), True, 'import pennylane as qml\n'), ((20540, 20618), 'pennylane.kernels.target_alignment', 'qml.kernels.target_alignment', (['X', 'Y', 'init_kernel'], {'assume_normalized_kernel': '(True)'}), '(X, Y, init_kernel, assume_normalized_kernel=True)\n', (20568, 20618), True, 'import pennylane as qml\n'), ((22361, 22394), 'pennylane.GradientDescentOptimizer', 'qml.GradientDescentOptimizer', (['(0.2)'], {}), '(0.2)\n', (22389, 22394), True, 'import pennylane as qml\n'), ((6676, 6701), 'pennylane.numpy.hstack', 'np.hstack', (['[x1, 0.5 * x2]'], {}), '([x1, 0.5 * x2])\n', (6685, 6701), True, 'from pennylane import numpy as np\n'), ((6710, 6735), 'pennylane.numpy.hstack', 'np.hstack', (['[y1, 0.5 * y2]'], {}), '([y1, 0.5 * y2])\n', (6719, 6735), True, 'from pennylane import numpy as np\n'), ((6811, 6845), 'pennylane.numpy.hstack', 'np.hstack', (['[labels1, -1 * labels2]'], {}), '([labels1, -1 * labels2])\n', (6820, 6845), True, 'from pennylane import numpy as np\n'), ((7210, 7259), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (["['#FF0000', '#0000FF']"], {}), "(['#FF0000', '#0000FF'])\n", (7235, 7259), True, 'import matplotlib as mpl\n'), ((8625, 8634), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8632, 8634), True, 'import matplotlib.pyplot as plt\n'), ((9467, 9553), 'pennylane.broadcast', 'qml.broadcast', ([], {'unitary': 'qml.CRZ', 'pattern': '"""ring"""', 'wires': 'wires', 'parameters': 'params[1]'}), "(unitary=qml.CRZ, pattern='ring', wires=wires, parameters=\n params[1])\n", (9480, 9553), True, 'import pennylane as qml\n'), ((10419, 10498), 'pennylane.numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', '(num_layers, 2, num_wires)'], {'requires_grad': '(True)'}), '(0, 2 * np.pi, (num_layers, 2, num_wires), requires_grad=True)\n', (10436, 10498), True, 'from pennylane import numpy as np\n'), ((11393, 11415), 'pennylane.probs', 'qml.probs', ([], {'wires': 'wires'}), '(wires=wires)\n', (11402, 11415), True, 'import pennylane as qml\n'), ((13838, 13881), 'pennylane.numpy.printoptions', 'np.printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (13853, 13881), True, 'from pennylane import numpy as np\n'), ((16265, 16283), 'pennylane.numpy.zeros_like', 'np.zeros_like', (['_xx'], {}), '(_xx)\n', (16278, 16283), True, 'from pennylane import numpy as np\n'), ((16299, 16321), 'pennylane.numpy.ndindex', 'np.ndindex', (['*_xx.shape'], {}), '(*_xx.shape)\n', (16309, 16321), True, 'from pennylane import numpy as np\n'), ((16914, 16923), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (16921, 16923), True, 'import matplotlib.pyplot as plt\n'), ((21809, 21908), 
'pennylane.kernels.square_kernel_matrix', 'qml.kernels.square_kernel_matrix', (['X', 'kernel'], {'assume_normalized_kernel': 'assume_normalized_kernel'}), '(X, kernel, assume_normalized_kernel=\n assume_normalized_kernel)\n', (21841, 21908), True, 'import pennylane as qml\n'), ((22164, 22180), 'pennylane.numpy.outer', 'np.outer', (['_Y', '_Y'], {}), '(_Y, _Y)\n', (22172, 22180), True, 'from pennylane import numpy as np\n'), ((22201, 22214), 'pennylane.numpy.sum', 'np.sum', (['(K * T)'], {}), '(K * T)\n', (22207, 22214), True, 'from pennylane import numpy as np\n'), ((23622, 23671), 'pennylane.kernels.kernel_matrix', 'qml.kernels.kernel_matrix', (['X1', 'X2', 'trained_kernel'], {}), '(X1, X2, trained_kernel)\n', (23647, 23671), True, 'import pennylane as qml\n'), ((24452, 24461), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24459, 24461), True, 'import matplotlib.pyplot as plt\n'), ((6326, 6340), 'pennylane.numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (6332, 6340), True, 'from pennylane import numpy as np\n'), ((6355, 6369), 'pennylane.numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (6361, 6369), True, 'from pennylane import numpy as np\n'), ((6777, 6794), 'pennylane.numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (6786, 6794), True, 'from pennylane import numpy as np\n'), ((9331, 9357), 'pennylane.Hadamard', 'qml.Hadamard', ([], {'wires': '[wire]'}), '(wires=[wire])\n', (9343, 9357), True, 'import pennylane as qml\n'), ((9427, 9461), 'pennylane.RY', 'qml.RY', (['params[0, j]'], {'wires': '[wire]'}), '(params[0, j], wires=[wire])\n', (9433, 9461), True, 'import pennylane as qml\n'), ((16186, 16218), 'pennylane.numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'N_gridpoints'], {}), '(-1, 1, N_gridpoints)\n', (16197, 16218), True, 'from pennylane import numpy as np\n'), ((16220, 16252), 'pennylane.numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'N_gridpoints'], {}), '(-1, 1, N_gridpoints)\n', (16231, 16252), True, 'from pennylane import numpy as np\n'), ((22061, 22121), 'pennylane.numpy.array', 'np.array', (['[(y / nplus if y == 1 else y / nminus) for y in Y]'], {}), '([(y / nplus if y == 1 else y / nminus) for y in Y])\n', (22069, 22121), True, 'from pennylane import numpy as np\n'), ((22143, 22154), 'pennylane.numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (22151, 22154), True, 'from pennylane import numpy as np\n'), ((23763, 23796), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': 'trained_kernel_matrix'}), '(kernel=trained_kernel_matrix)\n', (23766, 23796), False, 'from sklearn.svm import SVC\n'), ((16531, 16580), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (["['#FF0000', '#0000FF']"], {}), "(['#FF0000', '#0000FF'])\n", (16556, 16580), True, 'import matplotlib as mpl\n'), ((22234, 22247), 'pennylane.numpy.sum', 'np.sum', (['(K * K)'], {}), '(K * K)\n', (22240, 22247), True, 'from pennylane import numpy as np\n'), ((22250, 22263), 'pennylane.numpy.sum', 'np.sum', (['(T * T)'], {}), '(T * T)\n', (22256, 22263), True, 'from pennylane import numpy as np\n'), ((6400, 6437), 'pennylane.numpy.floor_divide', 'np.floor_divide', (['angles', 'sector_angle'], {}), '(angles, sector_angle)\n', (6415, 6437), True, 'from pennylane import numpy as np\n'), ((7585, 7700), 'matplotlib.patches.Wedge', 'mpl.patches.Wedge', (['(0, 0)', '(1)', '(i * sector_angle)', '((i + 1) * sector_angle)'], {'lw': '(0)', 'color': 'color', 'alpha': '(0.1)', 'width': '(0.5)'}), '((0, 0), 1, i * sector_angle, (i + 1) * sector_angle, lw=0,\n color=color, alpha=0.1, 
width=0.5)\n', (7602, 7700), True, 'import matplotlib as mpl\n'), ((7933, 8046), 'matplotlib.patches.Wedge', 'mpl.patches.Wedge', (['(0, 0)', '(0.5)', '(i * sector_angle)', '((i + 1) * sector_angle)'], {'lw': '(0)', 'color': 'other_color', 'alpha': '(0.1)'}), '((0, 0), 0.5, i * sector_angle, (i + 1) * sector_angle, lw\n =0, color=other_color, alpha=0.1)\n', (7950, 8046), True, 'import matplotlib as mpl\n'), ((16361, 16391), 'pennylane.numpy.array', 'np.array', (['[_xx[idx], _yy[idx]]'], {}), '([_xx[idx], _yy[idx]])\n', (16369, 16391), True, 'from pennylane import numpy as np\n'), ((21998, 22009), 'pennylane.numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (22006, 22009), True, 'from pennylane import numpy as np\n'), ((15274, 15320), 'pennylane.kernels.kernel_matrix', 'qml.kernels.kernel_matrix', (['X1', 'X2', 'init_kernel'], {}), '(X1, X2, init_kernel)\n', (15299, 15320), True, 'import pennylane as qml\n')] |
scottkaz/PyLoopover | main.py | 8f11f559c09747400fe6bb520ab521dbafa90e97 | #!/usr/bin/python3
import pygame
import random
import time
##VARIABLES TO CHANGE
width = 500
height = 500
stats_height = 150
board_size = 5
window_name = "PyLoopover "+str(board_size)+"x"+str(board_size)
scramble_turns = 50
t_round = 3
FPS = 30
##DONT CHANGE THESE BOIS
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (32,200,32)
keys = {"w":0,"a":0,"s":0,"d":0,"q":0}
last_was_Q = False
class Tile:
def __init__(self,number,s):
self.number = number
n = number-1
self.color = ((n/s)*(255/s),(n%s)*(255/s),128)
def draw(self,screen,font,x,y,width,height):
pygame.draw.rect(screen,self.color,(x,y,width,height))
text = font.render(str(self.number),True,BLACK)
screen.blit(text,(x,y))
class Board:
content = []
start_t=0
end_t=0
game=False
moves = 0
def __init__(self,size):
self.size = size
for i in range(0,size):
self.content.append([])
for j in range(0,size):
self.content[i].append(None)
self.content[i][j] = Tile(i+j*size+1,size)
def rotate_left(self,y):
new = []
for i in range(0,self.size):
new.append(self.content[(i-1)%self.size][y])
for i in range(0,self.size):
self.content[i][y] = new[i]
self.moves+=1
return new
def rotate_right(self,y):
new = []
for i in range(0,self.size):
new.append(self.content[(i+1)%self.size][y])
for i in range(0,self.size):
self.content[i][y] = new[i]
self.moves+=1
return new
def rotate_down(self,x):
new = []
for i in range(0,self.size):
new.append(self.content[x][(i-1)%self.size])
for i in range(0,self.size):
self.content[x][i] = new[i]
self.moves+=1
return new
def rotate_up(self,x):
new = []
for i in range(0,self.size):
new.append(self.content[x][(i+1)%self.size])
for i in range(0,self.size):
self.content[x][i] = new[i]
self.moves+=1
return new
def draw(self,screen,font):
for i in range(0,self.size):
for j in range(0,self.size):
w = (width / self.size)
h = (height / self.size)
x = i * w
y = j * h
self.content[i][j].draw(screen,font,x,y,w,h)
def scramble(self,n):
for i in range(0,n):
o = random.randint(0,3)
if o == 0:
self.rotate_left(random.randint(0,board_size-1))
elif o == 1:
self.rotate_right(random.randint(0,board_size-1))
elif o == 2:
self.rotate_up(random.randint(0,board_size-1))
else:
self.rotate_down(random.randint(0,board_size-1))
self.game=False
self.moves=0
return True
def is_solved(self):
for i in range(0,self.size):
for j in range(0,self.size):
if self.content[i][j].number != i+j*self.size+1:
return False
return True
def start_time(self):
print("time has started")
self.start_t = time.monotonic()
self.game = True
		return self.start_t
def end_time(self):
print("time has ended")
self.end_t = time.monotonic()
		return self.end_t
def get_time(self):
if (not self.is_solved()) and self.game:
return (time.monotonic() - self.start_t , BLACK)
elif self.is_solved() and self.game:
return (self.end_t - self.start_t , GREEN)
else:
return (0 , BLACK)
def main():
	gameboard = Board(board_size)
	last_was_Q = False  # initialise locally; the assignments below would otherwise make this an unbound local
pygame.init()
pygame.mixer.quit() #weird workaroud
#name the window & size it.
pygame.display.set_caption(window_name)
screen = pygame.display.set_mode((width,height+stats_height),0,32)
#setup framerate
pygame.time.set_timer(pygame.USEREVENT+1,int((1/FPS)*1000))
#setup event que
pygame.event.set_allowed(None) #start with no events allowed
pygame.event.set_allowed(pygame.USEREVENT+1) #timer event
pygame.event.set_allowed(pygame.KEYDOWN)
pygame.event.set_allowed(pygame.QUIT) #4 quitters
#setup fonts
font = pygame.font.SysFont('mono',int((width/board_size)/1.14))
font2 = pygame.font.SysFont('mono',int(stats_height/2.3))
#main l00p
running = True
while running:
#eevveeentttss???
event = pygame.event.wait()
if event.type == pygame.USEREVENT+1:
#a fresh canvas
screen.fill(WHITE)
#draw stats
time = gameboard.get_time()
time_str = str( int( time[0] * (10 ** t_round) ) / (10 ** t_round) )
text_timer = font2.render("Time :"+time_str,True,time[1])
text_moves = font2.render("Moves:"+str(gameboard.moves),True,time[1])
screen.blit(text_timer,(0,height))
screen.blit(text_moves,(0,height+(stats_height/2)))
#draw board
gameboard.draw(screen,font)
#update da screeeeeen
pygame.display.update()
#end the game
if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
gameboard.end_time()
elif event.type == pygame.KEYDOWN:
k = chr(event.key) #gimme a CHAR, not some weird integer
domap = {
"w":"gameboard.rotate_up(int(pygame.mouse.get_pos()[0]/(width/board_size)))",
"a":"gameboard.rotate_right(int(pygame.mouse.get_pos()[1]/(height/board_size)))",
"s":"gameboard.rotate_down(int(pygame.mouse.get_pos()[0]/(width/board_size)))",
"d":"gameboard.rotate_left(int(pygame.mouse.get_pos()[1]/(height/board_size)))",
"q":"gameboard.scramble(scramble_turns)"
} #i guess?
if k in ['w','a','s','d','q']:
#starting game logic
if k == "q":
last_was_Q = True
else:
if last_was_Q:
gameboard.start_time()
last_was_Q = False
exec(domap[k])
#end the game
if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
gameboard.end_time()
#for quitters
elif event.type == pygame.QUIT:
print("Quitting...")
running = False
else:
print("err0r, bAd 3v3nt lol")
assert False
if __name__ == "__main__":
main()
| [((3090, 3103), 'pygame.init', 'pygame.init', ([], {}), '()\n', (3101, 3103), False, 'import pygame\n'), ((3105, 3124), 'pygame.mixer.quit', 'pygame.mixer.quit', ([], {}), '()\n', (3122, 3124), False, 'import pygame\n'), ((3172, 3211), 'pygame.display.set_caption', 'pygame.display.set_caption', (['window_name'], {}), '(window_name)\n', (3198, 3211), False, 'import pygame\n'), ((3222, 3284), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height + stats_height)', '(0)', '(32)'], {}), '((width, height + stats_height), 0, 32)\n', (3245, 3284), False, 'import pygame\n'), ((3378, 3408), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['None'], {}), '(None)\n', (3402, 3408), False, 'import pygame\n'), ((3440, 3486), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['(pygame.USEREVENT + 1)'], {}), '(pygame.USEREVENT + 1)\n', (3464, 3486), False, 'import pygame\n'), ((3499, 3539), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['pygame.KEYDOWN'], {}), '(pygame.KEYDOWN)\n', (3523, 3539), False, 'import pygame\n'), ((3541, 3578), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['pygame.QUIT'], {}), '(pygame.QUIT)\n', (3565, 3578), False, 'import pygame\n'), ((566, 625), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self.color', '(x, y, width, height)'], {}), '(screen, self.color, (x, y, width, height))\n', (582, 625), False, 'import pygame\n'), ((2651, 2667), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2665, 2667), False, 'import time\n'), ((2774, 2790), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2788, 2790), False, 'import time\n'), ((3803, 3822), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (3820, 3822), False, 'import pygame\n'), ((2080, 2100), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (2094, 2100), False, 'import random\n'), ((4324, 4347), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4345, 4347), False, 'import pygame\n'), ((2135, 2168), 'random.randint', 'random.randint', (['(0)', '(board_size - 1)'], {}), '(0, board_size - 1)\n', (2149, 2168), False, 'import random\n'), ((2889, 2905), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2903, 2905), False, 'import time\n'), ((2205, 2238), 'random.randint', 'random.randint', (['(0)', '(board_size - 1)'], {}), '(0, board_size - 1)\n', (2219, 2238), False, 'import random\n'), ((2272, 2305), 'random.randint', 'random.randint', (['(0)', '(board_size - 1)'], {}), '(0, board_size - 1)\n', (2286, 2305), False, 'import random\n'), ((2334, 2367), 'random.randint', 'random.randint', (['(0)', '(board_size - 1)'], {}), '(0, board_size - 1)\n', (2348, 2367), False, 'import random\n')] |
yoojunwoong/python_review01 | test3_05.py | 9bb34f4ef75f951cd090fa623728c9542e7c7c27 | # for문에서 continue 사용하기, continue = skip개념!!!
for i in range(1,11):
if i == 6:
continue;
print(i);
print(i);
print(i);
print(i);
print(i);
| [] |
anushreejangid/csmpe-main | csmpe/core_plugins/csm_install_operations/exr/package_lib.py | c62ecb3ce4e44b188ed480d06a6d9d21967c6a2a | # =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
"""
NCS4K
Production Packages
External Names Internal Names
ncs4k-full-x.iso-6.0.2
ncs4k-mini-x.iso-6.0.2
ncs4k-k9sec.pkg-6.0.2
ncs4k-mpls.pkg-6.0.2
ncs4k-mcast.pkg-6.0.2
ncs4k-mgbl.pkg-6.0.2
NCS6K
Production Packages
External Names Internal Names
ncs6k-doc.pkg-5.2.4 ncs6k-doc-5.2.4
ncs6k-li.pkg-5.2.4 ncs6k-li-5.2.4
ncs6k-mcast.pkg-5.2.4 ncs6k-mcast-5.2.4
ncs6k-mgbl.pkg-5.2.4 ncs6k-mgbl-5.2.4
ncs6k-mini-x.iso-5.2.4 ncs6k-mini-x-5.2.4
ncs6k-mpls.pkg-5.2.4 ncs6k-mpls-5.2.4
ncs6k-sysadmin.iso-5.2.4 ncs6k-sysadmin-5.2.4
ncs6k-full-x.iso-5.2.4 ncs6k-full-x-5.2.4
ncs6k-5.2.5.CSCuy47880.smu ncs6k-5.2.5.CSCuy47880-1.0.0 <- subversion added
Engineering Packages
External Names Internal Names
ncs6k-mcast.pkg-5.2.5.47I.DT_IMAGE ncs6k-mcast-5.2.5.47I
ncs6k-mini-x.iso-6.1.0.07I.DT_IMAGE ncs6k-xr-5.2.5.47I
ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i.smu ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i
ASR9K-64
Production Packages - not finalized yet
External Names Internal Names
asr9k-mcast-x64-2.0.0.0-r611.x86_64.rpm asr9k-mcast-x64-2.0.0.0-r611
asr9k-bgp-x64-1.0.0.0-r611.x86_64.rpm asr9k-bgp-x64-1.0.0.0-r611
asr9k-mgbl-x64-3.0.0.0-r611.x86_64.rpm asr9k-mgbl-x64-3.0.0.0-r611
asr9k-full-x64.iso-6.1.1 asr9k-xr-6.1.1
asr9k-mini-x64.iso-6.1.1 asr9k-xr-6.1.1
Engineering Packages
External Names Internal Names
asr9k-mcast-x64-2.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE asr9k-mcast-x64-2.0.0.0-r61116I
asr9k-bgp-x64-1.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE asr9k-bgp-x64-1.0.0.0-r61116I
asr9k-mgbl-x64-3.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE asr9k-mgbl-x64-3.0.0.0-r61116I
asr9k-full-x64.iso-6.1.1.16I.DT_IMAGE asr9k-full-x64-6.1.1.16I
asr9k-mini-x64.iso-6.1.1.16I.DT_IMAGE asr9k-mini-x64-6.1.1.16I
NCS5K
Production Packages
External Names Internal Names
ncs5k-sysadmin.iso-6.0.1 ncs5k-sysadmin-6.0.1
ncs5k-full-x.iso-6.0.1 ncs5k-xr-6.0.1
ncs5k-mini-x.iso-6.0.1 ncs5k-xr-6.0.1
ncs5k-mcast-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-mcast-2.0.0.0-r601
ncs5k-mgbl-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-mgbl-2.0.0.0-r601
ncs5k-mpls-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-mpls-2.0.0.0-r601
ncs5k-k9sec-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-k9sec-2.0.0.0-r601
ncs5k-isis-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-isis-2.0.0.0-r601
ncs5k-ospf-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-ospf-2.0.0.0-r601
Engineering Packages
External Names Internal Names
ncs5k-mgbl-x64-3.0.0.0-r61116I.x86_64.rpm-6.0.1.16I.DT_IMAGE ncs5k-mgbl-3.0.0.0-r60116I
ncs5k-sysadmin.iso-6.0.1 ncs5k-sysadmin-6.0.1.26I
ncs5k-full-x.iso-6.0.1.16I.DT_IMAGE ncs5k-xr-6.0.1.16I
NCS5500
Production Packages
External Names Internal Names
ncs5500-eigrp-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-eigrp-2.0.0.0-r601
ncs5500-isis-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-isis-2.0.0.0-r601
ncs5500-k9sec-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-k9sec-2.0.0.0-r601
ncs5500-m2m-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-m2m-2.0.0.0-r601
ncs5500-mgbl-3.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-mgbl-3.0.0.0-r601
ncs5500-mini-x.iso-6.0.1 ncs5500-xr-6.0.1
ncs5500-mpls-te-rsvp-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-mpls-te-rsvp-2.0.0.0-r601
ncs5500-mpls-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-mpls-2.0.0.0-r601
ncs5500-ospf-1.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-ospf-1.0.0.0-r601
ncs5500-parser-1.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-parser-1.0.0.0-r601
"""
import re
platforms = ['asr9k', 'ncs1k', 'ncs4k', 'ncs5k', 'ncs5500', 'ncs6k', 'xrv9k']
version_dict = {"asr9k ncs1k ncs5k ncs5500 xrv9k": # 61117I or 611 or 6.1.1.17I or 6.1.1
re.compile("(?P<VERSION>(\d+\d+\d+(\d+\w+)?)|(\d+\.\d+\.\d+(\.\d+\w+)?)(?!\.\d)(?!-))"),
"ncs4k ncs6k": # 5.2.4 or 5.2.4.47I
re.compile("(?P<VERSION>\d+\.\d+\.\d+(\.\d+\w+)?)"),
}
smu_re = re.compile("(?P<SMU>CSC[a-z]{2}\d{5})")
subversion_dict = {"asr9k ncs1k ncs5k ncs5500 xrv9k":
re.compile("-(?P<SUBVERSION>\d+\.\d+\.\d+\.\d+)-"), # 2.0.0.0
"ncs4k ncs6k":
re.compile("CSC.*(?P<SUBVERSION>\d+\.\d+\.\d+?)"), # 0.0.4
}
class SoftwarePackage(object):
def __init__(self, package_name):
self.package_name = package_name
self._platform = None
self._package_type = None
self._version = None
self._smu = None
self._subversion = None
@property
def platform(self):
if not self._platform:
for platform in platforms:
if platform + "-" in self.package_name:
self._platform = platform
break
return self._platform
@property
def package_type(self):
if not self._package_type:
# For ASR9K-X64, NCS1K, NCS5K, NCS5500:
# Extract the package type string before X.X.X.X
# For NCS6K
# Extract the package type string before X.X.X
pattern = '-\d+\.\d+\.\d+' if self.platform == 'ncs6k' or \
self.platform == 'ncs4k' else '-\d\.\d\.\d.\d'
if self.platform and self.platform in self.package_name:
match = re.search(pattern, self.package_name)
# Special handling for mini, full, and sysadmin ISO on ASR9K-X64, NCS1K, NCS5K, NCS5500
# Example: ncs5500-mini-x.iso-6.0.1, asr9k-full-x64.iso-6.1.1
# Package type string is before the 3 part version string
# External Name: ncs5k-goldenk9-x.iso-6.3.1.11I.0, Internal Name: ncs5k-goldenk9-x-6.3.1.11I
if not match and sum([x in self.package_name for x in ['full', 'mini', 'sysadmin', 'goldenk9']]) > 0:
# Use the three part match for these ISO packages
match = re.search('-\d+\.\d+\.\d+', self.package_name)
if match:
# Extract the package type
self._package_type = self.package_name[0:match.start()].replace(self.platform + '-', '')
if self._package_type:
# Takes care the external to internal name matching
# Example, ncs6k-mgbl.pkg-5.2.5 -> mgbl, ncs5500-mini-x.iso-6.0.1 -> mini-x
self._package_type = self._package_type.replace('.pkg', '').replace('.iso', '')
return self._package_type
@property
def version(self):
if not self._version:
dict_values = self.get_values(version_dict, self.platform)
if self.platform and dict_values:
to_match = self.package_name.replace(self.platform, '')
result = re.search(dict_values, to_match)
if result:
self._version = result.group("VERSION")
return self._version
@property
def smu(self):
if not self._smu:
result = re.search(smu_re, self.package_name)
if result:
self._smu = result.group("SMU")
return self._smu
@property
def subversion(self):
if not self._subversion:
dict_values = self.get_values(subversion_dict, self.platform)
if self.platform and dict_values:
# For NCS6K, only need to consider subversion if it is a SMU.
if self.platform in ["asr9k", "ncs1k", "ncs5k", "ncs5500", "xrv9k"] or self.smu:
to_match = self.package_name.replace(self.platform, '')
result = re.search(dict_values, to_match)
if result:
self._subversion = result.group("SUBVERSION")
return self._subversion
def get_values(self, dictionary, key):
for keys in dictionary.keys():
if key in keys.split():
return dictionary.get(keys)
return None
def is_valid(self):
return self.platform and self.version and (self.package_type or self.smu)
def __eq__(self, other):
result = self.platform == other.platform and \
(self.package_type == other.package_type) and \
self.version == other.version and \
self.smu == other.smu and \
(self.subversion == other.subversion if self.subversion and other.subversion else True)
return result
def __hash__(self):
return hash("{}{}{}{}{}".format(
self.platform, self.package_type, self.version, self.smu, self.subversion))
@staticmethod
def from_show_cmd(cmd):
software_packages = set()
data = cmd.split()
for line in data:
software_package = SoftwarePackage(line)
if software_package.is_valid():
software_packages.add(software_package)
return software_packages
@staticmethod
def from_package_list(pkg_list):
software_packages = set()
for pkg in pkg_list:
software_package = SoftwarePackage(pkg)
if software_package.is_valid():
""" for debugging
print('package_name', software_package.package_name,
'platform', software_package.platform, 'package_type', software_package.package_type,
'version', software_package.version, 'smu', software_package.smu,
'subversion', software_package.subversion)
"""
software_packages.add(software_package)
return software_packages
def __repr__(self):
return self.package_name
def __str__(self):
return self.__repr__()
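# A minimal usage sketch (added for illustration; the package name below is taken
# from the examples in the module docstring above):
#
#     >>> pkg = SoftwarePackage('ncs6k-mcast.pkg-5.2.4')
#     >>> pkg.platform, pkg.package_type, pkg.version
#     ('ncs6k', 'mcast', '5.2.4')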
| [((6286, 6326), 're.compile', 're.compile', (['"""(?P<SMU>CSC[a-z]{2}\\\\d{5})"""'], {}), "('(?P<SMU>CSC[a-z]{2}\\\\d{5})')\n", (6296, 6326), False, 'import re\n'), ((6027, 6139), 're.compile', 're.compile', (['"""(?P<VERSION>(\\\\d+\\\\d+\\\\d+(\\\\d+\\\\w+)?)|(\\\\d+\\\\.\\\\d+\\\\.\\\\d+(\\\\.\\\\d+\\\\w+)?)(?!\\\\.\\\\d)(?!-))"""'], {}), "(\n '(?P<VERSION>(\\\\d+\\\\d+\\\\d+(\\\\d+\\\\w+)?)|(\\\\d+\\\\.\\\\d+\\\\.\\\\d+(\\\\.\\\\d+\\\\w+)?)(?!\\\\.\\\\d)(?!-))'\n )\n", (6037, 6139), False, 'import re\n'), ((6205, 6264), 're.compile', 're.compile', (['"""(?P<VERSION>\\\\d+\\\\.\\\\d+\\\\.\\\\d+(\\\\.\\\\d+\\\\w+)?)"""'], {}), "('(?P<VERSION>\\\\d+\\\\.\\\\d+\\\\.\\\\d+(\\\\.\\\\d+\\\\w+)?)')\n", (6215, 6264), False, 'import re\n'), ((6400, 6457), 're.compile', 're.compile', (['"""-(?P<SUBVERSION>\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+)-"""'], {}), "('-(?P<SUBVERSION>\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+)-')\n", (6410, 6457), False, 'import re\n'), ((6516, 6570), 're.compile', 're.compile', (['"""CSC.*(?P<SUBVERSION>\\\\d+\\\\.\\\\d+\\\\.\\\\d+?)"""'], {}), "('CSC.*(?P<SUBVERSION>\\\\d+\\\\.\\\\d+\\\\.\\\\d+?)')\n", (6526, 6570), False, 'import re\n'), ((9347, 9383), 're.search', 're.search', (['smu_re', 'self.package_name'], {}), '(smu_re, self.package_name)\n', (9356, 9383), False, 'import re\n'), ((7640, 7677), 're.search', 're.search', (['pattern', 'self.package_name'], {}), '(pattern, self.package_name)\n', (7649, 7677), False, 'import re\n'), ((9116, 9148), 're.search', 're.search', (['dict_values', 'to_match'], {}), '(dict_values, to_match)\n', (9125, 9148), False, 'import re\n'), ((8261, 8312), 're.search', 're.search', (['"""-\\\\d+\\\\.\\\\d+\\\\.\\\\d+"""', 'self.package_name'], {}), "('-\\\\d+\\\\.\\\\d+\\\\.\\\\d+', self.package_name)\n", (8270, 8312), False, 'import re\n'), ((9955, 9987), 're.search', 're.search', (['dict_values', 'to_match'], {}), '(dict_values, to_match)\n', (9964, 9987), False, 'import re\n')] |
xros/megaboat | megaboat.py | e55e7959c39677ad2a0cdbb00ac88814b838d3e3 | # -*- coding: utf-8 -*-
# Copyright to Alexander Liu.
# Any redistribution of this copy should inform its author. For commercial use, please contact the author for authorization. Apr 2014
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from lxml import etree
import time
import json
import urllib
import urllib2
# For media posting
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
class ParsingContainer(object):
"""Parsing Wechat messages for whose types are of : 'text', 'image', 'voice', 'video', 'location', 'link'
After making a new instance of the class, need to declare the 'MsgType'
For example,
$~ python
>>> holder = ParsingContainer()
>>> hasattr(holder, "_Content")
>>> True
>>> holder.initType(MsgType='video')
>>> hasattr(holder, "_PicUrl")
>>> True
>>> holder.initType(MsgType='text') # Or we can just ellipsis this operation since by default its 'text'
>>> hasattr(holder, "_PicUrl")
>>> False
>>> hasattr(holder, "_Content")
>>> True
>>> holder.getElementByTag('Content')
>>> ''
"""
# By default, MsgType is set as 'text'
MsgType = 'text'
# Unique tages in all the mapping relationship
#
# For those tags in-common of normal message
global commonTag
commonTag = ['ToUserName', 'FromUserName', 'CreateTime', 'MsgId', 'MsgType']
# For normal message mapping
global normalMapping
normalMapping = {
'text':['Content'],
'image':['PicUrl', 'MediaId'],
'voice':['MediaId','Format'],
'video':['MediaId','ThumbMeiaId'],
'location':['Location_X','Location_Y','Scale', 'Label'],
'link':['Title','Description','Url'],
}
# For event message mapping
global eventMapping
eventMapping = {
# The list presents the combined tag set of the event message
'event':['Event','EventKey','Ticket','Latitude','Longitude','Precision' ],
}
# For recognition message mapping
global recognitionMapping
recognitionMapping = {
'voice':['MediaId','Format','Recognition'],
}
def __init__(self, incomingMessage='<xml></xml>'):
# pre-set some common variables
root = etree.fromstring(incomingMessage)
# The 5 ones in common
if root.find('ToUserName') is not None:
self._ToUserName = root.find('ToUserName').text
else:
self._ToUserName = ''
if root.find('FromUserName') is not None:
self._FromUserName = root.find('FromUserName').text
else:
self._FromUserName = ''
if root.find('CreateTime') is not None:
self._CreateTime = root.find('CreateTime').text
else:
self._CreateTime = '1000000000'
if root.find('MsgType') is not None:
self._MsgType = root.find('MsgType').text
else:
self._MsgType = ''
if root.find('MsgId') is not None:
self._MsgId = root.find('MsgId').text
else:
self._MsgId = ''
# Store the XML incomingMessage if has
# For text message only
if self.MsgType == 'text':
if root.find('Content') is not None:
self._Content = root.find('Content').text
else:
self._Content = ''
# For image message only
elif self.MsgType == 'image':
if root.find('PicUrl') is not None:
self._PicUrl = root.find('PicUrl').text
else:
self._PicUrl = ''
if root.find('MediaId') is not None:
self._MediaId = root.find('MediaId').text
else:
self._MediaId = ''
# For voice message only
elif self.MsgType == 'voice':
if root.find('MediaId') is not None:
self._MediaId = root.find('MediaId').text
else:
self._MediaId = ''
if root.find('Format') is not None:
self._Format = root.find('Format').text
else:
self._Format = ''
# For video message only
elif self.MsgType == 'video':
if root.find('MediaId') is not None:
self._MediaId = root.find('MediaId').text
else:
self._MediaId = ''
if root.find('ThumbMediaId') is not None:
self._ThumbMediaId = root.find('ThumbMediaId').text
else:
self._ThumbMediaId = ''
# For location message only
elif self.MsgType == 'location':
if root.find('Location_X') is not None:
self._Location_X = root.find('Location_X').text
else:
self._Location_X = ''
if root.find('Location_Y') is not None:
self._Location_Y = root.find('Location_Y').text
else:
self._Location_Y = ''
if root.find('Scale') is not None:
self._Scale = root.find('Scale').text
else:
self._Scale = ''
if root.find('Label') is not None:
self._Label = root.find('Label').text
else:
self._Label = ''
# For link message only
elif self.MsgType == 'link':
if root.find('Title') is not None:
self._Title = root.find('Title').text
else:
self._Title = ''
if root.find('Description') is not None:
self._Description = root.find('Description').text
else:
self._Description = ''
if root.find('Url') is not None:
self._Url = root.find('Url').text
else:
self._Url = ''
# For event message only
elif self.MsgType == 'event':
# It has to have a ```self._Event``` for event message certainly
if root.find('Event') is not None:
self._Event = root.find('Event').text
else:
self._Event = ''
if root.find('EventKey') is not None:
self._EventKey = root.find('EventKey').text
if root.find('Ticket') is not None:
self._Ticket = root.find('Ticket').text
if root.find('Latitude') is not None:
self._Latitude = root.find('Latitude').text
if root.find('Longitude') is not None:
self._Longitude = root.find('Longitude').text
if root.find('Precision') is not None:
self._Precision = root.find('Precision').text
def initType(self, MsgType='text', incomingMessage='<xml></xml>'):
''' To initialize message type
'''
MsgType_list = ['text', 'image', 'voice', 'video', 'location', 'link', 'event']
if MsgType not in MsgType_list:
raise ValueError, "MsgType '%s' not valid " % MsgType
for i in MsgType_list:
if MsgType == i:
self.MsgType = i
break
# Delete the common tags
for c in commonTag:
try:
delattr(self, '_' + c)
except:
pass
# Delete the unuseful elements in normalMapping
for k in normalMapping:
if k !=self.MsgType:
for m in normalMapping[k]:
try:
delattr(self, '_' + m)
except:
pass
# Delete the unuseful elements in eventMapping
for k in eventMapping:
for e in eventMapping[k]:
try:
delattr(self, '_' + e)
except:
pass
self.__init__(incomingMessage)
# releasing method
def __del__(self):
pass
#@property
def getElementByTag(self, tag):
'''To get element from the tag
'''
try:
gotten = getattr(self, "_" + tag)
except:
return None
##raise ValueError
#tmp = "Instance has no attribute _%s" % tag
#raise AttributeError, tmp
else:
return gotten
def digest(self, incomingMessage):
'''To digest the XML message passed from wechat server
Make the value variable
The 'incomingMessage' is of XML
        According to its content this will assign values to ```self.MsgType``` etc. The logic is as follows:
1) check parent message type :"MsgType"
2) check subclass message type if "Voice Recognition", "Event", "Normal"
3) check children class message type
'''
root = etree.fromstring(incomingMessage)
msgType = root.find("MsgType").text
# Get message type based from the ```incomingMessage``` variable
if msgType in ['text', 'image', 'voice', 'video', 'location', 'link', 'event']:
# Check if the incomingMessage has tag 'Recognition' then, it is a voice recognition message
if root.find("Recognition") is not None:
self.type = 'recognition'
# Check if the incomingMessage has tag 'Event' then, it is a voice event message
elif root.find("Event") is not None:
self.type = 'event'
# After all then 'normal' message
else:
self.type = 'normal'
# For normal messages
if self.type == 'normal':
if msgType == 'text':
self.initType('text', incomingMessage)
elif msgType == 'image':
self.initType('image', incomingMessage)
elif msgType == 'voice':
self.initType('voice', incomingMessage)
elif msgType == 'video':
self.initType('video', incomingMessage)
elif msgType == 'location':
self.initType('location', incomingMessage)
elif msgType == 'link':
self.initType('link', incomingMessage)
elif msgType == 'image':
self.initType('image', incomingMessage)
# TODO
# For event messages
if self.type == 'recognition':
self.initType('voice', incomingMessage)
# Construct a var ```self._Recognition``` since it is just of this more than that of 'normal message => voice'
self._Recognition = root.find("Recognition").text
# For recognition messages
if self.type == 'event':
self.initType('event', incomingMessage)
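# A minimal usage sketch of ParsingContainer.digest() (added for illustration;
# the XML below is a hypothetical example of a plain text message):
#
#     >>> sample = ('<xml>'
#     ...           '<ToUserName><![CDATA[gh_account]]></ToUserName>'
#     ...           '<FromUserName><![CDATA[some_openid]]></FromUserName>'
#     ...           '<CreateTime>1397808770</CreateTime>'
#     ...           '<MsgType><![CDATA[text]]></MsgType>'
#     ...           '<Content><![CDATA[Hello]]></Content>'
#     ...           '<MsgId>1234567890123456</MsgId>'
#     ...           '</xml>')
#     >>> holder = ParsingContainer()
#     >>> holder.digest(sample)
#     >>> holder.getElementByTag('Content')
#     'Hello'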
class RespondingContainer(object):
"""Package XML to reponse to determained wechat message
For more information please visit: http://mp.weixin.qq.com/wiki/index.php?title=%E5%8F%91%E9%80%81%E8%A2%AB%E5%8A%A8%E5%93%8D%E5%BA%94%E6%B6%88%E6%81%AF
Usage:
>>> rc = RespondingContainer()
    >>> rc.initType('text') # Or we can omit this since it is 'text' by default
>>> # Notice we don't need to set the 'CreateTime' since it has been generated automatically :)
>>> rc.setElementByTag(FromUserName='the_server', ToUserName='the_wechat_client',Content='Hello dude!')
>>> tpl_out = rc.dumpXML()
>>> tpl_out
    <xml>
<ToUserName>the_wechat_client</ToUserName>
<FromUserName>the_server</FromUserName>
<CreateTime>1397808770</CreateTime>
<MsgType>text</MsgType>
<Content>Hello dude!</Content>
</xml>
>>>
"""
def __init__(self, MsgType='text'):
self._MsgType = MsgType
# By default set root as the 'text' XML format
the_tpl = globals()['tpl_' + self._MsgType].encode('utf-8').decode('utf-8')
self.root = etree.fromstring(the_tpl)
#print self.root.find("FromUserName").text
#print type(self.root.find("FromUserName").text)
def initType(self, MsgType='text'):
tpl_list = ['text', 'image', 'voice', 'video', 'music', 'news']
if MsgType not in tpl_list:
raise ValueError, "Invalid responsing message MsgType '%s'" % MsgType
else:
## Load the template
#for i in tpl_list:
# if MsgType == i:
# self._MsgType = MsgType
# ## the the template
# the_xml = globals()['tpl_'+i]
# self.root = etree.fromstring( the_xml )
# break
## Set the default tag value
### Get all the tags
#child_list = []
#for child in self.root.getchildren():
# child_list += [str(child)]
### Attach 'tag' object to class to make something as : 'self._FromUserName'
#for i in child_list:
# if i == 'CreateTime':
# setattr(self,"_"+i, str(int(time.time())))
# else:
# setattr(self,"_"+i, '')
self.__init__(MsgType)
#def setElementByTag(self, tag):
def setElementByTag(self, **kwargs):
""" To package XML message into an object
Usage:
>>> setElementByTag(FromUserName='the_wechat_server',ToUserName='the_wechat_client',Content='Hello dude!')
        # In this way we can then use ```dumpXML()``` to get the XML we need to respond to wechat clients! :)
"""
## assign the basic time
self.root.find('CreateTime').text = str(int(time.time()))
#print "-----"
#print self._MsgType
## For text message only
if self._MsgType == 'text':
# To set attribute value to such as: 'self._FromUsername'
for k, v in kwargs.items():
try:
## assign value to the object
#getattr(self, "_"+k) = v
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
#raise AttributeError, "Message type '%s' has no attribute/tag '%s'" % (self._MsgType, k)
## For image message only
elif self._MsgType == 'image':
# To set attribute value of the XML special for image
for k, v in kwargs.items():
if k == 'MediaId':
#print v
#print etree.tostring(self.root)
self.root.find('Image').find('MediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
## For voice message only
elif self._MsgType == 'voice':
# To set attribute value of the XML special for image
for k, v in kwargs.items():
if k == 'MediaId':
#print v
#print etree.tostring(self.root)
self.root.find('Voice').find('MediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
## For video message only
elif self._MsgType == 'video':
# To set attribute value of the XML special for image
for k, v in kwargs.items():
if k == 'MediaId':
#print v
#print etree.tostring(self.root)
self.root.find('Video').find('MediaId').text = v
elif k == 'Title':
self.root.find('Video').find('Title').text = v
elif k == 'Description':
self.root.find('Video').find('Description').text = v
elif k == 'MusicUrl':
self.root.find('Video').find('MusicUrl').text = v
elif k == 'HQMusicUrl':
self.root.find('Video').find('HQMusicUrl').text = v
elif k == 'ThumbMediaId':
self.root.find('Video').find('ThumbMediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
## For article message only
elif self._MsgType == 'article':
# To set attribute value of the XML special for image
for k, v in kwargs.items():
if k == 'ArticleCount':
self.root.find(k).text = v
if k == 'Articles':
# TODO to generate articles as
#print v
#print etree.tostring(self.root)
self.root.find('Video').find('MediaId').text = v
elif k == 'Title':
self.root.find('Video').find('Title').text = v
elif k == 'Description':
self.root.find('Video').find('Description').text = v
elif k == 'MusicUrl':
self.root.find('Video').find('MusicUrl').text = v
elif k == 'HQMusicUrl':
self.root.find('Video').find('HQMusicUrl').text = v
elif k == 'ThumbMediaId':
self.root.find('Video').find('ThumbMediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
def dumpXML(self):
# To dump the XML we need
# the ```self.root``` has been assigned already
return etree.tostring(self.root, encoding='utf-8',method='xml',pretty_print=True)
# The down blow are the templates of all the responsing message valid for wechat
# For more information, please visit : http://mp.weixin.qq.com/wiki/index.php?title=%E5%8F%91%E9%80%81%E8%A2%AB%E5%8A%A8%E5%93%8D%E5%BA%94%E6%B6%88%E6%81%AF
global tpl_text
global tpl_image
global tpl_voice
global tpl_video
global tpl_music
global tpl_news
tpl_text = u'''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[你好]]></Content>
</xml>'''
tpl_image = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[image]]></MsgType>
<Image>
<MediaId><![CDATA[media_id]]></MediaId>
</Image>
</xml>'''
tpl_voice = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[voice]]></MsgType>
<Voice>
<MediaId><![CDATA[media_id]]></MediaId>
</Voice>
</xml>'''
tpl_video = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[video]]></MsgType>
<Video>
<MediaId><![CDATA[media_id]]></MediaId>
<Title><![CDATA[title]]></Title>
<Description><![CDATA[description]]></Description>
</Video>
</xml>'''
tpl_music = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[music]]></MsgType>
<Music>
<Title><![CDATA[TITLE]]></Title>
<Description><![CDATA[DESCRIPTION]]></Description>
<MusicUrl><![CDATA[MUSIC_Url]]></MusicUrl>
<HQMusicUrl><![CDATA[HQ_MUSIC_Url]]></HQMusicUrl>
<ThumbMediaId><![CDATA[media_id]]></ThumbMediaId>
</Music>
</xml>'''
tpl_news = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[news]]></MsgType>
<ArticleCount>2</ArticleCount>
<Articles>
<item>
<Title><![CDATA[title1]]></Title>
<Description><![CDATA[description1]]></Description>
<PicUrl><![CDATA[picurl]]></PicUrl>
<Url><![CDATA[url]]></Url>
</item>
<item>
<Title><![CDATA[title]]></Title>
<Description><![CDATA[description]]></Description>
<PicUrl><![CDATA[picurl]]></PicUrl>
<Url><![CDATA[url]]></Url>
</item>
</Articles>
</xml>'''
# Positive response
class PositiveRespondingContainer(object):
'''Using wechat custom service API to pass 6 types of messages to those wechat clients \n
who sent messages to the public wechat service. Those 6 types of messages include:
text, image, voice, video, music, news
    The dumped result is of dict format.
    We need to json.dumps(the_dict_object) if we want to post the right response back
'''
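    # A minimal usage sketch (the openid and token values below are illustrative assumptions):
    #   >>> prc = PositiveRespondingContainer(MsgType='text')
    #   >>> prc.setElementByKey(touser='his_open_id', msgtype='text', content='Hello dude!')
    #   >>> postMessage2API(token='the_api_token', messageString=json.dumps(prc.dumpDict()))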
def __init__(self, MsgType='text'):
self._MsgType = MsgType
# By default set the ```self.the_dict``` as from the 'text' JSON format
the_json_tpl = globals()['json_' + self._MsgType].encode('utf-8').decode('utf-8')
self.the_dict = json.loads(the_json_tpl)
if MsgType == 'text':
pass
def initType(self, MsgType='text'):
if MsgType not in ['text', 'image', 'voice', 'video', 'music', 'news']:
raise ValueError, "It has no message type: '%s'" % MsgType
else:
# pass the message type to have ```self.the_dict```
self.__init__(MsgType)
def setElementByKey(self, **kwargs):
        '''To set ```self.the_dict``` according to the message type chosen by e.g. ```initType(MsgType='text')```
        Notice: all the kwargs keys in this function should be lower case. The official wechat API defines them that way, so don't complain.'''
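        ## e.g. a minimal sketch (values are illustrative): prc.setElementByKey(touser='his_open_id', msgtype='text', content='Hello dude!')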
## For text message only
if self._MsgType == 'text':
for k, v in kwargs.items():
try:
if k == 'content':
self.the_dict['text'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For image message only
elif self._MsgType == 'image':
for k, v in kwargs.items():
try:
if k == 'media_id':
self.the_dict['image'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For voice message only
elif self._MsgType == 'voice':
for k, v in kwargs.items():
try:
if k == 'media_id':
self.the_dict['voice'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For video message only
elif self._MsgType == 'video':
for k, v in kwargs.items():
try:
if k == 'media_id':
self.the_dict['video'][k] = v
elif k == 'title':
self.the_dict['video'][k] = v
elif k == 'description':
self.the_dict['video'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For music message only
elif self._MsgType == 'music':
for k, v in kwargs.items():
try:
if k == 'musicurl':
self.the_dict['music'][k] = v
elif k == 'title':
self.the_dict['music'][k] = v
elif k == 'description':
self.the_dict['music'][k] = v
elif k == 'hqmusicurl':
self.the_dict['music'][k] = v
elif k == 'thumb_media_id':
self.the_dict['music'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For news message only
elif self._MsgType == 'news':
for k, v in kwargs.items():
try:
                    # here we just check whether ```v``` is of type list; ```v``` should already be packaged in a list
                    # if it is a list, then it is the element of the key ```articles``` for the news message
'''
"articles": [
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
},
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
}
]
'''
if k == 'articles':
if type(v) == list:
self.the_dict['news'][k] = v
else:
raise ValueError, "The value of the key 'articles' should be of type list"
elif k == 'touser':
self.the_dict['touser'] = v
elif k == 'msgtype':
self.the_dict['msgtype'] = 'news'
except Exception as e:
print e
raise e
# package article
    def packageArticle(self, title="default title", description="default description", url="http://www.baidu.com", picurl="http://www.baidu.com/img/bdlogo.gif"):
'''This will return an article in a list which contains a dict.
        While constructing the JSON to be dumped,
        this is used with the function ```setElementByKey(touser='someone', msgtype='news', articles=packageArticle())```
'''
return [{"title": title, "description":description, "url":url, "picurl":picurl}]
    # to dump the dict for later JSON serialization
def dumpDict(self):
return self.the_dict
json_text = '''{
"touser":"OPENID",
"msgtype":"text",
"text":
{
"content":"Hello World"
}
}'''
json_image = '''{
"touser":"OPENID",
"msgtype":"image",
"image":
{
"media_id":"MEDIA_ID"
}
}'''
json_voice = '''{
"touser":"OPENID",
"msgtype":"voice",
"voice":
{
"media_id":"MEDIA_ID"
}
}'''
json_video = '''{
"touser":"OPENID",
"msgtype":"video",
"video":
{
"media_id":"MEDIA_ID",
"title":"TITLE",
"description":"DESCRIPTION"
}
}'''
json_music = '''{
"touser":"OPENID",
"msgtype":"music",
"music":
{
"title":"MUSIC_TITLE",
"description":"MUSIC_DESCRIPTION",
"musicurl":"MUSIC_URL",
"hqmusicurl":"HQ_MUSIC_URL",
"thumb_media_id":"THUMB_MEDIA_ID"
}
}'''
json_news = '''{
"touser":"OPENID",
"msgtype":"news",
"news":{
"articles": [
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
},
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
}
]
}
}'''
class SubscriberManager(object):
'''To manage the subscriber groups, profile, location, list.
Usage:
>>> sm = SubscriberManager()
>>> sm.loadToken('abcdefg1234567')
>>> hisprofile = sm.getSubscriberProfile(openid='his_open_id', lang='zh_CN')
'''
def __init__(self, token=''):
self._token = token
def loadToken(self, token=''):
'''Firstly load the access token, then use the functions below'''
self._token = token
def getSubscriberProfile(self, openid='', lang='zh_CN'):
        '''The openid parameter is unique to each wechat public service account.
        This function will return a dict if ```token``` and ```openid``` are valid.
        If it does not exist or is not valid, None will be returned.
        For the parameter lang, besides 'zh_CN' there are also 'zh_TW' and 'en'.
For more information: please visit, http://mp.weixin.qq.com/wiki/index.php?title=%E8%8E%B7%E5%8F%96%E7%94%A8%E6%88%B7%E5%9F%BA%E6%9C%AC%E4%BF%A1%E6%81%AF'''
url = "https://api.weixin.qq.com/cgi-bin/user/info?access_token=" + self._token + "&openid=" + openid + "&lang=" + lang
try:
a = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
gotten = a.read()
a_dict = json.loads(gotten)
# means wrong appid or secret
if a_dict.has_key('errcode'):
return None
else:
return a_dict
def createGroup(self, name=''):
        '''Create a group with the given name.
        If created, it will return the new group id of type 'int'.
        If not, None will be returned.
'''
url = "https://api.weixin.qq.com/cgi-bin/groups/create?access_token=" + self._token
postData = '{"group": {"name": "%s"} }' % name
request = urllib2.Request(url,data=postData)
request.get_method = lambda : 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
return None
else:
return a_dict['group']['id']
def getAllgroups(self):
''' A dict will be returned.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E6.9F.A5.E8.AF.A2.E6.89.80.E6.9C.89.E5.88.86.E7.BB.84
'''
url = "https://api.weixin.qq.com/cgi-bin/groups/get?access_token=" + self._token
try:
response = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
return None
else:
return a_dict
def getHisGroupID(self, openid=''):
'''Get a subscriber's group ID. The ID is of type 'int'.
        If the openid is wrong or the token is invalid, None will be returned.
For more information, please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E6.9F.A5.E8.AF.A2.E7.94.A8.E6.88.B7.E6.89.80.E5.9C.A8.E5.88.86.E7.BB.84'''
url = "https://api.weixin.qq.com/cgi-bin/groups/getid?access_token="+ self._token
postData = '{"openid":"%s"}' % openid
request = urllib2.Request(url,data=postData)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
return None
else:
return a_dict['groupid']
def updateGroupName(self, groupid='', new_name=''):
        '''Update the given group id with the new_name.
        Returns True if updated, False if not.
For more information, please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E4.BF.AE.E6.94.B9.E5.88.86.E7.BB.84.E5.90.8D
'''
url = "https://api.weixin.qq.com/cgi-bin/groups/update?access_token=" + self._token
postData = '{"group":{"id":%s,"name":"%s"}}' % (groupid, new_name)
request = urllib2.Request(url,data=postData)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
a_dict = json.loads(response.read())
#print a_dict
if a_dict.has_key('errcode'):
if a_dict['errcode'] == 0:
return True
else:
return False
else:
return False
def moveHimToGroup(self, openid='', groupid=''):
        '''Move the subscriber to another group.
        Returns True if moved, False if not.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E7.A7.BB.E5.8A.A8.E7.94.A8.E6.88.B7.E5.88.86.E7.BB.84'''
url = "https://api.weixin.qq.com/cgi-bin/groups/members/update?access_token=" + self._token
postData = '{"openid":"%s","to_groupid":%s}' % (openid, groupid)
request = urllib2.Request(url,data=postData)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
a_dict = json.loads(response.read())
#print a_dict
if a_dict.has_key('errcode'):
if a_dict['errcode'] == 0:
return True
else:
return False
else:
return False
def getSubscriberList(self, next_openid=''):
        '''To get the subscriber list.
        If ```token``` and ```next_openid``` are valid, then a dict will be returned.
        If the ```next_openid``` does not exist, the official wechat server takes it as '' by default.
        If not valid, None will be returned.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E8%8E%B7%E5%8F%96%E5%85%B3%E6%B3%A8%E8%80%85%E5%88%97%E8%A1%A8
'''
url = "https://api.weixin.qq.com/cgi-bin/user/get?access_token=" + self._token + "&next_openid=" + next_openid
try:
response = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
#print a_dict
if a_dict.has_key('errcode'):
return None
else:
return a_dict
def getAPIToken(appid='', appsecret=''):
    '''Get the wechat API token for customer service or other APIs.
    If ```appid``` and ```appsecret``` are correct, then the token string will be returned.
    If not, None will be returned.'''
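    ## e.g. a minimal sketch (appid/appsecret are placeholders): token = getAPIToken(appid='your_appid', appsecret='your_appsecret')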
default_url = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&'
url = default_url + 'appid=' + appid + '&secret=' + appsecret
try:
a = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
gotten = a.read()
a_dict = json.loads(gotten)
if a_dict.has_key('access_token'):
return a_dict['access_token']
# means wrong appid or secret
else:
return None
def postMessage2API(token='',messageString=''):
    '''Using the token, post the message to the target user.
    This returns a Boolean value'''
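    ## e.g. a minimal sketch (assumes ```container``` is a filled PositiveRespondingContainer):
    ##   postMessage2API(token='the_api_token', messageString=json.dumps(container.dumpDict()))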
url = "https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=" + token
request = urllib2.Request(url, messageString)
request.get_method = lambda : 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
j = json.loads(response.read())
# The above works
#print j
# to check if the message was accepted
if j['errcode'] == 0:
return True
else:
return False
class MenuManager(object):
'''To manage the bottom menu of the wechat service
Usage:
>>> mm = MenuManager()
>>> mm.loadToken('something_the_api_token')
>>> flag = mm.createMenu('the_menu_format_constructed_from_a_JSON_as_a_string')
>>> flag
True
>>> menu_got = mm.getMenu()
>>> menu_got
{u'menu': {u'button': [{u'type': u'click', u'name': u'\u7b2c\u4e00\u94ae', u'key': u'V1001_TODAY_MUSIC', u'sub_button': []}, {u'type': u'click', u'name': u'\u7b2c\u4e8c\u94ae', u'key': u'V1001_TODAY_SINGER', u'sub_button': []}, {u'name': u'\u7b2c\u4e09\u94ae', u'sub_button': [{u'url': u'http://www.soso.com/', u'type': u'view', u'name': u'\u641c\u641c', u'sub_button': []}, {u'url': u'http://v.qq.com/', u'type': u'view', u'name': u'\u770b\u7535\u5f71', u'sub_button': []}, {u'type': u'click', u'name': u'\u5938\u6211\u5e05', u'key': u'V1001_GOOD', u'sub_button': []}]}]}}
>>> flag2 = mm.deleteMenu()
>>> flag2
True
>>> mm.getMenu()
>>> # nothing gotten: it means no menu at all
'''
def __init__(self, token=''):
self._token = token
def loadToken(self, token=''):
'''Load the token before using other functions'''
self._token = token
def createMenu(self, menu_format=''):
'''Create menu, it needs a token and the menu format.
The ```menu_format``` is of type string.
But ```menu_format``` is constructed from a JSON.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E8%87%AA%E5%AE%9A%E4%B9%89%E8%8F%9C%E5%8D%95%E5%88%9B%E5%BB%BA%E6%8E%A5%E5%8F%A3
'''
token = self._token
url = "https://api.weixin.qq.com/cgi-bin/menu/create?access_token=" + token
request = urllib2.Request(url, menu_format)
request.get_method = lambda : 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
j = json.loads(response.read())
# The above works
#print j
# to check if the message was accepted
if j['errcode'] == 0:
return True
else:
return False
def getMenu(self):
'''Get the menu format from the API.
If there be, then a dict would be returned.
If not, 'None' will be returned.
'''
token = self._token
url = "https://api.weixin.qq.com/cgi-bin/menu/get?access_token="+ token
try:
response = urllib2.urlopen(url)
except Exception as e:
# its better to raise something here if the wechat remote server is down
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
if a_dict['errcode'] != 0:
return None
else:
return a_dict
else:
return a_dict
def deleteMenu(self):
token = self._token
url = "https://api.weixin.qq.com/cgi-bin/menu/delete?access_token=" + token
try:
response = urllib2.urlopen(url)
except Exception as e:
print e
return False
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
if a_dict['errcode'] == 0:
return True
else:
return False
else:
return False
class MediaManager(object):
    '''There are four types of media supported by wechat:
    image, voice, video, thumb
    Post the file to the official wechat server and get the response.
'''
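    # A minimal usage sketch (the token and file path are illustrative assumptions):
    #   >>> mm = MediaManager()
    #   >>> mm.loadToken('the_api_token')
    #   >>> info = mm.uploadMedia(media_type='image', media_path='/tmp/picture.jpg')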
def __init__(self, media_type='image', token = ''):
self._media_type = media_type
self._token = token
def loadToken(self, token = ''):
self._token = token
def uploadMedia(self, media_type='image', media_path=''):
        '''Post the given media file to the official upload URL.
        If the media file is valid, then a dict will be returned.
        If not, None will be returned.
For more information, please visit: http://mp.weixin.qq.com/wiki/index.php?title=%E4%B8%8A%E4%BC%A0%E4%B8%8B%E8%BD%BD%E5%A4%9A%E5%AA%92%E4%BD%93%E6%96%87%E4%BB%B6'''
if media_type not in ['image', 'voice', 'video', 'thumb']:
raise ValueError, "Media type: '%s' not valid" % media_type
else:
self._media_type = media_type
url = "http://file.api.weixin.qq.com/cgi-bin/media/upload?access_token=" + self._token + "&type=" + self._media_type
register_openers()
try:
datagen, headers = multipart_encode({"image1": open(media_path,"rb")})
except Exception as e:
#print e
return None
#raise e
else:
request = urllib2.Request(url,data=datagen,headers=headers)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return None
| [] |
thasmarinho/root-image-editor | root/converter/__init__.py | 0c3e955a1f81be02fef9a488b2b45a44cf16930a | from .color_converter import ColorConverter
from .scale_converter import ScaleConverter
| [] |
sthagen/chirun-ncl-chirun | chirun/plastex/color/__init__.py | 45897319d5203b9867b5d6e00b2db1aa90a6580c | from plasTeX import Command, Environment
def ProcessOptions(options, document):
colors = {}
document.userdata.setPath('packages/color/colors', colors)
colors['red'] = latex2htmlcolor('1,0,0')
colors['green'] = latex2htmlcolor('0,1,0')
colors['blue'] = latex2htmlcolor('0,0,1')
colors['cyan'] = latex2htmlcolor('0,1,1')
colors['magenta'] = latex2htmlcolor('1,0,1')
colors['yellow'] = latex2htmlcolor('1,1,0')
colors['white'] = latex2htmlcolor('1')
colors['black'] = latex2htmlcolor('0')
colors['gray'] = latex2htmlcolor('0.9')
colors['darkred'] = latex2htmlcolor('0.8,0,0')
colors['middlered'] = latex2htmlcolor('0.9,0,0')
colors['lightred'] = latex2htmlcolor('1,0,0')
colors['darkgreen'] = latex2htmlcolor('0,0.6,0')
colors['middlegreen'] = latex2htmlcolor('0,0.8,0')
colors['lightgreen'] = latex2htmlcolor('0,1,0')
colors['darkblue'] = latex2htmlcolor('0,0,0.8')
colors['middleblue'] = latex2htmlcolor('0,0,0.9')
colors['lightblue'] = latex2htmlcolor('0,0,1')
colors['darkcyan'] = latex2htmlcolor('0.6,0.8,0.8')
colors['middlecyan'] = latex2htmlcolor('0,0.8,0.8')
colors['darkmagenta'] = latex2htmlcolor('0.8,0.6,0.8')
colors['middlemagenta'] = latex2htmlcolor('1,0,0.6')
colors['darkyellow'] = latex2htmlcolor('0.8,0.8,0.6')
colors['middleyellow'] = latex2htmlcolor('1,1,0.2')
colors['darkgray'] = latex2htmlcolor('0.5')
colors['middlegray'] = latex2htmlcolor('0.7')
colors['lightgray'] = latex2htmlcolor('0.9')
def latex2htmlcolor(arg, model='rgb', named=None):
named = named or {}
if model == 'named':
return named.get(arg, '')
if ',' in arg:
parts = [float(x) for x in arg.split(',')]
# rgb
if len(parts) == 3:
red, green, blue = parts
red = min(int(red * 255), 255)
green = min(int(green * 255), 255)
blue = min(int(blue * 255), 255)
# cmyk
elif len(parts) == 4:
c, m, y, k = parts
red, green, blue = [int(255 * x) for x in [1 - c * (1 - k) - k, 1 - m * (1 - k) - k, 1 - y * (1 - k) - k]]
else:
return arg.strip()
else:
try:
red = green = blue = float(arg)
except ValueError:
try:
return named[arg]
except KeyError:
return arg.strip()
return '#%.2X%.2X%.2X' % (int(red), int(green), int(blue))
class definecolor(Command):
args = 'name:str model:str color:str'
def invoke(self, tex):
a = self.parse(tex)
u = self.ownerDocument.userdata
colors = u.getPath('packages/color/colors')
colors[a['name']] = latex2htmlcolor(a['color'], a['model'], colors)
class textcolor(Command):
args = '[ model:str ] color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class color(Environment):
args = '[ model:str ] color:str'
def invoke(self, tex):
a = self.parse(tex)
self.style['color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class pagecolor(Command):
args = '[ model:str ] color:str'
class colorbox(Command):
args = '[ model:str ] color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['background-color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class fcolorbox(Command):
args = '[ model:str ] bordercolor:str color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['background-color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
self.style['border'] = ('1px solid %s'
% latex2htmlcolor(a['bordercolor'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors')))
class normalcolor(Command):
pass
| [] |
gabrieleliasdev/python-cev | ex035A11.py | 45390963b5112a982e673f6a6866da422bf9ae6d | print('\033[0;33;44mTeste\033[m')
print('\033[4;33;44mTeste\033[m')
print('\033[1;35;43mTeste\033[m')
print('\033[7;32;40mTeste\033[m')
print('\033[7;30mTeste\033[m')
print(" - - - Testando os 40 - - -")
print("\033[0;37;40mPreto\033[m")
print("\033[0;30;41mVermelho\033[m")
print("\033[0;30;42mVerde\033[m")
print("\033[0;30;43mAmarelo\033[m")
print("\033[0;30;44mRoxo\033[m")
print("\033[0;30;45mLilás\033[m")
print("\033[0;30;46mTurquesa\033[m")
print("\033[0;30;47mBranco\033[m")
print("\033[0;36;48mFundo Transparente\033[m")
print(" - - - Testando os 30 - - -")
print("\033[0;37;40mTeste\033[m")
print("\033[0;31;40mTeste\033[m")
print("\033[0;32;40mTeste\033[m")
print("\033[0;33;40mTeste\033[m")
print("\033[0;34;40mTeste\033[m")
print("\033[0;35;40mTeste\033[m")
print("\033[0;36;40mTeste\033[m")
print("\033[0;37;40mTeste\033[m")
print("\033[0;38;40mTeste\033[m")
print(" - - - Testando os 1ª - - -")
print("\033[0;30;47mTeste\033[m")
print("\033[1;30;47mTexto em Negrito\033[m")
print("\033[2;30;47mTeste\033[m")
print("\033[3;30;47mFonta Itálica\033[m")
print("\033[4;30;47mSublinhado\033[m")
print("\033[5;30;47mTeste\033[m")
print("\033[6;30;47mTeste\033[m")
print("\033[7;30;47mTeste\033[m")
print("\033[7;38;47mTeste\033[m") | [] |
TurboGears/tg2 | tg/release.py | f40a82d016d70ce560002593b4bb8f83b57f87b3 | """TurboGears project related information"""
version = "2.4.3"
description = "Next generation TurboGears"
long_description="""
TurboGears brings together a best of breed python tools
to create a flexible, full featured, and easy to use web
framework.
TurboGears 2 provides an integrated and well tested set of tools for
everything you need to build dynamic, database driven applications.
It provides a full range of tools for front end javascript
development, back end database development and everything in between:
* dynamic javascript powered widgets (ToscaWidgets2)
* automatic JSON generation from your controllers
* powerful, designer friendly XHTML based templating
* object or route based URL dispatching
* powerful Object Relational Mappers (SQLAlchemy)
The latest development version is available in the
`TurboGears Git repositories`_.
.. _TurboGears Git repositories:
https://github.com/TurboGears
"""
url="http://www.turbogears.org/"
author= "Alessandro Molina, Mark Ramm, Christopher Perkins, Jonathan LaCour, Rick Copland, Alberto Valverde, Michael Pedersen and the TurboGears community"
email = "[email protected]"
copyright = """Copyright 2005-2020 Kevin Dangoor, Alberto Valverde, Mark Ramm, Christopher Perkins, Alessandro Molina and contributors"""
license = "MIT"
| [] |
m2lines/subgrid | swm-master/swm-master/calc/mean_e_calc.py | 3de5d14c5525a62529d43cbafccda716c74e32df | ## PRODUCE MEAN CALCULATIONS AND EXPORT AS .NPY
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
from netCDF4 import Dataset
# OPTIONS
runfolder = 15
print('Calculating subgrid-EKE means from run ' + str(runfolder))
## read data
runpath = path+'data/run%04i' % runfolder
skip = 5*365
e = np.load(runpath+'/e_sub.npy')[skip:,:,:]
print('run %i read.' % runfolder)
## create output folder
try:
os.mkdir(runpath+'/analysis')
except:
pass
## U,V,H mean
em = e.mean(axis=0)
print('e mean done.')
## STORING
dic = dict()
all_var2export = ['em']
for v in all_var2export:
exec('dic[v] ='+v)
np.save(runpath+'/analysis/mean_e.npy',dic)
print('Everything stored.')
| [((133, 147), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (141, 147), False, 'import os\n'), ((744, 790), 'numpy.save', 'np.save', (["(runpath + '/analysis/mean_e.npy')", 'dic'], {}), "(runpath + '/analysis/mean_e.npy', dic)\n", (751, 790), True, 'import numpy as np\n'), ((435, 466), 'numpy.load', 'np.load', (["(runpath + '/e_sub.npy')"], {}), "(runpath + '/e_sub.npy')\n", (442, 466), True, 'import numpy as np\n'), ((542, 573), 'os.mkdir', 'os.mkdir', (["(runpath + '/analysis')"], {}), "(runpath + '/analysis')\n", (550, 573), False, 'import os\n')] |
gammazero/pybogglesolver | bogglesolver.py | 71d2c6d6ae8c9b5f580f6b27479aea3450a2895a | """
Module to generate solutions for Boggle grids.
Andrew Gillis 22 Dec. 2009
"""
from __future__ import print_function
import os
import sys
import collections
import trie
if sys.version < '3':
range = xrange
class BoggleSolver(object):
"""
This class uses an external words file as a dictionary of acceptable boggle
words. When an instance of this class is created, it sets up an internal
dictionary to look up valid boggle answers. The class' solve method can be
used repeatedly to generate solutions for different boggle grids.
"""
def __init__(self, words_file, xlen=4, ylen=4, pre_compute_adj=False):
"""Create and initialize BoggleSolver instance.
This creates the internal trie for fast word lookup letter-by-letter.
Words that begin with capital letters and words that are not within the
specified length limits are filtered out.
Arguments:
xlen -- X dimension (width) of board.
ylen -- Y dimension (height) of board.
pre_compute_adj -- Pre-compute adjacency matrix.
"""
assert(xlen > 1)
assert(ylen > 1)
self.xlen = xlen
self.ylen = ylen
self.board_size = xlen * ylen
if pre_compute_adj:
self.adjacency = BoggleSolver._create_adjacency_matrix(xlen, ylen)
else:
self.adjacency = None
self.trie = BoggleSolver._load_dictionary(
words_file, self.board_size, 3)
def solve(self, grid):
"""Generate all solutions for the given boggle grid.
Arguments:
        grid -- A string of X*Y characters representing the letters in a boggle
            grid, from top left to bottom right.
Returns:
A list of words found in the boggle grid.
None if given invalid grid.
"""
if self.trie is None:
raise RuntimeError('words file not loaded')
if len(grid) != self.board_size:
raise RuntimeError('invalid board')
board = list(grid)
trie = self.trie
words = set()
q = collections.deque()
adjs = self.adjacency
for init_sq in range(self.board_size):
c = board[init_sq]
q.append((init_sq, c, trie.get_child(c), [init_sq]))
while q:
parent_sq, prefix, pnode, seen = q.popleft()
pnode_get_child = pnode.get_child
if adjs:
adj = adjs[parent_sq]
else:
adj = self._calc_adjacency(self.xlen, self.ylen, parent_sq)
for cur_sq in adj:
if cur_sq in seen:
continue
c = board[cur_sq]
cur_node = pnode_get_child(c)
if cur_node is None:
continue
s = prefix + c
q.append((cur_sq, s, cur_node, seen + [cur_sq]))
if cur_node._is_word:
if s[0] == 'q':
# Rehydrate q-words with 'u'.
words.add('qu' + s[1:])
else:
words.add(s)
return words
def show_grid(self, grid):
"""Utility method to print a 4x4 boggle grid.
Arguments:
grid -- A string of X*Y characters representing the letters in a boggle
grid, from top left to bottom right.
"""
for y in range(self.ylen):
print('+' + '---+' * self.xlen)
yi = y * self.xlen
line = ['| ']
for x in range(self.xlen):
cell = grid[yi+x].upper()
if cell == 'Q':
line.append('Qu')
line.append('| ')
else:
line.append(cell)
line.append(' | ')
print(''.join(line))
print('+' + '---+' * self.xlen)
def find_substrings(self, string):
"""Find all valid substrings in the given string.
This method is not necessary for the boggle solver, but is a utility
for testing that all substrings of a word are correctly found.
Arguments:
string -- The string in which to search for valid substrings.
Returns:
List of substrings that are valid words.
"""
found = set()
for start in range(len(string)):
cur = self.trie
letters = [None] * self.board_size
count = 0
for l in string[start:]:
letters[count] = l
count += 1
cur = cur.get_child(l)
if cur is None:
break
if cur._is_word:
found.add(''.join(letters[:count]))
if not cur.has_children():
break
return found
@staticmethod
def _load_dictionary(words_file, max_len, min_len):
"""Private method to create the trie for finding words.
Arguments:
words_file -- Path of file containing words for reference.
Return:
Count of words inserted into trie.
"""
if not os.path.isfile(words_file):
raise RuntimeError('words file not found: ' + words_file)
print('creating dictionary...')
root = trie.Trie()
word_count = 0
if words_file.endswith('gz'):
import gzip
f = gzip.open(words_file)
elif words_file.endswith('bz2'):
import bz2
f = bz2.BZ2File(words_file)
else:
f = open(words_file)
try:
for word in f:
if sys.version < '3':
word = word.strip()
else:
word = word.strip().decode("utf-8")
# Skip words that are too long or too short.
word_len = len(word)
if word_len > max_len or word_len < min_len:
continue
# Skip words that start with capital letter.
if word[0].isupper():
continue
if word[0] == 'q':
# Skip words starting with q not followed by u.
if word[1] != 'u':
continue
# Remove "u" from q-words so that only the q is matched.
word = 'q' + word[2:]
root.insert(word)
word_count += 1
finally:
f.close()
print('Loaded', word_count, 'words from file.')
return root
@staticmethod
def _create_adjacency_matrix(xlim, ylim):
adj_list = [[]] * (ylim * xlim)
for i in range(ylim * xlim):
# Current cell index = y * xlim + x
adj = BoggleSolver._calc_adjacency(xlim, ylim, i)
adj_list[i] = adj
return adj_list
@staticmethod
def _calc_adjacency(xlim, ylim, sq):
adj = []
y = int(sq / xlim)
x = sq - (y * xlim)
# Look at row above current cell.
if y-1 >= 0:
above = sq - xlim
# Look to upper left.
if x-1 >= 0:
adj.append(above - 1)
# Look above.
adj.append(above)
# Look upper right.
if x+1 < xlim:
adj.append(above + 1)
# Look at same row that current cell is on.
# Look to left of current cell.
if x-1 >= 0:
adj.append(sq - 1)
# Look to right of current cell.
if x+1 < xlim:
adj.append(sq + 1)
# Look at row below current cell.
if y+1 < ylim:
below = sq + xlim
# Look to lower left.
if x-1 >= 0:
adj.append(below - 1)
# Look below.
adj.append(below)
            # Look to lower right.
if x+1 < xlim:
adj.append(below + 1)
return adj
| [((2118, 2137), 'collections.deque', 'collections.deque', ([], {}), '()\n', (2135, 2137), False, 'import collections\n'), ((5443, 5454), 'trie.Trie', 'trie.Trie', ([], {}), '()\n', (5452, 5454), False, 'import trie\n'), ((5289, 5315), 'os.path.isfile', 'os.path.isfile', (['words_file'], {}), '(words_file)\n', (5303, 5315), False, 'import os\n'), ((5556, 5577), 'gzip.open', 'gzip.open', (['words_file'], {}), '(words_file)\n', (5565, 5577), False, 'import gzip\n'), ((5658, 5681), 'bz2.BZ2File', 'bz2.BZ2File', (['words_file'], {}), '(words_file)\n', (5669, 5681), False, 'import bz2\n'), ((2281, 2298), 'trie.get_child', 'trie.get_child', (['c'], {}), '(c)\n', (2295, 2298), False, 'import trie\n')] |
MeridianExplorer/ocs-ci | tests/manage/monitoring/pagerduty/test_ceph.py | a33d5116128b88f176f5eff68a3ef805125cdba1 | import logging
import pytest
from ocs_ci.framework.testlib import (
managed_service_required,
skipif_ms_consumer,
tier4,
tier4a,
)
from ocs_ci.ocs import constants
from ocs_ci.utility import pagerduty
log = logging.getLogger(__name__)
@tier4
@tier4a
@managed_service_required
@skipif_ms_consumer
@pytest.mark.polarion_id("OCS-2771")
def test_corrupt_pg_pd(measure_corrupt_pg):
"""
    Test that there is an appropriate incident in PagerDuty when a placement
    group on one OSD is corrupted, and that this incident is cleared when the
    corrupted ceph pool is removed.
"""
api = pagerduty.PagerDutyAPI()
# get incidents from time when manager deployment was scaled down
incidents = measure_corrupt_pg.get("pagerduty_incidents")
target_label = constants.ALERT_CLUSTERERRORSTATE
# TODO(fbalak): check the whole string in summary and incident alerts
assert pagerduty.check_incident_list(
summary=target_label,
incidents=incidents,
urgency="high",
)
api.check_incident_cleared(
summary=target_label,
measure_end_time=measure_corrupt_pg.get("stop"),
)
| [((226, 253), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (243, 253), False, 'import logging\n'), ((318, 353), 'pytest.mark.polarion_id', 'pytest.mark.polarion_id', (['"""OCS-2771"""'], {}), "('OCS-2771')\n", (341, 353), False, 'import pytest\n'), ((609, 633), 'ocs_ci.utility.pagerduty.PagerDutyAPI', 'pagerduty.PagerDutyAPI', ([], {}), '()\n', (631, 633), False, 'from ocs_ci.utility import pagerduty\n'), ((906, 998), 'ocs_ci.utility.pagerduty.check_incident_list', 'pagerduty.check_incident_list', ([], {'summary': 'target_label', 'incidents': 'incidents', 'urgency': '"""high"""'}), "(summary=target_label, incidents=incidents,\n urgency='high')\n", (935, 998), False, 'from ocs_ci.utility import pagerduty\n')] |
phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan | STANchap7.py | d708faab0fdd43800e8726e2c6dd99452c8dcedb | # -*- coding: utf-8 -*-
import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns
from cmdstanpy import CmdStanModel
#%% load data
data = pd.read_csv("data/overfitting.csv", index_col = 'case_id')
data.columns
data.info()
feature_names = data.columns.str.startswith("var_")
predictors = data[data.columns[feature_names]]
labels = data["Target_Practice"]
ix_training = data.train == 1
training_data = predictors[ix_training]
training_labels = labels[ix_training]
ix_testing = data.train == 0
testing_data = predictors[ix_testing]
testing_labels = labels[ix_testing]
sns.displot(training_data.values.flatten(), bins = "sqrt", kde = True)
pca = prince.PCA(n_components = 2, as_array = False).fit(training_data)
pca.plot_row_coordinates(training_data, color_labels = training_labels)
pca.column_correlations(training_data).plot.scatter(x = 0, y = 1) # weird column name
#%% Roshan Sharma model
mdl_data = { # problem with JSON dump => cast to python native type
'N': ix_training.sum().tolist(),
'N2': ix_testing.sum().tolist(),
'K': feature_names.sum().tolist(),
'y': training_labels.values.tolist(),
'X': training_data.values.tolist(),
'new_X': testing_data.values.tolist(),
}
modelfile = "OverfittingRoshanSharma.stan"
with open(modelfile, "w") as file: file.write("""
data {
int N; // the number of training observations
int N2; // the number of test observations
int K; // the number of features
int y[N]; // the response
matrix[N,K] X; // the model matrix
matrix[N2,K] new_X; // the matrix for the predicted values
}
parameters { // regression parameters
real alpha;
vector[K] beta;
}
transformed parameters {
vector[N] linpred = alpha + X * beta;
}
model {
alpha ~ cauchy(0, 10); // prior for the intercept following Gelman 2008
beta ~ student_t(1, 0, 0.03);
y ~ bernoulli_logit(linpred);
}
generated quantities { // y values predicted by the model
vector[N2] y_pred = alpha + new_X * beta;
}
""")
var_name_array = ["alpha"] + [f"beta[{i+1}]" for i in range(mdl_data["K"])]
var_name_combi = ["alpha", "beta"]
sm = CmdStanModel(stan_file = modelfile)
# maximum likelihood estimation
optim = sm.optimize(data = mdl_data).optimized_params_pd
optim[optim.columns[~optim.columns.str.startswith("lp")]]
plt.plot(optim[var_name_array[1:]].values[0])
# variational inference
vb = sm.variational(data = mdl_data)
vb.variational_sample.columns = vb.variational_params_dict.keys()
vb_name = vb.variational_params_pd.columns[~vb.variational_params_pd.columns.str.startswith(("lp", "log_"))]
vb.variational_params_pd[var_name_array]
vb.variational_sample[var_name_array]
# Markov chain Monte Carlo
fit = sm.sample(
data = mdl_data, show_progress = True, chains = 4,
iter_sampling = 50000, iter_warmup = 10000, thin = 5
)
fit.draws().shape # iterations, chains, parameters
fit.summary().loc[var_name_array] # pandas DataFrame
print(fit.diagnose())
posterior = {k: fit.stan_variable(k) for k in var_name_combi}
az_trace = az.from_cmdstanpy(fit)
az.summary(az_trace).loc[var_name_array] # pandas DataFrame
az.plot_trace(az_trace, var_names = ["alpha"])
az.plot_forest(az_trace, var_names = ["beta"])
sample_pred = fit.stan_variable('y_pred')
# Tim Salimans model: DOES NOT WORK yet
# need to figure out how to marginalize all discrete params
| [((186, 242), 'pandas.read_csv', 'pd.read_csv', (['"""data/overfitting.csv"""'], {'index_col': '"""case_id"""'}), "('data/overfitting.csv', index_col='case_id')\n", (197, 242), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((2185, 2218), 'cmdstanpy.CmdStanModel', 'CmdStanModel', ([], {'stan_file': 'modelfile'}), '(stan_file=modelfile)\n', (2197, 2218), False, 'from cmdstanpy import CmdStanModel\n'), ((2374, 2419), 'matplotlib.pyplot.plot', 'plt.plot', (['optim[var_name_array[1:]].values[0]'], {}), '(optim[var_name_array[1:]].values[0])\n', (2382, 2419), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((3118, 3140), 'arviz.from_cmdstanpy', 'az.from_cmdstanpy', (['fit'], {}), '(fit)\n', (3135, 3140), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((3197, 3241), 'arviz.plot_trace', 'az.plot_trace', (['az_trace'], {'var_names': "['alpha']"}), "(az_trace, var_names=['alpha'])\n", (3210, 3241), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((3245, 3289), 'arviz.plot_forest', 'az.plot_forest', (['az_trace'], {'var_names': "['beta']"}), "(az_trace, var_names=['beta'])\n", (3259, 3289), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((713, 755), 'prince.PCA', 'prince.PCA', ([], {'n_components': '(2)', 'as_array': '(False)'}), '(n_components=2, as_array=False)\n', (723, 755), False, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((3142, 3162), 'arviz.summary', 'az.summary', (['az_trace'], {}), '(az_trace)\n', (3152, 3162), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n')] |
factabulous/matgrindr | watcher.py | 6f5d6d20e34f9b13950d654cf70afdb2e46f5d1e | # -*- coding: utf-8 -*-
import json
import threading
import os
import time
import mats
import sys
import requests
import traceback
import re
from util import debug, error
class MatsLoader(threading.Thread):
"""
Fire and forget loader for materials - will queue a 'mats' event or
an 'error' event if the load fails. Automatically runs as a daemon
"""
def __init__(self, filename, queue):
"""
filename is the file to async load
queue is the queue to report the results into
"""
threading.Thread.__init__(self)
self.queue = queue
self.filename = filename
self.daemon = True
def run(self):
try:
m = mats.Materials(self.filename)
self.queue.put( { 'mats': m._materials } )
except:
self.queue.put( { 'error': 'Failed to load materials ' + str(sys.exc_info()[0]) } )
class MatsLoaderRemote(threading.Thread):
"""
Fire and forget loader for materials - will queue a 'mats' event or
an 'error' event if the load fails. Automatically runs as a daemon
"""
def __init__(self, filename, queue):
"""
filename is the cache file - we only read the remote file
if the cache is old (or missing)
queue is the queue to report the results into
"""
threading.Thread.__init__(self)
self.filename = filename
self.queue = queue
self.daemon = True
self.integerRe = re.compile(r'^-?\d+$')
self.floatRe = re.compile(r'^-?\d+(\.\d+)?$')
self.arrayRe = re.compile(r'^\[.*\]$')
def need_refresh(self):
"""
Returns True if the local cache needs a refresh.
"""
if not os.path.exists(self.filename):
return True
mtime = os.path.getmtime(self.filename)
now = time.time()
return mtime < now - 24 * 3600 # Daily update
def array_splitter(self, value):
return [ x[1:-1] for x in value[1:-1].split(", ") ]
def detect(self, value):
"""
Looks at a data value and converts into an appropriate type
(maybe should look at using ast instead)
"""
if self.integerRe.match(value):
return int(value)
elif self.floatRe.match(value):
return float(value)
elif self.arrayRe.match(value):
return self.array_splitter(value)
else:
return value
def parse(self, text):
"""
        Parse a string field containing all the data in a TSV
        into an array of dicts. Mainly split out so we can test.
"""
lines = text.replace("\r", "").split("\n")
fields = lines[0].split("\t")
res = []
for entry in lines[1:]:
values = entry.split("\t")
if len(values) < len(fields):
continue
v = {}
for k in range(0, len(fields)):
v[fields[k]] = self.detect(values[k])
res.append(v)
return res
def run(self):
try:
if self.need_refresh():
r = requests.get("https://docs.google.com/spreadsheets/u/0/d/1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4/export?format=tsv&id=1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4&gid=0")
res = self.parse(r.text)
if res:
with open(self.filename, "wt") as cache_file:
json.dump(res, cache_file)
self.queue.put( { 'mats': res } )
debug("Async remote mats loader from tsv is completed {} entries".format(len(res)))
else:
error("Async remote mats loader failed - zero records")
else:
with open(self.filename, "rt") as cache_file:
res = json.load(cache_file)
self.queue.put( { 'mats': res } )
debug("loader from cache is completed {} entries".format(len(res)))
except:
self.queue.put( { 'error': 'Failed to load tsv materials ' + str(sys.exc_info()[0]) + ' ' + traceback.format_exc() } )
| [((539, 570), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (564, 570), False, 'import threading\n'), ((1347, 1378), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (1372, 1378), False, 'import threading\n'), ((1491, 1513), 're.compile', 're.compile', (['"""^-?\\\\d+$"""'], {}), "('^-?\\\\d+$')\n", (1501, 1513), False, 'import re\n'), ((1537, 1569), 're.compile', 're.compile', (['"""^-?\\\\d+(\\\\.\\\\d+)?$"""'], {}), "('^-?\\\\d+(\\\\.\\\\d+)?$')\n", (1547, 1569), False, 'import re\n'), ((1591, 1615), 're.compile', 're.compile', (['"""^\\\\[.*\\\\]$"""'], {}), "('^\\\\[.*\\\\]$')\n", (1601, 1615), False, 'import re\n'), ((1813, 1844), 'os.path.getmtime', 'os.path.getmtime', (['self.filename'], {}), '(self.filename)\n', (1829, 1844), False, 'import os\n'), ((1859, 1870), 'time.time', 'time.time', ([], {}), '()\n', (1868, 1870), False, 'import time\n'), ((707, 736), 'mats.Materials', 'mats.Materials', (['self.filename'], {}), '(self.filename)\n', (721, 736), False, 'import mats\n'), ((1741, 1770), 'os.path.exists', 'os.path.exists', (['self.filename'], {}), '(self.filename)\n', (1755, 1770), False, 'import os\n'), ((3134, 3319), 'requests.get', 'requests.get', (['"""https://docs.google.com/spreadsheets/u/0/d/1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4/export?format=tsv&id=1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4&gid=0"""'], {}), "(\n 'https://docs.google.com/spreadsheets/u/0/d/1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4/export?format=tsv&id=1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4&gid=0'\n )\n", (3146, 3319), False, 'import requests\n'), ((3731, 3786), 'util.error', 'error', (['"""Async remote mats loader failed - zero records"""'], {}), "('Async remote mats loader failed - zero records')\n", (3736, 3786), False, 'from util import debug, error\n'), ((3893, 3914), 'json.load', 'json.load', (['cache_file'], {}), '(cache_file)\n', (3902, 3914), False, 'import json\n'), ((3482, 3508), 'json.dump', 'json.dump', (['res', 'cache_file'], {}), '(res, cache_file)\n', (3491, 3508), False, 'import json\n'), ((4178, 4200), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4198, 4200), False, 'import traceback\n'), ((881, 895), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (893, 895), False, 'import sys\n'), ((4151, 4165), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4163, 4165), False, 'import sys\n')] |
pighui/luoxia | luoxia/pipelines.py | 24daa0f1595fd2b18a4b251acf77321ef98eb534 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
from scrapy import Request
from scrapy.pipelines.images import ImagesPipeline
from luoxia import settings
class LuoxiaPipeline(object):
def process_item(self, item, spider):
title= item['title']
bookname = item['bookname']
titlename = item['titlename']
text = item['text']
path = "books/%s/%s/" % (title, bookname)
if not os.path.exists(path):
os.makedirs(path)
with open(path+titlename+'.txt', 'a', encoding='utf-8') as f:
f.write(text)
return item
class LuoxiaImagePipeline(ImagesPipeline):
def get_media_requests(self, item, info):
for url in item['image_urls']:
yield Request(url, meta={'title': item['title'],
'bookname': item['bookname']})
def item_completed(self, results, item, info):
        # store the paths of the downloaded images back into the item
item['images'] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
        # create one directory per book to hold all of its images
title = request.meta['title']
bookname = request.meta['bookname']
book_dir = os.path.join(settings.IMAGES_STORE, title +'/'+ bookname)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
        # extract the file extension from the link
try:
ext_name = request.url.split(".")[-1]
except:
ext_name = 'jpg'
        # the relative path to return
return '%s/%s/%s.%s' % (title, bookname, bookname, ext_name) | [((1359, 1418), 'os.path.join', 'os.path.join', (['settings.IMAGES_STORE', "(title + '/' + bookname)"], {}), "(settings.IMAGES_STORE, title + '/' + bookname)\n", (1371, 1418), False, 'import os\n'), ((581, 601), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (595, 601), False, 'import os\n'), ((615, 632), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (626, 632), False, 'import os\n'), ((1432, 1456), 'os.path.exists', 'os.path.exists', (['book_dir'], {}), '(book_dir)\n', (1446, 1456), False, 'import os\n'), ((1470, 1491), 'os.makedirs', 'os.makedirs', (['book_dir'], {}), '(book_dir)\n', (1481, 1491), False, 'import os\n'), ((896, 969), 'scrapy.Request', 'Request', (['url'], {'meta': "{'title': item['title'], 'bookname': item['bookname']}"}), "(url, meta={'title': item['title'], 'bookname': item['bookname']})\n", (903, 969), False, 'from scrapy import Request\n')] |
jpmarques19/tensorflwo-test | aws_sagemaker_studio/frameworks/tensorflow_mnist/mnist.py | 0ff8b06e0415075c7269820d080284a42595bb2e | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import numpy as np
import tensorflow as tf
def cnn_model_fn(features, labels, mode):
"""Model function for CNN."""
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
# MNIST images are 28x28 pixels, and have one color channel
input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
# Convolutional Layer #1
# Computes 32 features using a 5x5 filter with ReLU activation.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 28, 28, 1]
# Output Tensor Shape: [batch_size, 28, 28, 32]
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu
)
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 28, 28, 32]
# Output Tensor Shape: [batch_size, 14, 14, 32]
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2
# Computes 64 features using a 5x5 filter.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 14, 14, 32]
# Output Tensor Shape: [batch_size, 14, 14, 64]
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu
)
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 14, 14, 64]
# Output Tensor Shape: [batch_size, 7, 7, 64]
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Flatten tensor into a batch of vectors
# Input Tensor Shape: [batch_size, 7, 7, 64]
# Output Tensor Shape: [batch_size, 7 * 7 * 64]
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
# Dense Layer
# Densely connected layer with 1024 neurons
# Input Tensor Shape: [batch_size, 7 * 7 * 64]
# Output Tensor Shape: [batch_size, 1024]
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# Add dropout operation; 0.6 probability that element will be kept
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, 10]
logits = tf.layers.dense(inputs=dropout, units=10)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
'classes': tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions['classes'])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def _load_training_data(base_dir):
x_train = np.load(os.path.join(base_dir, 'train_data.npy'))
y_train = np.load(os.path.join(base_dir, 'train_labels.npy'))
return x_train, y_train
def _load_testing_data(base_dir):
x_test = np.load(os.path.join(base_dir, 'eval_data.npy'))
y_test = np.load(os.path.join(base_dir, 'eval_labels.npy'))
return x_test, y_test
def _parse_args():
parser = argparse.ArgumentParser()
# Data, model, and output directories.
# model_dir is always passed in from SageMaker.
# By default this is a S3 path under the default bucket.
parser.add_argument('--model_dir', type=str)
parser.add_argument('--sm-model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))
parser.add_argument('--hosts', type=list, default=json.loads(os.environ.get('SM_HOSTS')))
parser.add_argument('--current-host', type=str, default=os.environ.get('SM_CURRENT_HOST'))
return parser.parse_known_args()
def serving_input_fn():
inputs = {'x': tf.placeholder(tf.float32, [None, 784])}
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
if __name__ == '__main__':
args, _ = _parse_args()
train_data, train_labels = _load_training_data(args.train)
eval_data, eval_labels = _load_testing_data(args.train)
# Create the Estimator
mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir=args.model_dir)
# Set up logging for predictions
# Log the values in the 'Softmax' tensor with label 'probabilities'
tensors_to_log = {'probabilities': 'softmax_tensor'}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': train_data},
y=train_labels,
batch_size=100,
num_epochs=None,
shuffle=True
)
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False
)
train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=20000)
eval_spec = tf.estimator.EvalSpec(eval_input_fn)
tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)
if args.current_host == args.hosts[0]:
mnist_classifier.export_savedmodel(args.sm_model_dir, serving_input_fn)
| [((1038, 1080), 'tensorflow.reshape', 'tf.reshape', (["features['x']", '[-1, 28, 28, 1]'], {}), "(features['x'], [-1, 28, 28, 1])\n", (1048, 1080), True, 'import tensorflow as tf\n'), ((1346, 1457), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'input_layer', 'filters': '(32)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=input_layer, filters=32, kernel_size=[5, 5],\n padding='same', activation=tf.nn.relu)\n", (1362, 1457), True, 'import tensorflow as tf\n'), ((1703, 1769), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv1', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv1, pool_size=[2, 2], strides=2)\n', (1726, 1769), True, 'import tensorflow as tf\n'), ((2015, 2121), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'pool1', 'filters': '(64)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=pool1, filters=64, kernel_size=[5, 5], padding=\n 'same', activation=tf.nn.relu)\n", (2031, 2121), True, 'import tensorflow as tf\n'), ((2365, 2431), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv2', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv2, pool_size=[2, 2], strides=2)\n', (2388, 2431), True, 'import tensorflow as tf\n'), ((2596, 2631), 'tensorflow.reshape', 'tf.reshape', (['pool2', '[-1, 7 * 7 * 64]'], {}), '(pool2, [-1, 7 * 7 * 64])\n', (2606, 2631), True, 'import tensorflow as tf\n'), ((2808, 2877), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'pool2_flat', 'units': '(1024)', 'activation': 'tf.nn.relu'}), '(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n', (2823, 2877), True, 'import tensorflow as tf\n'), ((2964, 3056), 'tensorflow.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'dense', 'rate': '(0.4)', 'training': '(mode == tf.estimator.ModeKeys.TRAIN)'}), '(inputs=dense, rate=0.4, training=mode == tf.estimator.\n ModeKeys.TRAIN)\n', (2981, 3056), True, 'import tensorflow as tf\n'), ((3183, 3224), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'dropout', 'units': '(10)'}), '(inputs=dropout, units=10)\n', (3198, 3224), True, 'import tensorflow as tf\n'), ((3727, 3795), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (3765, 3795), True, 'import tensorflow as tf\n'), ((4348, 4434), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'eval_metric_ops': 'eval_metric_ops'}), '(mode=mode, loss=loss, eval_metric_ops=\n eval_metric_ops)\n', (4374, 4434), True, 'import tensorflow as tf\n'), ((4856, 4881), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4879, 4881), False, 'import argparse\n'), ((5596, 5652), 'tensorflow.estimator.export.ServingInputReceiver', 'tf.estimator.export.ServingInputReceiver', (['inputs', 'inputs'], {}), '(inputs, inputs)\n', (5636, 5652), True, 'import tensorflow as tf\n'), ((5885, 5956), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'cnn_model_fn', 'model_dir': 'args.model_dir'}), '(model_fn=cnn_model_fn, model_dir=args.model_dir)\n', (5907, 5956), True, 'import tensorflow as tf\n'), ((6143, 6210), 'tensorflow.train.LoggingTensorHook', 'tf.train.LoggingTensorHook', ([], {'tensors': 'tensors_to_log', 'every_n_iter': '(50)'}), '(tensors=tensors_to_log, every_n_iter=50)\n', (6169, 6210), 
True, 'import tensorflow as tf\n'), ((6255, 6377), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': train_data}", 'y': 'train_labels', 'batch_size': '(100)', 'num_epochs': 'None', 'shuffle': '(True)'}), "(x={'x': train_data}, y=train_labels,\n batch_size=100, num_epochs=None, shuffle=True)\n", (6289, 6377), True, 'import tensorflow as tf\n'), ((6484, 6586), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': eval_data}", 'y': 'eval_labels', 'num_epochs': '(1)', 'shuffle': '(False)'}), "(x={'x': eval_data}, y=eval_labels,\n num_epochs=1, shuffle=False)\n", (6518, 6586), True, 'import tensorflow as tf\n'), ((6639, 6694), 'tensorflow.estimator.TrainSpec', 'tf.estimator.TrainSpec', (['train_input_fn'], {'max_steps': '(20000)'}), '(train_input_fn, max_steps=20000)\n', (6661, 6694), True, 'import tensorflow as tf\n'), ((6711, 6747), 'tensorflow.estimator.EvalSpec', 'tf.estimator.EvalSpec', (['eval_input_fn'], {}), '(eval_input_fn)\n', (6732, 6747), True, 'import tensorflow as tf\n'), ((6752, 6824), 'tensorflow.estimator.train_and_evaluate', 'tf.estimator.train_and_evaluate', (['mnist_classifier', 'train_spec', 'eval_spec'], {}), '(mnist_classifier, train_spec, eval_spec)\n', (6783, 6824), True, 'import tensorflow as tf\n'), ((3324, 3355), 'tensorflow.argmax', 'tf.argmax', ([], {'input': 'logits', 'axis': '(1)'}), '(input=logits, axis=1)\n', (3333, 3355), True, 'import tensorflow as tf\n'), ((3487, 3531), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""softmax_tensor"""'}), "(logits, name='softmax_tensor')\n", (3500, 3531), True, 'import tensorflow as tf\n'), ((3599, 3661), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions'}), '(mode=mode, predictions=predictions)\n', (3625, 3661), True, 'import tensorflow as tf\n'), ((3910, 3964), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (3943, 3964), True, 'import tensorflow as tf\n'), ((4094, 4161), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'train_op': 'train_op'}), '(mode=mode, loss=loss, train_op=train_op)\n', (4120, 4161), True, 'import tensorflow as tf\n'), ((4252, 4322), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'labels', 'predictions': "predictions['classes']"}), "(labels=labels, predictions=predictions['classes'])\n", (4271, 4322), True, 'import tensorflow as tf\n'), ((4498, 4538), 'os.path.join', 'os.path.join', (['base_dir', '"""train_data.npy"""'], {}), "(base_dir, 'train_data.npy')\n", (4510, 4538), False, 'import os\n'), ((4562, 4604), 'os.path.join', 'os.path.join', (['base_dir', '"""train_labels.npy"""'], {}), "(base_dir, 'train_labels.npy')\n", (4574, 4604), False, 'import os\n'), ((4691, 4730), 'os.path.join', 'os.path.join', (['base_dir', '"""eval_data.npy"""'], {}), "(base_dir, 'eval_data.npy')\n", (4703, 4730), False, 'import os\n'), ((4753, 4794), 'os.path.join', 'os.path.join', (['base_dir', '"""eval_labels.npy"""'], {}), "(base_dir, 'eval_labels.npy')\n", (4765, 4794), False, 'import os\n'), ((5544, 5583), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 784]'], {}), '(tf.float32, [None, 784])\n', (5558, 5583), True, 'import tensorflow as tf\n'), ((5148, 5178), 'os.environ.get', 'os.environ.get', (['"""SM_MODEL_DIR"""'], {}), 
"('SM_MODEL_DIR')\n", (5162, 5178), False, 'import os\n'), ((5233, 5270), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_TRAINING"""'], {}), "('SM_CHANNEL_TRAINING')\n", (5247, 5270), False, 'import os\n'), ((5426, 5459), 'os.environ.get', 'os.environ.get', (['"""SM_CURRENT_HOST"""'], {}), "('SM_CURRENT_HOST')\n", (5440, 5459), False, 'import os\n'), ((4051, 4077), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (4075, 4077), True, 'import tensorflow as tf\n'), ((5337, 5363), 'os.environ.get', 'os.environ.get', (['"""SM_HOSTS"""'], {}), "('SM_HOSTS')\n", (5351, 5363), False, 'import os\n')] |
loop-perception/AutowareArchitectureProposal.iv | common/util/autoware_debug_tools/scripts/stop_reason2pose.py | 5d8dff0db51634f0c42d2a3e87ca423fbee84348 | #! /usr/bin/env python3
# Copyright 2020 Tier IV, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import sys
from autoware_planning_msgs.msg import StopReasonArray
from case_converter import pascal2snake
from geometry_msgs.msg import PoseStamped
import numpy as np
import rclpy
from rclpy.node import Node
from rtree import index
from self_pose_listener import SelfPoseListener
class StopReason2PoseNode(Node):
def __init__(self, options):
super().__init__("stop_reason2pose_node")
self._options = options
self._sub_pose = self.create_subscription(
StopReasonArray, self._options.topic_name, self._on_stop_reasons, 1
)
self._pub_pose_map = {}
self._idx_map = {}
self._pose_map = {}
self._self_pose_listener = SelfPoseListener()
self.timer = self.create_timer((1.0 / 100), self._self_pose_listener.get_current_pose)
def _on_stop_reasons(self, msg):
for stop_reason in msg.stop_reasons:
snake_case_stop_reason = pascal2snake(stop_reason.reason)
if len(stop_reason.stop_factors) == 0:
self.get_logger().warn("stop_factor is null")
return
for stop_factor in stop_reason.stop_factors:
pose = PoseStamped()
pose.header = msg.header
pose.pose = stop_factor.stop_pose
# Get nearest pose
th_dist = 1.0
nearest_pose_id = self._get_nearest_pose_id(
snake_case_stop_reason, pose.pose, th_dist
)
if nearest_pose_id:
self._update_pose(snake_case_stop_reason, pose.pose, nearest_pose_id)
pose_id = nearest_pose_id
else:
pose_id = self._register_pose(snake_case_stop_reason, pose.pose)
pose_topic_name = "{snake_case_stop_reason}_{pose_id}".format(**locals())
topic_ns = "/autoware_debug_tools/stop_reason2pose/"
if pose_topic_name not in self._pub_pose_map:
self._pub_pose_map[pose_topic_name] = self.create_publisher(
PoseStamped, topic_ns + pose_topic_name, 1
)
self._pub_pose_map[pose_topic_name].publish(pose)
# Publish nearest stop_reason without number
nearest_pose = PoseStamped()
nearest_pose.header = msg.header
nearest_pose.pose = self._get_nearest_pose_in_array(
stop_reason, self._self_pose_listener.self_pose
)
if nearest_pose.pose:
if snake_case_stop_reason not in self._pub_pose_map:
topic_ns = "/autoware_debug_tools/stop_reason2pose/"
self._pub_pose_map[snake_case_stop_reason] = self.create_publisher(
PoseStamped, topic_ns + snake_case_stop_reason, 1
)
self._pub_pose_map[snake_case_stop_reason].publish(nearest_pose)
def _get_nearest_pose_in_array(self, stop_reason, self_pose):
poses = [stop_factor.stop_pose for stop_factor in stop_reason.stop_factors]
if not poses:
return None
        distances = [StopReason2PoseNode.calc_distance2d(p, self_pose) for p in poses]
        nearest_idx = np.argmin(distances)
return poses[nearest_idx]
def _find_nearest_pose_id(self, name, pose):
if name not in self._idx_map:
self._idx_map[name] = index.Index()
return self._idx_map[name].nearest(StopReason2PoseNode.pose2boundingbox(pose), 1)
def _get_nearest_pose_id(self, name, pose, th_dist):
nearest_pose_ids = list(self._find_nearest_pose_id(name, pose))
if not nearest_pose_ids:
return None
nearest_pose_id = nearest_pose_ids[0]
nearest_pose = self._get_pose(name, nearest_pose_id)
if not nearest_pose:
return None
dist = StopReason2PoseNode.calc_distance2d(pose, nearest_pose)
if dist > th_dist:
return None
return nearest_pose_id
def _get_pose(self, name, pose_id):
if name not in self._pose_map:
return None
return self._pose_map[name][pose_id]
def _update_pose(self, name, pose, pose_id):
        self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(pose_id, StopReason2PoseNode.pose2boundingbox(pose))
def _register_pose(self, name, pose):
if name not in self._pose_map:
self._pose_map[name] = {}
pose_id = len(self._pose_map[name]) + 1
self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(pose_id, StopReason2PoseNode.pose2boundingbox(pose))
return pose_id
@staticmethod
def calc_distance2d(pose1, pose2):
p1 = pose1.position
p2 = pose2.position
return math.hypot(p1.x - p2.x, p1.y - p2.y)
@staticmethod
def pose2boundingbox(pose):
return [pose.position.x, pose.position.y, pose.position.x, pose.position.y]
def main(args):
rclpy.init()
parser = argparse.ArgumentParser()
parser.add_argument("topic_name", type=str)
ns = parser.parse_args(args)
stop_reason2pose_node = StopReason2PoseNode(ns)
rclpy.spin(stop_reason2pose_node)
stop_reason2pose_node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main(sys.argv[1:])
| [((5663, 5675), 'rclpy.init', 'rclpy.init', ([], {}), '()\n', (5673, 5675), False, 'import rclpy\n'), ((5690, 5715), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5713, 5715), False, 'import argparse\n'), ((5854, 5887), 'rclpy.spin', 'rclpy.spin', (['stop_reason2pose_node'], {}), '(stop_reason2pose_node)\n', (5864, 5887), False, 'import rclpy\n'), ((5933, 5949), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (5947, 5949), False, 'import rclpy\n'), ((1325, 1343), 'self_pose_listener.SelfPoseListener', 'SelfPoseListener', ([], {}), '()\n', (1341, 1343), False, 'from self_pose_listener import SelfPoseListener\n'), ((3901, 3921), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (3910, 3921), True, 'import numpy as np\n'), ((5469, 5505), 'math.hypot', 'math.hypot', (['(p1.x - p2.x)', '(p1.y - p2.y)'], {}), '(p1.x - p2.x, p1.y - p2.y)\n', (5479, 5505), False, 'import math\n'), ((1559, 1591), 'case_converter.pascal2snake', 'pascal2snake', (['stop_reason.reason'], {}), '(stop_reason.reason)\n', (1571, 1591), False, 'from case_converter import pascal2snake\n'), ((2945, 2958), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (2956, 2958), False, 'from geometry_msgs.msg import PoseStamped\n'), ((4079, 4092), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (4090, 4092), False, 'from rtree import index\n'), ((1810, 1823), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (1821, 1823), False, 'from geometry_msgs.msg import PoseStamped\n')] |
tmaila/aiounittest | aiounittest/case.py | c43d3b619fd6a8fd071758996a5f42310b0293dc | import asyncio
import unittest
from .helpers import async_test
class AsyncTestCase(unittest.TestCase):
    ''' AsyncTestCase allows you to test asynchronous functions.
    The usage is the same as :code:`unittest.TestCase`. It works with other test frameworks
    and runners (e.g. `pytest`, `nose`) as well.
AsyncTestCase can run:
- test of synchronous code (:code:`unittest.TestCase`)
- test of asynchronous code, supports syntax with
:code:`async`/:code:`await` (Python 3.5+) and
:code:`asyncio.coroutine`/:code:`yield from` (Python 3.4)
Code to test:
.. code-block:: python
import asyncio
async def async_add(x, y, delay=0.1):
await asyncio.sleep(delay)
return x + y
async def async_one():
await async_nested_exc()
async def async_nested_exc():
await asyncio.sleep(0.1)
raise Exception('Test')
Tests:
.. code-block:: python
import aiounittest
class MyTest(aiounittest.AsyncTestCase):
async def test_await_async_add(self):
ret = await async_add(1, 5)
self.assertEqual(ret, 6)
async def test_await_async_fail(self):
with self.assertRaises(Exception) as e:
await async_one()
'''
def get_event_loop(self):
''' Method provides an event loop for the test
        It is called before each test; by default :code:`aiounittest.AsyncTestCase` creates a brand new event
        loop every time. After completion, the loop is closed and then recreated, set as default,
        leaving asyncio clean.
.. note::
            In the most common cases you don't have to bother with this method; the default implementation is the recommended one.
            But if, for some reason, you want to provide your own event loop, just override it. Note that :code:`AsyncTestCase` won't close such a loop.
.. code-block:: python
class MyTest(aiounittest.AsyncTestCase):
def get_event_loop(self):
self.my_loop = asyncio.get_event_loop()
return self.my_loop
'''
return None
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if name.startswith('test_') and asyncio.iscoroutinefunction(attr):
return async_test(attr, loop=self.get_event_loop())
else:
return attr
| [((2441, 2474), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['attr'], {}), '(attr)\n', (2468, 2474), False, 'import asyncio\n')] |
Code-Master1234/Turtle_Flags_File_Hub | US Flag.py | d99f8bc05c4f2280f8c91cdda14005ef9c5d6236 | import turtle as t
def rectangle(horizontal, vertical, color):
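    # Draw a filled rectangle with the given side lengths, starting from the
    # current turtle position and heading.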
t.pendown()
t.pensize(1)
t.color(color)
t.begin_fill()
for counter in range(2):
t.forward(horizontal)
t.right(90)
t.forward(vertical)
t.right(90)
t.end_fill()
t.penup()
def star(length, points, color):
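    # Draw a filled star with the given number of points and edge length.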
sumangle = ((points*2)-2) * 180
oneangle = sumangle/points
smallangle = oneangle/3.5
bigangle = oneangle - smallangle
t.color(color)
t.pendown()
t.begin_fill()
t.penup()
for counter in range(points):
t.forward(length)
t.left(smallangle)
t.forward(length)
t.left(bigangle)
t.end_fill()
t.penup()
gotoy = 222
t.speed(0)
t.setup(988,520)
t.goto(494,260)
t.pendown()
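# Draw the seven red stripes; the white canvas background supplies the six
# white stripes in between.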
for counter in range(7):
t.setheading(-90)
rectangle(40,988,'#B22234')
t.setheading(-90)
t.forward(80)
t.penup()
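# Draw the blue canton (union) in the upper-left corner.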
t.setheading(0)
t.goto(-494,260)
t.pendown()
rectangle(494,280,'#3C3B6E')
t.goto(-474,245)
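# Draw the 50 stars: four pairs of alternating 6-star and 5-star rows,
# followed by a final 6-star row.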
for counter in range(4):
for counter in range(6):
star(9,5,'white')
t.setheading(0)
t.forward(84)
t.penup()
t.goto(-434,gotoy)
gotoy = gotoy - 28
t.pendown()
for counter in range(5):
star(9,5,'white')
t.setheading(0)
t.forward(84)
t.goto(-476,gotoy)
gotoy = gotoy - 28
for counter in range(6):
star(9,5,'white')
t.setheading(0)
t.forward(84)
t.penup()
t.hideturtle()
| [((716, 725), 'turtle.penup', 't.penup', ([], {}), '()\n', (723, 725), True, 'import turtle as t\n'), ((744, 754), 'turtle.speed', 't.speed', (['(0)'], {}), '(0)\n', (751, 754), True, 'import turtle as t\n'), ((756, 773), 'turtle.setup', 't.setup', (['(988)', '(520)'], {}), '(988, 520)\n', (763, 773), True, 'import turtle as t\n'), ((774, 790), 'turtle.goto', 't.goto', (['(494)', '(260)'], {}), '(494, 260)\n', (780, 790), True, 'import turtle as t\n'), ((791, 802), 'turtle.pendown', 't.pendown', ([], {}), '()\n', (800, 802), True, 'import turtle as t\n'), ((932, 941), 'turtle.penup', 't.penup', ([], {}), '()\n', (939, 941), True, 'import turtle as t\n'), ((945, 960), 'turtle.setheading', 't.setheading', (['(0)'], {}), '(0)\n', (957, 960), True, 'import turtle as t\n'), ((962, 979), 'turtle.goto', 't.goto', (['(-494)', '(260)'], {}), '(-494, 260)\n', (968, 979), True, 'import turtle as t\n'), ((980, 991), 'turtle.pendown', 't.pendown', ([], {}), '()\n', (989, 991), True, 'import turtle as t\n'), ((1025, 1042), 'turtle.goto', 't.goto', (['(-474)', '(245)'], {}), '(-474, 245)\n', (1031, 1042), True, 'import turtle as t\n'), ((1535, 1549), 'turtle.hideturtle', 't.hideturtle', ([], {}), '()\n', (1547, 1549), True, 'import turtle as t\n'), ((71, 82), 'turtle.pendown', 't.pendown', ([], {}), '()\n', (80, 82), True, 'import turtle as t\n'), ((88, 100), 'turtle.pensize', 't.pensize', (['(1)'], {}), '(1)\n', (97, 100), True, 'import turtle as t\n'), ((106, 120), 'turtle.color', 't.color', (['color'], {}), '(color)\n', (113, 120), True, 'import turtle as t\n'), ((126, 140), 'turtle.begin_fill', 't.begin_fill', ([], {}), '()\n', (138, 140), True, 'import turtle as t\n'), ((278, 290), 'turtle.end_fill', 't.end_fill', ([], {}), '()\n', (288, 290), True, 'import turtle as t\n'), ((296, 305), 'turtle.penup', 't.penup', ([], {}), '()\n', (303, 305), True, 'import turtle as t\n'), ((485, 499), 'turtle.color', 't.color', (['color'], {}), '(color)\n', (492, 499), True, 'import turtle as t\n'), ((505, 516), 'turtle.pendown', 't.pendown', ([], {}), '()\n', (514, 516), True, 'import turtle as t\n'), ((522, 536), 'turtle.begin_fill', 't.begin_fill', ([], {}), '()\n', (534, 536), True, 'import turtle as t\n'), ((542, 551), 'turtle.penup', 't.penup', ([], {}), '()\n', (549, 551), True, 'import turtle as t\n'), ((700, 712), 'turtle.end_fill', 't.end_fill', ([], {}), '()\n', (710, 712), True, 'import turtle as t\n'), ((836, 853), 'turtle.setheading', 't.setheading', (['(-90)'], {}), '(-90)\n', (848, 853), True, 'import turtle as t\n'), ((892, 909), 'turtle.setheading', 't.setheading', (['(-90)'], {}), '(-90)\n', (904, 909), True, 'import turtle as t\n'), ((915, 928), 'turtle.forward', 't.forward', (['(80)'], {}), '(80)\n', (924, 928), True, 'import turtle as t\n'), ((1197, 1216), 'turtle.goto', 't.goto', (['(-434)', 'gotoy'], {}), '(-434, gotoy)\n', (1203, 1216), True, 'import turtle as t\n'), ((1245, 1256), 'turtle.pendown', 't.pendown', ([], {}), '()\n', (1254, 1256), True, 'import turtle as t\n'), ((1367, 1386), 'turtle.goto', 't.goto', (['(-476)', 'gotoy'], {}), '(-476, gotoy)\n', (1373, 1386), True, 'import turtle as t\n'), ((1474, 1489), 'turtle.setheading', 't.setheading', (['(0)'], {}), '(0)\n', (1486, 1489), True, 'import turtle as t\n'), ((1499, 1512), 'turtle.forward', 't.forward', (['(84)'], {}), '(84)\n', (1508, 1512), True, 'import turtle as t\n'), ((1522, 1531), 'turtle.penup', 't.penup', ([], {}), '()\n', (1529, 1531), True, 'import turtle as t\n'), ((180, 201), 'turtle.forward', 't.forward', 
(['horizontal'], {}), '(horizontal)\n', (189, 201), True, 'import turtle as t\n'), ((211, 222), 'turtle.right', 't.right', (['(90)'], {}), '(90)\n', (218, 222), True, 'import turtle as t\n'), ((232, 251), 'turtle.forward', 't.forward', (['vertical'], {}), '(vertical)\n', (241, 251), True, 'import turtle as t\n'), ((261, 272), 'turtle.right', 't.right', (['(90)'], {}), '(90)\n', (268, 272), True, 'import turtle as t\n'), ((596, 613), 'turtle.forward', 't.forward', (['length'], {}), '(length)\n', (605, 613), True, 'import turtle as t\n'), ((623, 641), 'turtle.left', 't.left', (['smallangle'], {}), '(smallangle)\n', (629, 641), True, 'import turtle as t\n'), ((651, 668), 'turtle.forward', 't.forward', (['length'], {}), '(length)\n', (660, 668), True, 'import turtle as t\n'), ((678, 694), 'turtle.left', 't.left', (['bigangle'], {}), '(bigangle)\n', (684, 694), True, 'import turtle as t\n'), ((1134, 1149), 'turtle.setheading', 't.setheading', (['(0)'], {}), '(0)\n', (1146, 1149), True, 'import turtle as t\n'), ((1159, 1172), 'turtle.forward', 't.forward', (['(84)'], {}), '(84)\n', (1168, 1172), True, 'import turtle as t\n'), ((1182, 1191), 'turtle.penup', 't.penup', ([], {}), '()\n', (1189, 1191), True, 'import turtle as t\n'), ((1323, 1338), 'turtle.setheading', 't.setheading', (['(0)'], {}), '(0)\n', (1335, 1338), True, 'import turtle as t\n'), ((1348, 1361), 'turtle.forward', 't.forward', (['(84)'], {}), '(84)\n', (1357, 1361), True, 'import turtle as t\n')] |
bryanlimy/technical-interview | linked-list/delete_zero_sum_nodes.py | f888a4fb2bc4d34dda6cd74b6e4215f46d5ce6d6 | # Given a linked list, remove consecutive nodes that sums up to zero
# https://www.careercup.com/question?id=5717797377146880
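# e.g. 6 -> -6 -> 8 -> 4 -> -12 -> 9 -> 8 -> -8  reduces to just  9,
# since (6, -6), (8, 4, -12) and (8, -8) are consecutive runs that sum to zero.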
from util import *
def remove_zero_sum(head):
start = head
new = None
root = None
while start:
end = start.next
total = start.value
zero = False
while end:
total += end.value
if total == 0:
zero = True
start = end
break
end = end.next
if not zero and not new:
new = Node(start.value)
root = new
        elif not zero and new:
            new.next = Node(start.value)
            new = new.next
start = start.next
return root
if __name__ == "__main__":
s1 = [6, -6, 8, 4, -12, 9, 8, -8]
s2 = [4, 6 - 10, 8, 9, 10, -19, 10, -18, 20, 25]
s3 = [2, 3, -5, 10, 10, -5, -5, 20, 5, -5]
samples = [s1,s2,s3]
for sample in samples:
head = create_linked_list(sample)
print(linked_list_to_list(head))
result = remove_zero_sum(head)
print(linked_list_to_list(result))
print("\n")
| [] |
psignoret/azure-cli | src/azure-cli/azure/cli/command_modules/maps/custom.py | 1a4a043750315f9a7f2894b4287126089978b615 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.log import get_logger
from knack.prompting import prompt_y_n
from knack.util import CLIError
from azure.mgmt.maps.models import (
MapsAccountCreateParameters,
Sku)
ACCOUNT_LOCATION = 'global'
logger = get_logger(__name__)
def create_account(client, resource_group_name, account_name, sku_name='S0', tags=None, force=None):
terms = 'By creating an Azure Maps account, you agree that you have read and agree to the ' \
'\nLicense (https://azure.microsoft.com/support/legal/) and ' \
'\nPrivacy Statement (https://privacy.microsoft.com/privacystatement).'
hint = 'Please select.'
client_denied_terms = 'You must agree to the License and Privacy Statement to create an account.'
# Show ToS message to the user
logger.warning(terms)
# Prompt yes/no for the user, if --force parameter is not passed in.
if not force:
option = prompt_y_n(hint)
if not option:
raise CLIError(client_denied_terms)
# Submit query
sku = Sku(name=sku_name)
maps_account_create_params = MapsAccountCreateParameters(location=ACCOUNT_LOCATION, sku=sku, tags=tags)
return client.create_or_update(resource_group_name, account_name, maps_account_create_params)
def list_accounts(client, resource_group_name=None):
# Retrieve accounts via subscription
if resource_group_name is None:
return client.list_by_subscription()
# Retrieve accounts via resource group
return client.list_by_resource_group(resource_group_name)
def generic_update_account(instance, sku_name=None, tags=None):
# Pre-populate with old instance
maps_account_create_params = MapsAccountCreateParameters(location=ACCOUNT_LOCATION, sku=instance.sku,
tags=instance.tags)
# Update fields with new parameter values
if sku_name:
maps_account_create_params.sku.name = sku_name
if tags:
maps_account_create_params.tags = tags
return maps_account_create_params
| [((569, 589), 'knack.log.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (579, 589), False, 'from knack.log import get_logger\n'), ((1370, 1388), 'azure.mgmt.maps.models.Sku', 'Sku', ([], {'name': 'sku_name'}), '(name=sku_name)\n', (1373, 1388), False, 'from azure.mgmt.maps.models import MapsAccountCreateParameters, Sku\n'), ((1422, 1496), 'azure.mgmt.maps.models.MapsAccountCreateParameters', 'MapsAccountCreateParameters', ([], {'location': 'ACCOUNT_LOCATION', 'sku': 'sku', 'tags': 'tags'}), '(location=ACCOUNT_LOCATION, sku=sku, tags=tags)\n', (1449, 1496), False, 'from azure.mgmt.maps.models import MapsAccountCreateParameters, Sku\n'), ((2013, 2109), 'azure.mgmt.maps.models.MapsAccountCreateParameters', 'MapsAccountCreateParameters', ([], {'location': 'ACCOUNT_LOCATION', 'sku': 'instance.sku', 'tags': 'instance.tags'}), '(location=ACCOUNT_LOCATION, sku=instance.sku,\n tags=instance.tags)\n', (2040, 2109), False, 'from azure.mgmt.maps.models import MapsAccountCreateParameters, Sku\n'), ((1252, 1268), 'knack.prompting.prompt_y_n', 'prompt_y_n', (['hint'], {}), '(hint)\n', (1262, 1268), False, 'from knack.prompting import prompt_y_n\n'), ((1310, 1339), 'knack.util.CLIError', 'CLIError', (['client_denied_terms'], {}), '(client_denied_terms)\n', (1318, 1339), False, 'from knack.util import CLIError\n')] |
Leavingseason/wsdm2022-seqrecsys | examples/wsdm2022/run_seqreco_B.py | 4659edb93a96300d7a52bb0e1b9c912e3fae2a76 | import sys
import os
from tempfile import TemporaryDirectory
import numpy as np
import tensorflow.compat.v1 as tf
tf.get_logger().setLevel('ERROR') # only show error messages
from recommenders.utils.timer import Timer
from recommenders.utils.constants import SEED
from recommenders.models.deeprec.deeprec_utils import (
prepare_hparams
)
from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing, _create_vocab
from recommenders.datasets.download_utils import maybe_download
from recommenders.models.deeprec.models.sequential.sli_rec import SLI_RECModel as SeqModel
# from recommenders.models.deeprec.models.sequential.asvd import A2SVDModel as SeqModel
# from recommenders.models.deeprec.models.sequential.caser import CaserModel as SeqModel
# from recommenders.models.deeprec.models.sequential.gru4rec import GRU4RecModel as SeqModel
# from recommenders.models.deeprec.models.sequential.sum import SUMModel as SeqModel
#from recommenders.models.deeprec.models.sequential.nextitnet import NextItNetModel
from recommenders.models.deeprec.io.sequential_iterator import SequentialIterator
#from recommenders.models.deeprec.io.nextitnet_iterator import NextItNetIterator
print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
yaml_file = '/home/jialia/wsdm/src/recommenders/examples/wsdm2022/sli_rec_B.yaml'
RANDOM_SEED = SEED # Set None for non-deterministic result
# data_path = os.path.join("tests", "resources", "deeprec", "slirec")
# data_path = '/home/jialia/wsdm/seq_datasets/B_full_feature_v2'
data_path = sys.argv[1]
print(os.path.abspath(data_path))  ## the absolute data path, resolved relative to the directory the command was run from
# for test
train_file = os.path.join(data_path, r'train_instances.txt')
valid_file = os.path.join(data_path, r'valid_instances.txt')
test_file = os.path.join(data_path, r'valid.tsv')
pred_file = os.path.join(data_path, r'inter_test.tsv')
final_pred_file = os.path.join(data_path, r'final_test.tsv')
user_vocab = os.path.join(data_path, r'user_vocab.pkl')
item_vocab = os.path.join(data_path, r'item_vocab.pkl')
cate_vocab = os.path.join(data_path, r'category_vocab.pkl')
output_file = os.path.join(data_path, r'inter_test_output.txt')
submit_file = os.path.join(data_path, r'final_test_output.txt')
train_num_ngs = 9 # number of negative instances with a positive instance for training
valid_num_ngs = 9 # number of negative instances with a positive instance for validation
test_num_ngs = 9 # number of negative instances with a positive instance for testing
_create_vocab(
[train_file, valid_file],
user_vocab, item_vocab, cate_vocab
)
### NOTE:
### remember to use `_create_vocab(train_file, user_vocab, item_vocab, cate_vocab)` to generate the user_vocab, item_vocab and cate_vocab files, if you are using your own dataset rather than using our demo Amazon dataset.
hparams = prepare_hparams(yaml_file,
# user_dropout=False,
embed_l2=0.,
layer_l2=0.,
enable_BN=True, ##-- True
learning_rate=0.001, # set to 0.01 if batch normalization is disable else 0.001
epochs=100000,
EARLY_STOP=40000,
batch_size=400,
show_step=5000,
MODEL_DIR=os.path.join(data_path, "model/"),
SUMMARIES_DIR=os.path.join(data_path, "summary/"),
user_vocab=user_vocab,
item_vocab=item_vocab,
cate_vocab=cate_vocab,
need_sample=False,
train_num_ngs=train_num_ngs, # provides the number of negative instances for each positive instance for loss computation.
loss='log_loss', #'log_loss', 'softmax'
max_seq_length=50,
cont_feat_len=85,
use_cont_feat=False,
init_item_emb=False,
shuffle=True
)
print(hparams.values)
input_creator = SequentialIterator
model = SeqModel(hparams, input_creator, seed=RANDOM_SEED)
# model.load_model(os.path.join(data_path, "model_20220118_20k_0.8923", 'step_20000'))
with Timer() as train_time:
model = model.fit(train_file, valid_file, valid_num_ngs=9, eval_metric='auc')
print('Time cost for training is {0:.2f} mins'.format(train_time.interval/60.0))
### model = model.fit(test_file, test_file, valid_num_ngs=9, eval_metric='auc') ##-- quick test
model.load_model(os.path.join(data_path, "model", 'best_model'))
res_syn = model.run_eval(test_file, num_ngs=9)
print(res_syn)
model.predict(pred_file, output_file)
model.predict(final_pred_file, submit_file)
# print('Job finished. B, continue training = 20k, seq=50')
# print('Job finished. B_v2, epoch=50k, seq=100')
## ASVD: 0.867497
## GRU: 0.877529
## SLi-Rec: 0.892736
## B_v4: 0.8937
print("Job:B_full_feature_v2, with BN, no cont feat, seq=50, shuffle=True")
## B_full_feature_v2 no cont_feat, with BN
##5k: 0.8778
##10k: 0.8827
##20k: 0.8848
##25k: 0.8824
##35k: 0.8878
##40k: 0.8903
##45k: 0.8876
##50k: 0.8925
##55k: 0.8903
##60k: 0.8894
##65k: 0.8904
##70k: 0.8814
##75k: 0.8896
##80k: 0.8871
##85k: 0.8920
## with shuffle:
##5k: 0.8793
##10k: 0.8884
##15k: 0.8898
##20k: 0.8923
##25k: 0.8908
##30k: 0.8895
##35k: 0.8888
##40k: 0.8913
##45k: 0.8909
##50k: 0.8876
##65k: 0.8881 | [((1717, 1763), 'os.path.join', 'os.path.join', (['data_path', '"""train_instances.txt"""'], {}), "(data_path, 'train_instances.txt')\n", (1729, 1763), False, 'import os\n'), ((1778, 1824), 'os.path.join', 'os.path.join', (['data_path', '"""valid_instances.txt"""'], {}), "(data_path, 'valid_instances.txt')\n", (1790, 1824), False, 'import os\n'), ((1838, 1874), 'os.path.join', 'os.path.join', (['data_path', '"""valid.tsv"""'], {}), "(data_path, 'valid.tsv')\n", (1850, 1874), False, 'import os\n'), ((1888, 1929), 'os.path.join', 'os.path.join', (['data_path', '"""inter_test.tsv"""'], {}), "(data_path, 'inter_test.tsv')\n", (1900, 1929), False, 'import os\n'), ((1949, 1990), 'os.path.join', 'os.path.join', (['data_path', '"""final_test.tsv"""'], {}), "(data_path, 'final_test.tsv')\n", (1961, 1990), False, 'import os\n'), ((2005, 2046), 'os.path.join', 'os.path.join', (['data_path', '"""user_vocab.pkl"""'], {}), "(data_path, 'user_vocab.pkl')\n", (2017, 2046), False, 'import os\n'), ((2061, 2102), 'os.path.join', 'os.path.join', (['data_path', '"""item_vocab.pkl"""'], {}), "(data_path, 'item_vocab.pkl')\n", (2073, 2102), False, 'import os\n'), ((2117, 2162), 'os.path.join', 'os.path.join', (['data_path', '"""category_vocab.pkl"""'], {}), "(data_path, 'category_vocab.pkl')\n", (2129, 2162), False, 'import os\n'), ((2178, 2226), 'os.path.join', 'os.path.join', (['data_path', '"""inter_test_output.txt"""'], {}), "(data_path, 'inter_test_output.txt')\n", (2190, 2226), False, 'import os\n'), ((2242, 2290), 'os.path.join', 'os.path.join', (['data_path', '"""final_test_output.txt"""'], {}), "(data_path, 'final_test_output.txt')\n", (2254, 2290), False, 'import os\n'), ((2557, 2632), 'recommenders.datasets.amazon_reviews._create_vocab', '_create_vocab', (['[train_file, valid_file]', 'user_vocab', 'item_vocab', 'cate_vocab'], {}), '([train_file, valid_file], user_vocab, item_vocab, cate_vocab)\n', (2570, 2632), False, 'from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing, _create_vocab\n'), ((4234, 4284), 'recommenders.models.deeprec.models.sequential.sli_rec.SLI_RECModel', 'SeqModel', (['hparams', 'input_creator'], {'seed': 'RANDOM_SEED'}), '(hparams, input_creator, seed=RANDOM_SEED)\n', (4242, 4284), True, 'from recommenders.models.deeprec.models.sequential.sli_rec import SLI_RECModel as SeqModel\n'), ((1629, 1655), 'os.path.abspath', 'os.path.abspath', (['data_path'], {}), '(data_path)\n', (1644, 1655), False, 'import os\n'), ((4379, 4386), 'recommenders.utils.timer.Timer', 'Timer', ([], {}), '()\n', (4384, 4386), False, 'from recommenders.utils.timer import Timer\n'), ((4680, 4726), 'os.path.join', 'os.path.join', (['data_path', '"""model"""', '"""best_model"""'], {}), "(data_path, 'model', 'best_model')\n", (4692, 4726), False, 'import os\n'), ((115, 130), 'tensorflow.compat.v1.get_logger', 'tf.get_logger', ([], {}), '()\n', (128, 130), True, 'import tensorflow.compat.v1 as tf\n'), ((3411, 3444), 'os.path.join', 'os.path.join', (['data_path', '"""model/"""'], {}), "(data_path, 'model/')\n", (3423, 3444), False, 'import os\n'), ((3486, 3521), 'os.path.join', 'os.path.join', (['data_path', '"""summary/"""'], {}), "(data_path, 'summary/')\n", (3498, 3521), False, 'import os\n')] |
fgrie/ctypesgen | ctypesgen/ctypedescs.py | bc1627648a1479cefd1a2c3c261dd0471358cfff | #!/usr/bin/env python
"""
ctypesgen.ctypedescs contains classes to represent a C type. All of these
classes are subclasses of CtypesType.
Unlike in previous versions of ctypesgen, CtypesType and its subclasses are
completely independent of the parser module.
The most important method of CtypesType and its subclasses is the py_string
method. str(ctype) returns a string which, when evaluated in the wrapper
at runtime, results in a ctypes type object.
For example, a CtypesType
representing an array of four integers could be created using:
>>> ctype = CtypesArray(CtypesSimple("int",True,0),4)
str(ctype) would evaluate to "c_int * 4".
"""
import warnings
__docformat__ = "restructuredtext"
ctypes_type_map = {
# typename signed longs
("void", True, 0): "None",
("int", True, 0): "c_int",
("int", False, 0): "c_uint",
("int", True, 1): "c_long",
("int", False, 1): "c_ulong",
("char", True, 0): "c_char",
("char", False, 0): "c_ubyte",
("short", True, 0): "c_short",
("short", False, 0): "c_ushort",
("float", True, 0): "c_float",
("double", True, 0): "c_double",
("double", True, 1): "c_longdouble",
("int8_t", True, 0): "c_int8",
("__int8", True, 0): "c_int8",
("int16_t", True, 0): "c_int16",
("__int16", True, 0): "c_int16",
("int32_t", True, 0): "c_int32",
("__int32", True, 0): "c_int32",
("int64_t", True, 0): "c_int64",
("__int64", True, 0): "c_int64",
("uint8_t", True, 0): "c_uint8",
("uint16_t", True, 0): "c_uint16",
("uint32_t", True, 0): "c_uint32",
("uint64_t", True, 0): "c_uint64",
("_Bool", True, 0): "c_bool",
}
ctypes_type_map_python_builtin = {
("int", True, 2): "c_longlong",
("int", False, 2): "c_ulonglong",
("size_t", True, 0): "c_size_t",
("apr_int64_t", True, 0): "c_int64",
("off64_t", True, 0): "c_int64",
("apr_uint64_t", True, 0): "c_uint64",
("wchar_t", True, 0): "c_wchar",
("ptrdiff_t", True, 0): "c_ptrdiff_t", # Requires definition in preamble
("ssize_t", True, 0): "c_ptrdiff_t", # Requires definition in preamble
("va_list", True, 0): "c_void_p",
}
# This protocol is used for walking type trees.
class CtypesTypeVisitor(object):
def visit_struct(self, struct):
pass
def visit_enum(self, enum):
pass
def visit_typedef(self, name):
pass
def visit_error(self, error, cls):
pass
def visit_identifier(self, identifier):
# This one comes from inside ExpressionNodes. There may be
# ExpressionNode objects in array count expressions.
pass
def visit_type_and_collect_info(ctype):
class Visitor(CtypesTypeVisitor):
def visit_struct(self, struct):
structs.append(struct)
def visit_enum(self, enum):
enums.append(enum)
def visit_typedef(self, typedef):
typedefs.append(typedef)
def visit_error(self, error, cls):
errors.append((error, cls))
def visit_identifier(self, identifier):
identifiers.append(identifier)
structs = []
enums = []
typedefs = []
errors = []
identifiers = []
v = Visitor()
ctype.visit(v)
return structs, enums, typedefs, errors, identifiers
# Remove one level of indirection from funtion pointer; needed for typedefs
# and function parameters.
def remove_function_pointer(t):
if type(t) == CtypesPointer and type(t.destination) == CtypesFunction:
return t.destination
elif type(t) == CtypesPointer:
t.destination = remove_function_pointer(t.destination)
return t
else:
return t
class CtypesType(object):
def __init__(self):
super(CtypesType, self).__init__()
self.errors = []
def __repr__(self):
return '<Ctype (%s) "%s">' % (type(self).__name__, self.py_string())
def error(self, message, cls=None):
self.errors.append((message, cls))
def visit(self, visitor):
for error, cls in self.errors:
visitor.visit_error(error, cls)
class CtypesSimple(CtypesType):
"""Represents a builtin type, like "char" or "int"."""
def __init__(self, name, signed, longs):
super(CtypesSimple, self).__init__()
self.name = name
self.signed = signed
self.longs = longs
def py_string(self, ignore_can_be_ctype=None):
return ctypes_type_map[(self.name, self.signed, self.longs)]
class CtypesSpecial(CtypesType):
def __init__(self, name):
super(CtypesSpecial, self).__init__()
self.name = name
def py_string(self, ignore_can_be_ctype=None):
return self.name
class CtypesTypedef(CtypesType):
"""Represents a type defined by a typedef."""
def __init__(self, name):
super(CtypesTypedef, self).__init__()
self.name = name
def visit(self, visitor):
if not self.errors:
visitor.visit_typedef(self.name)
super(CtypesTypedef, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return self.name
class CtypesBitfield(CtypesType):
def __init__(self, base, bitfield):
super(CtypesBitfield, self).__init__()
self.base = base
self.bitfield = bitfield
def visit(self, visitor):
self.base.visit(visitor)
super(CtypesBitfield, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return self.base.py_string()
class CtypesPointer(CtypesType):
def __init__(self, destination, qualifiers):
super(CtypesPointer, self).__init__()
self.destination = destination
self.qualifiers = qualifiers
def visit(self, visitor):
if self.destination:
self.destination.visit(visitor)
super(CtypesPointer, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "POINTER(%s)" % self.destination.py_string()
class CtypesArray(CtypesType):
def __init__(self, base, count):
super(CtypesArray, self).__init__()
self.base = base
self.count = count
def visit(self, visitor):
self.base.visit(visitor)
if self.count:
self.count.visit(visitor)
super(CtypesArray, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
if self.count is None:
return "POINTER(%s)" % self.base.py_string()
if type(self.base) == CtypesArray:
return "(%s) * int(%s)" % (self.base.py_string(), self.count.py_string(False))
else:
return "%s * int(%s)" % (self.base.py_string(), self.count.py_string(False))
class CtypesNoErrorCheck(object):
def py_string(self, ignore_can_be_ctype=None):
return "None"
def __bool__(self):
return False
__nonzero__ = __bool__
class CtypesPointerCast(object):
def __init__(self, target):
self.target = target
def py_string(self, ignore_can_be_ctype=None):
return "lambda v,*a : cast(v, {})".format(self.target.py_string())
class CtypesFunction(CtypesType):
def __init__(self, restype, parameters, variadic, attrib=dict()):
super(CtypesFunction, self).__init__()
self.restype = restype
self.errcheck = CtypesNoErrorCheck()
# Don't allow POINTER(None) (c_void_p) as a restype... causes errors
# when ctypes automagically returns it as an int.
# Instead, convert to POINTER(c_void). c_void is not a ctypes type,
# you can make it any arbitrary type.
if (
type(self.restype) == CtypesPointer
and type(self.restype.destination) == CtypesSimple
and self.restype.destination.name == "void"
):
# we will provide a means of converting this to a c_void_p
self.restype = CtypesPointer(CtypesSpecial("c_ubyte"), ())
self.errcheck = CtypesPointerCast(CtypesSpecial("c_void_p"))
# Return "String" instead of "POINTER(c_char)"
if self.restype.py_string() == "POINTER(c_char)":
if "const" in self.restype.qualifiers:
self.restype = CtypesSpecial("c_char_p")
else:
self.restype = CtypesSpecial("String")
self.argtypes = [remove_function_pointer(p) for p in parameters]
self.variadic = variadic
self.attrib = attrib
def visit(self, visitor):
self.restype.visit(visitor)
for a in self.argtypes:
a.visit(visitor)
super(CtypesFunction, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "CFUNCTYPE(UNCHECKED(%s), %s)" % (
self.restype.py_string(),
", ".join([a.py_string() for a in self.argtypes]),
)
last_tagnum = 0
def anonymous_struct_tagnum():
global last_tagnum
last_tagnum += 1
return last_tagnum
def fmt_anonymous_struct_tag(num):
return "anon_%d" % num
def anonymous_struct_tag():
return fmt_anonymous_struct_tag(anonymous_struct_tagnum())
class CtypesStruct(CtypesType):
def __init__(self, tag, attrib, variety, members, src=None):
super(CtypesStruct, self).__init__()
self.tag = tag
self.attrib = attrib
self.variety = variety # "struct" or "union"
self.members = members
if type(self.tag) == int or not self.tag:
if type(self.tag) == int:
self.tag = fmt_anonymous_struct_tag(self.tag)
else:
self.tag = anonymous_struct_tag()
self.anonymous = True
else:
self.anonymous = False
if self.members == None:
self.opaque = True
else:
self.opaque = False
self.src = src
def get_required_types(self):
types = super(CtypesStruct, self).get_required_types()
types.add((self.variety, self.tag))
return types
def visit(self, visitor):
visitor.visit_struct(self)
if not self.opaque:
for name, ctype in self.members:
ctype.visit(visitor)
super(CtypesStruct, self).visit(visitor)
def get_subtypes(self):
if self.opaque:
return set()
else:
return set([m[1] for m in self.members])
def py_string(self, ignore_can_be_ctype=None):
return "%s_%s" % (self.variety, self.tag)
last_tagnum = 0
def anonymous_enum_tag():
global last_tagnum
last_tagnum += 1
return "anon_%d" % last_tagnum
class CtypesEnum(CtypesType):
def __init__(self, tag, enumerators, src=None):
super(CtypesEnum, self).__init__()
self.tag = tag
self.enumerators = enumerators
if not self.tag:
self.tag = anonymous_enum_tag()
self.anonymous = True
else:
self.anonymous = False
if self.enumerators == None:
self.opaque = True
else:
self.opaque = False
self.src = src
def visit(self, visitor):
visitor.visit_enum(self)
super(CtypesEnum, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "enum_%s" % self.tag
| [] |
Krai53n/pytouch | pytouch/elements.py | 8a1c69c4ba5981f3cb0bf00db3bcef5dd15e8375 | from random import randint
import pyxel
from constants import Screen
import cursors
class Text:
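    # Draws a text string horizontally centered at the top of the screen.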
def __init__(self, text):
self._text = text
self._symbol_len = 3
self._padding_len = 1
def _count_text_len(self):
return (
self._symbol_len + self._padding_len
) * len(self._text) - self._padding_len
def _x_text_center_position(self):
return (Screen.width - self._count_text_len()) // 2
def draw(self):
pyxel.text(self._x_text_center_position(), 0, self._text, 2)
class Score:
def __init__(self, padding_right=2, padding_top=2):
self._padding_right = padding_right
self._padding_top = padding_top
self.score = 0
def increase(self):
self.score += 1
def reduce(self):
self.score -= 1
def draw(self):
pyxel.text(self._padding_right, self._padding_top,
f"Score: {self.score}", (Screen.bg - 2) % 16)
class Circle:
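    # A filled circle whose radius can be reset to zero or grown step by step.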
def __init__(self):
self._r = 0
self._col = (Screen.bg - 1) % 16
def zero(self):
self._r = 0
def increase(self, size=1):
self._r += size
@property
def r(self):
return self._r
@r.setter
def r(self, r):
self._r = r
@property
def col(self):
return self._col
@col.setter
def col(self, color):
self._col = color
def draw(self, x, y):
pyxel.circ(x, y, self._r, self._col)
class ReachCircle(Circle):
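    # An outlined circle that respawns at a random position and radius.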
def __init__(self):
super().__init__()
self.min_r = 10
self.respawn()
@property
def x(self):
return self._x
@property
def y(self):
return self._y
def respawn(self):
self._x = randint(self._r, Screen.width - self._r)
self._y = randint(self._r, Screen.height - self._r)
self._r = randint(self.min_r, min(Screen.width, Screen.height) // 2) - 4
def draw(self):
pyxel.circb(self._x, self._y, self._r, self._col)
| [((858, 959), 'pyxel.text', 'pyxel.text', (['self._padding_right', 'self._padding_top', 'f"""Score: {self.score}"""', '((Screen.bg - 2) % 16)'], {}), "(self._padding_right, self._padding_top, f'Score: {self.score}', \n (Screen.bg - 2) % 16)\n", (868, 959), False, 'import pyxel\n'), ((1449, 1485), 'pyxel.circ', 'pyxel.circ', (['x', 'y', 'self._r', 'self._col'], {}), '(x, y, self._r, self._col)\n', (1459, 1485), False, 'import pyxel\n'), ((1765, 1805), 'random.randint', 'randint', (['self._r', '(Screen.width - self._r)'], {}), '(self._r, Screen.width - self._r)\n', (1772, 1805), False, 'from random import randint\n'), ((1824, 1865), 'random.randint', 'randint', (['self._r', '(Screen.height - self._r)'], {}), '(self._r, Screen.height - self._r)\n', (1831, 1865), False, 'from random import randint\n'), ((1976, 2025), 'pyxel.circb', 'pyxel.circb', (['self._x', 'self._y', 'self._r', 'self._col'], {}), '(self._x, self._y, self._r, self._col)\n', (1987, 2025), False, 'import pyxel\n')] |
cleve/varidb | app/volume/admin_process.py | fc1b10aa4d708cee1c83909f10773948cee0c539 | from pulzarutils.utils import Utils
from pulzarutils.utils import Constants
from pulzarutils.messenger import Messenger
from pulzarcore.core_db import DB
class AdminProcess:
"""Handle admin operations from manage
"""
def __init__(self, logger):
self.TAG = self.__class__.__name__
self.logger = logger
self.utils = Utils()
self.messenger = Messenger()
self.mark_of_local_verification = b'varidb_execute_file_verification'
def process_request(self, url_path):
"""Get request type, checking for key value.
"""
regex_result = self.utils.get_search_regex(
url_path, Constants.RE_ADMIN)
if regex_result:
try:
call_path_list = regex_result.groups()[0].split('/')
call_path_list = [x for x in call_path_list if x != '']
# All nodes
if len(call_path_list) == 1 and call_path_list[0] == 'start_backup':
db_backup = DB(Constants.DB_BACKUP)
db_backup.update_or_insert_value(
self.mark_of_local_verification, b'1')
self.messenger.code_type = Constants.BACKUP_SCHEDULED
self.messenger.set_message = 'backup scheduled'
except Exception as err:
self.logger.exception('{}:{}'.format(self.TAG, err))
self.messenger.code_type = Constants.PULZAR_ERROR
self.messenger.set_message = str(err)
self.messenger.mark_as_failed()
else:
self.messenger.code_type = Constants.USER_ERROR
self.messenger.set_message = 'wrong request'
self.messenger.mark_as_failed()
return self.messenger
| [((353, 360), 'pulzarutils.utils.Utils', 'Utils', ([], {}), '()\n', (358, 360), False, 'from pulzarutils.utils import Utils\n'), ((386, 397), 'pulzarutils.messenger.Messenger', 'Messenger', ([], {}), '()\n', (395, 397), False, 'from pulzarutils.messenger import Messenger\n'), ((1005, 1028), 'pulzarcore.core_db.DB', 'DB', (['Constants.DB_BACKUP'], {}), '(Constants.DB_BACKUP)\n', (1007, 1028), False, 'from pulzarcore.core_db import DB\n')] |
fduthilleul/scap-security-guide | tests/ssg_test_suite/profile.py | f9b67869600f6c20dcb0ba83801578cec1a51bba | #!/usr/bin/env python2
from __future__ import print_function
import atexit
import logging
import sys
import ssg_test_suite.oscap
import ssg_test_suite.virt
from ssg_test_suite.rule import get_viable_profiles
from ssg_test_suite.virt import SnapshotStack
logging.getLogger(__name__).addHandler(logging.NullHandler())
def perform_profile_check(options):
"""Perform profile check.
Iterate over profiles in datastream and perform scanning of unaltered VM
using every profile according to input. Also perform remediation run.
    The return value is not defined; the textual output and generated reports are
    the result.
"""
dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
options.domain_name)
if dom is None:
sys.exit(1)
snapshot_stack = SnapshotStack(dom)
atexit.register(snapshot_stack.clear)
snapshot_stack.create('origin')
ssg_test_suite.virt.start_domain(dom)
domain_ip = ssg_test_suite.virt.determine_ip(dom)
has_worked = False
profiles = get_viable_profiles(options.target,
options.datastream,
options.benchmark_id)
if len(profiles) > 1:
snapshot_stack.create('profile')
for profile in profiles:
logging.info("Evaluation of profile {0}.".format(profile))
has_worked = True
runner = options.remediate_using
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'initial',
options.datastream,
options.benchmark_id,
runner=runner)
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'remediation',
options.datastream,
options.benchmark_id,
runner=runner)
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'final',
options.datastream,
options.benchmark_id,
runner=runner)
snapshot_stack.revert(delete=False)
if not has_worked:
logging.error("Nothing has been tested!")
snapshot_stack.delete()
# depending on number of profiles we have either "origin" snapshot
# still to be reverted (multiple profiles) or we are reverted
# completely (only one profile was run)
| [((296, 317), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (315, 317), False, 'import logging\n'), ((826, 844), 'ssg_test_suite.virt.SnapshotStack', 'SnapshotStack', (['dom'], {}), '(dom)\n', (839, 844), False, 'from ssg_test_suite.virt import SnapshotStack\n'), ((849, 886), 'atexit.register', 'atexit.register', (['snapshot_stack.clear'], {}), '(snapshot_stack.clear)\n', (864, 886), False, 'import atexit\n'), ((1059, 1136), 'ssg_test_suite.rule.get_viable_profiles', 'get_viable_profiles', (['options.target', 'options.datastream', 'options.benchmark_id'], {}), '(options.target, options.datastream, options.benchmark_id)\n', (1078, 1136), False, 'from ssg_test_suite.rule import get_viable_profiles\n'), ((257, 284), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (274, 284), False, 'import logging\n'), ((793, 804), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (801, 804), False, 'import sys\n'), ((2516, 2557), 'logging.error', 'logging.error', (['"""Nothing has been tested!"""'], {}), "('Nothing has been tested!')\n", (2529, 2557), False, 'import logging\n')] |
solidaritreebiz/Solidaritree | lib/wtforms/ext/appengine/fields.py | 15cc2e10e4cec56eb4fe218166d4157fcce9bf8d | import decimal
import operator
import warnings
from wtforms import fields, widgets
class ReferencePropertyField(fields.SelectFieldBase):
"""
A field for ``db.ReferenceProperty``. The list items are rendered in a
select.
:param reference_class:
A db.Model class which will be used to generate the default query
to make the list of items. If this is not specified, The `query`
property must be overridden before validation.
:param get_label:
If a string, use this attribute on the model class as the label
associated with each option. If a one-argument callable, this callable
will be passed model instance and expected to return the label text.
Otherwise, the model object's `__str__` or `__unicode__` will be used.
:param allow_blank:
If set to true, a blank choice will be added to the top of the list
to allow `None` to be chosen.
:param blank_text:
Use this to override the default blank option's label.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, reference_class=None,
label_attr=None, get_label=None, allow_blank=False,
blank_text=u'', **kwargs):
super(ReferencePropertyField, self).__init__(label, validators,
**kwargs)
if label_attr is not None:
warnings.warn('label_attr= will be removed in WTForms 1.1, use get_label= instead.', DeprecationWarning)
self.get_label = operator.attrgetter(label_attr)
elif get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, basestring):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if reference_class is not None:
self.query = reference_class.all()
def _get_data(self):
if self._formdata is not None:
for obj in self.query:
if str(obj.key()) == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for obj in self.query:
key = str(obj.key())
label = self.get_label(obj)
            yield (key, label, self.data and (self.data.key() == obj.key()))
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for obj in self.query:
if str(self.data.key()) == str(obj.key()):
break
else:
raise ValueError(self.gettext(u'Not a valid choice'))
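# Illustrative usage sketch (not part of the original module). ``Author`` is a
# hypothetical db.Model with a ``name`` property used only for this example:
#
#   class BookForm(wtforms.Form):
#       author = ReferencePropertyField(u'Author',
#                                       reference_class=Author,
#                                       get_label='name',
#                                       allow_blank=True,
#                                       blank_text=u'(no author)')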
class StringListPropertyField(fields.TextAreaField):
"""
A field for ``db.StringListProperty``. The list items are rendered in a
textarea.
"""
def _value(self):
if self.raw_data:
return self.raw_data[0]
else:
return self.data and unicode("\n".join(self.data)) or u''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = valuelist[0].splitlines()
except ValueError:
raise ValueError(self.gettext(u'Not a valid list'))
class GeoPtPropertyField(fields.TextField):
def process_formdata(self, valuelist):
if valuelist:
try:
lat, lon = valuelist[0].split(',')
self.data = u'%s,%s' % (decimal.Decimal(lat.strip()), decimal.Decimal(lon.strip()),)
except (decimal.InvalidOperation, ValueError):
raise ValueError(u'Not a valid coordinate location')
| [((1039, 1055), 'wtforms.widgets.Select', 'widgets.Select', ([], {}), '()\n', (1053, 1055), False, 'from wtforms import fields, widgets\n'), ((1426, 1539), 'warnings.warn', 'warnings.warn', (['"""label_attr= will be removed in WTForms 1.1, use get_label= instead."""', 'DeprecationWarning'], {}), "(\n 'label_attr= will be removed in WTForms 1.1, use get_label= instead.',\n DeprecationWarning)\n", (1439, 1539), False, 'import warnings\n'), ((1560, 1591), 'operator.attrgetter', 'operator.attrgetter', (['label_attr'], {}), '(label_attr)\n', (1579, 1591), False, 'import operator\n'), ((1742, 1772), 'operator.attrgetter', 'operator.attrgetter', (['get_label'], {}), '(get_label)\n', (1761, 1772), False, 'import operator\n')] |
trevor-wieland/MTrainAI | mtrainsimulator.py | 47bab3bf3af9e5426a822a7d14586f1798674cd7 | import mtrain
import numpy as np
import pandas as pd
import random
def simulate_games(num_players=4, domino_size=12, num_games=250, collect_data=True,
debug=False, players=["Random", "Greedy", "Probability", "Neural"],
file_name="PlayData/data4_12_250"):
"""
Runs the mexican train game repeatedly with different combinations of players to
generate data to be used in testing and training the neural net.
If collect_data is on, the play data is retrieved and stored into a .xlsx file for later use
The format for the file name for this is as follows:
PlayData/data + num_players + _ + domino_size + _ + num_games + .xlsx
This spreadsheet is to be used when training the neural net.
This script has no required parameters, and will run the game with the default params if
unchanged.
If collect_data is on, the players are selected randomly each game from:
["Random", "Greedy", "Probability"]
If collect_data is off, the players are selected in order from the parameter players.
When collect_data is off: len(players) must equal num_players
Returns a tuple of lists: (score_averages, win_percentage) corresponding to the players
"""
#Sets column names for building dataframe later on
column_names = ["round_number", "turn_number", "player_number", "play",
"t_num", "hand", "unknown", "potential_plays", "points"]
#Depending on mode of use, sets players and checks validity of player values
modes = []
if collect_data:
modes = ["Random", "Greedy", "Probability"]
else:
if not len(players) == num_players:
raise RuntimeError("len(players) must equal num_players when collect_data is off")
modes = players
#Simulates num_games of games
scores = np.ndarray((num_players, num_games))
wins = np.ndarray((num_players, num_games))
full_data = pd.DataFrame(columns=column_names)
current_index = 0
for game_num in range(0, num_games):
#Randomize players if in collect_data mode
game_modes = []
if collect_data:
for select in range(0, num_players):
game_modes.append(random.choice(modes))
else:
game_modes = modes
#Run game with parameters
results = mtrain.mexicantrain(num_players, domino_size, debug=debug,
modes=game_modes,
data_collection=collect_data,
data_index=current_index, file_name=file_name)
#If collecting data, data is stored into the dataframe
if collect_data:
current_index = results[2].index[-1] + 1
full_data = pd.concat([full_data, results[2]])
#Scores and wins are recorded into their respective arrays
for player_num in range(0, num_players):
scores[player_num, game_num] = results[0][player_num]
if results[1] == player_num:
wins[player_num, game_num] = 1
else:
wins[player_num, game_num] = 0
#Calculates performance of the players
score_averages = np.ndarray((num_players))
win_percentage = np.ndarray((num_players))
for player_num in range(0, num_players):
score_averages[player_num] = np.mean(scores[player_num, :])
win_percentage[player_num] = np.mean(wins[player_num, :])
#If collecting data, prints data to a .xlsx file
if collect_data:
filename = "PlayData/data" + str(num_players) + "_" + str(domino_size) + "_" + str(num_games) + ".xlsx"
writer = pd.ExcelWriter(filename)
full_data.to_excel(writer, "Sheet1")
writer.save()
#Prints results and returns them as well
if debug: print(score_averages)
if debug: print(win_percentage)
return score_averages, win_percentage | [((1844, 1880), 'numpy.ndarray', 'np.ndarray', (['(num_players, num_games)'], {}), '((num_players, num_games))\n', (1854, 1880), True, 'import numpy as np\n'), ((1892, 1928), 'numpy.ndarray', 'np.ndarray', (['(num_players, num_games)'], {}), '((num_players, num_games))\n', (1902, 1928), True, 'import numpy as np\n'), ((1945, 1979), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_names'}), '(columns=column_names)\n', (1957, 1979), True, 'import pandas as pd\n'), ((3240, 3263), 'numpy.ndarray', 'np.ndarray', (['num_players'], {}), '(num_players)\n', (3250, 3263), True, 'import numpy as np\n'), ((3287, 3310), 'numpy.ndarray', 'np.ndarray', (['num_players'], {}), '(num_players)\n', (3297, 3310), True, 'import numpy as np\n'), ((2355, 2517), 'mtrain.mexicantrain', 'mtrain.mexicantrain', (['num_players', 'domino_size'], {'debug': 'debug', 'modes': 'game_modes', 'data_collection': 'collect_data', 'data_index': 'current_index', 'file_name': 'file_name'}), '(num_players, domino_size, debug=debug, modes=game_modes,\n data_collection=collect_data, data_index=current_index, file_name=file_name\n )\n', (2374, 2517), False, 'import mtrain\n'), ((3395, 3427), 'numpy.mean', 'np.mean', (['scores[(player_num), :]'], {}), '(scores[(player_num), :])\n', (3402, 3427), True, 'import numpy as np\n'), ((3463, 3493), 'numpy.mean', 'np.mean', (['wins[(player_num), :]'], {}), '(wins[(player_num), :])\n', (3470, 3493), True, 'import numpy as np\n'), ((3696, 3720), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['filename'], {}), '(filename)\n', (3710, 3720), True, 'import pandas as pd\n'), ((2796, 2830), 'pandas.concat', 'pd.concat', (['[full_data, results[2]]'], {}), '([full_data, results[2]])\n', (2805, 2830), True, 'import pandas as pd\n'), ((2227, 2247), 'random.choice', 'random.choice', (['modes'], {}), '(modes)\n', (2240, 2247), False, 'import random\n')] |
RGBCube/dml | dml/errors.py | f551821545a062e15aea1f2c2444e6016748ea34 | __all__ = ("DottedMarkupLanguageException", "DecodeError")
class DottedMarkupLanguageException(Exception):
"""Base class for all exceptions in this module."""
pass
class DecodeError(DottedMarkupLanguageException):
"""Raised when there is an error decoding a string."""
pass
| [] |
VaranRohila/apn | licenseplates/dataset.py | dbb5b814233accbbb49b9bfe12b7162402e3b267 | ##############################################################################
#
# Below code is inspired on
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/pascal_voc.py
# --------------------------------------------------------
# Detectron2
# Licensed under the Apache 2.0 license.
# --------------------------------------------------------
from fvcore.common.file_io import PathManager
import os
import numpy as np
import xml.etree.ElementTree as ET
from detectron2.structures import BoxMode
from detectron2.data import DatasetCatalog, MetadataCatalog
__all__ = ["register_licenseplates_voc"]
CLASS_NAMES = [
"license_plate",
]
def load_voc_instances(dirname: str, split: str):
"""
Load licenseplates VOC detection annotations to Detectron2 format.
Args:
        dirname: Contains "annotations", "images"
split (str): one of "train", "test"
"""
with PathManager.open(os.path.join(dirname, split + ".txt")) as f:
fileids = np.loadtxt(f, dtype=np.str)
dicts = []
for fileid in fileids:
anno_file = os.path.join(dirname, "annotations", fileid + ".xml")
jpeg_file = os.path.join(dirname, "images", fileid + ".jpg")
tree = ET.parse(anno_file)
r = {
"file_name": jpeg_file,
"image_id": fileid,
"height": int(tree.findall("./size/height")[0].text),
"width": int(tree.findall("./size/width")[0].text),
}
instances = []
for obj in tree.findall("object"):
cls = obj.find("name").text
bbox = obj.find("bndbox")
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
instances.append(
{"category_id": CLASS_NAMES.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
)
r["annotations"] = instances
dicts.append(r)
return dicts
def register_licenseplates_voc(name, dirname, split):
DatasetCatalog.register(name,
lambda: load_voc_instances(dirname, split))
MetadataCatalog.get(name).set(thing_classes=CLASS_NAMES,
dirname=dirname,
split=split)
if __name__ == "__main__":
import random
import cv2
from detectron2.utils.visualizer import Visualizer
import argparse
# Parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("--split", default="train")
ap.add_argument("--samples", type=int, default=10)
ap.add_argument("--scale", type=float, default=1.0)
args = ap.parse_args()
dataset_name = f"licenseplates_{args.split}"
register_licenseplates_voc(dataset_name, "datasets/licenseplates", args.split)
dataset_dicts = DatasetCatalog.get(dataset_name)
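    # Draw a few random samples with their ground-truth boxes to sanity-check the annotations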
for d in random.sample(dataset_dicts, args.samples):
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1],
metadata=MetadataCatalog.get(dataset_name),
scale=args.scale)
vis = visualizer.draw_dataset_dict(d)
cv2.imshow(dataset_name, vis.get_image()[:, :, ::-1])
# Exit? Press ESC
        if (cv2.waitKey(0) & 0xFF) == 27:
break
cv2.destroyAllWindows()
| [((2443, 2468), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2466, 2468), False, 'import argparse\n'), ((2808, 2840), 'detectron2.data.DatasetCatalog.get', 'DatasetCatalog.get', (['dataset_name'], {}), '(dataset_name)\n', (2826, 2840), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((2854, 2896), 'random.sample', 'random.sample', (['dataset_dicts', 'args.samples'], {}), '(dataset_dicts, args.samples)\n', (2867, 2896), False, 'import random\n'), ((3312, 3335), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3333, 3335), False, 'import cv2\n'), ((1011, 1038), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'dtype': 'np.str'}), '(f, dtype=np.str)\n', (1021, 1038), True, 'import numpy as np\n'), ((1102, 1155), 'os.path.join', 'os.path.join', (['dirname', '"""annotations"""', "(fileid + '.xml')"], {}), "(dirname, 'annotations', fileid + '.xml')\n", (1114, 1155), False, 'import os\n'), ((1176, 1224), 'os.path.join', 'os.path.join', (['dirname', '"""images"""', "(fileid + '.jpg')"], {}), "(dirname, 'images', fileid + '.jpg')\n", (1188, 1224), False, 'import os\n'), ((1241, 1260), 'xml.etree.ElementTree.parse', 'ET.parse', (['anno_file'], {}), '(anno_file)\n', (1249, 1260), True, 'import xml.etree.ElementTree as ET\n'), ((2912, 2938), 'cv2.imread', 'cv2.imread', (["d['file_name']"], {}), "(d['file_name'])\n", (2922, 2938), False, 'import cv2\n'), ((948, 985), 'os.path.join', 'os.path.join', (['dirname', "(split + '.txt')"], {}), "(dirname, split + '.txt')\n", (960, 985), False, 'import os\n'), ((2106, 2131), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['name'], {}), '(name)\n', (2125, 2131), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((3029, 3062), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['dataset_name'], {}), '(dataset_name)\n', (3048, 3062), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((3260, 3274), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3271, 3274), False, 'import cv2\n')] |
RogerChern/DALI | docs/examples/pytorch/resnet50/scripts/test_read_speed.py | be143c3bb35458549e273608f1683a99ae41968e | import glob
import time
import random
filelist = glob.glob('/mnt/lustre/chenyuntao1/datasets/imagenet/train/*/*')
random.shuffle(filelist)
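# Read the first 10,000 shuffled image files and report the achieved read throughput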
begin = time.time()
for i, f in enumerate(filelist):
if i == 10000:
break
with open(f, "rb") as fin:
result = fin.read()
end = time.time()
print("%.1f images/s" % (10000 / (end - begin))) | [((51, 115), 'glob.glob', 'glob.glob', (['"""/mnt/lustre/chenyuntao1/datasets/imagenet/train/*/*"""'], {}), "('/mnt/lustre/chenyuntao1/datasets/imagenet/train/*/*')\n", (60, 115), False, 'import glob\n'), ((116, 140), 'random.shuffle', 'random.shuffle', (['filelist'], {}), '(filelist)\n', (130, 140), False, 'import random\n'), ((150, 161), 'time.time', 'time.time', ([], {}), '()\n', (159, 161), False, 'import time\n'), ((293, 304), 'time.time', 'time.time', ([], {}), '()\n', (302, 304), False, 'import time\n')] |
TormodLandet/Ocellaris | ocellaris/solver_parts/boundary_conditions/dirichlet.py | 6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58 | # Copyright (C) 2015-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0
import dolfin
from . import register_boundary_condition, BoundaryConditionCreator
from ocellaris.utils import (
CodedExpression,
OcellarisCppExpression,
OcellarisError,
verify_field_variable_definition,
)
class OcellarisDirichletBC(dolfin.DirichletBC):
def __init__(
self, simulation, V, value, subdomain_marker, subdomain_id, updater=None
):
"""
A simple storage class for Dirichlet boundary conditions
"""
super().__init__(
V, value, subdomain_marker, subdomain_id, method='geometric'
)
self.simulation = simulation
self._value = value
self.subdomain_marker = subdomain_marker
self.subdomain_id = subdomain_id
self._updater = updater
def func(self):
"""
        The boundary value function
"""
return self._value
def ds(self):
"""
Returns the ds measure of the subdomain
"""
return self.simulation.data['ds'](self.subdomain_id)
def copy_and_change_function_space(self, V):
"""
Return a copy with a new function space. Used when converting from
BCs for a segregated solver (default) to BCs for a coupled solver
"""
return OcellarisDirichletBC(
self.simulation, V, self._value, self.subdomain_marker, self.subdomain_id
)
def update(self):
"""
Update the time and other parameters used in the BC.
This is used every timestep and for all RK substeps
"""
if self._updater:
self._updater(
self.simulation.timestep, self.simulation.time, self.simulation.dt
)
def __repr__(self):
return '<OcellarisDirichletBC on subdomain %d>' % self.subdomain_id
@register_boundary_condition('ConstantValue')
class ConstantDirichletBoundary(BoundaryConditionCreator):
description = 'A prescribed constant value Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with constant value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
value = inp_dict.get_value('value', required_type='any')
if isinstance(value, list):
assert len(value) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
self.register_dirichlet_condition(
name, value[d], subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(var_name, value, subdomains, subdomain_id)
def register_dirichlet_condition(self, var_name, value, subdomains, subdomain_id):
"""
Add a Dirichlet condition to this variable
"""
if not isinstance(value, (float, int)):
raise OcellarisError(
'Error in ConstantValue BC for %s' % var_name,
'The value %r is not a number' % value,
)
df_value = dolfin.Constant(value)
# Store the boundary condition for use in the solver
bc = OcellarisDirichletBC(
self.simulation, self.func_space, df_value, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Constant value %r for %s' % (value, var_name))
@register_boundary_condition('CodedValue')
class CodedDirichletBoundary(BoundaryConditionCreator):
description = 'A coded Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with coded value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Make a dolfin Expression object that runs the code string
code = inp_dict.get_value('code', required_type='any')
if isinstance(code, list):
assert len(code) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
description = 'coded value boundary condition for %s' % name
sub_code = inp_dict.get_value('code/%d' % d, required_type='string')
expr = CodedExpression(simulation, sub_code, description)
self.register_dirichlet_condition(name, expr, subdomains, subdomain_id)
else:
description = 'coded value boundary condition for %s' % var_name
expr = CodedExpression(simulation, code, description)
self.register_dirichlet_condition(var_name, expr, subdomains, subdomain_id)
def register_dirichlet_condition(self, var_name, expr, subdomains, subdomain_id):
"""
Store the boundary condition for use in the solver
"""
bc = OcellarisDirichletBC(
self.simulation, self.func_space, expr, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Coded value for %s' % var_name)
@register_boundary_condition('CppCodedValue')
class CppCodedDirichletBoundary(BoundaryConditionCreator):
description = 'A C++ coded Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with C++ coded value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Make a dolfin Expression object that runs the code string
code = inp_dict.get_value('cpp_code', required_type='any')
if isinstance(code, list):
assert len(code) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
sub_code = inp_dict.get_value('cpp_code/%d' % d, required_type='string')
self.register_dirichlet_condition(
name, sub_code, subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(var_name, code, subdomains, subdomain_id)
def register_dirichlet_condition(
self, var_name, cpp_code, subdomains, subdomain_id
):
"""
Store the boundary condition for use in the solver
"""
        description = 'boundary condition for %s' % var_name
P = self.func_space.ufl_element().degree()
expr, updater = OcellarisCppExpression(
self.simulation, cpp_code, description, P, return_updater=True
)
bc = OcellarisDirichletBC(
self.simulation,
self.func_space,
expr,
subdomains,
subdomain_id,
updater=updater,
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' C++ coded value for %s' % var_name)
@register_boundary_condition('FieldFunction')
class FieldFunctionDirichletBoundary(BoundaryConditionCreator):
description = 'A Dirichlet condition with values from a field function'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet boundary condition with value from a field function
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Get the field function expression object
vardef = inp_dict.get_value('function', required_type='any')
        description = 'boundary condition for %s' % var_name
if isinstance(vardef, list):
assert len(vardef) == simulation.ndim
exprs = [
verify_field_variable_definition(simulation, vd, description)
for vd in vardef
]
else:
expr = verify_field_variable_definition(simulation, vardef, description)
if expr.ufl_shape != ():
assert expr.ufl_shape == (
simulation.ndim,
), 'Expected shape %r got %r' % ((simulation.ndim,), expr.ufl_shape)
exprs = [expr[d] for d in range(simulation.ndim)]
else:
exprs = [expr]
# Register BCs
if len(exprs) > 1:
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
self.register_dirichlet_condition(
name, exprs[d], subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(
var_name, exprs[0], subdomains, subdomain_id
)
def register_dirichlet_condition(self, var_name, expr, subdomains, subdomain_id):
"""
Store the boundary condition for use in the solver
"""
assert expr.ufl_shape == ()
bc = OcellarisDirichletBC(
self.simulation, self.func_space, expr, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Field function value for %s' % var_name)
@register_boundary_condition('FieldVelocityValve')
class FieldVelocityValveDirichletBoundary(BoundaryConditionCreator):
description = 'A Dirichlet condition that compensates for non-zero total flux of a known velocity field'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet boundary condition with value from a field function
"""
self.simulation = simulation
# A var_name like "u0" should be given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
# Get the field function expression object
vardef = inp_dict.get_value('function', required_type='any')
        description = 'boundary condition for %s' % var_name
self.velocity = verify_field_variable_definition(
simulation, vardef, description
)
field = simulation.fields[vardef.split('/')[0]]
# The expression value is updated as the field is changed
inp_dict.get_value('function', required_type='any')
field.register_dependent_field(self)
self.flux = dolfin.Constant(1.0)
# Create the
bc = OcellarisDirichletBC(
self.simulation, self.func_space, self.flux, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Field velocity valve for %s' % var_name)
# Compute the region area, then update the flux
mesh = simulation.data['mesh']
self.area = dolfin.assemble(self.flux * bc.ds()(domain=mesh))
self.region_names = inp_dict.get_value('regions', required_type='list(string)')
self.update()
def update(self, timestep_number=None, t=None, dt=None):
"""
The main field has changed, update our flux to make the total sum to zero
"""
regions = self.simulation.data['boundary']
mesh = self.simulation.data['mesh']
n = dolfin.FacetNormal(mesh)
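        # integrate velocity . n over the selected boundary regions to get the net flux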
flux = 0
count = 0
for region in regions:
if region.name in self.region_names:
f = dolfin.dot(self.velocity, n) * region.ds()
flux += dolfin.assemble(f)
count += 1
assert count == len(self.region_names)
# FIXME: assumes n is pointing outwards along the axis in the positive
# direction in this boundary region
self.flux.assign(dolfin.Constant(-flux / self.area))
| [((3425, 3447), 'dolfin.Constant', 'dolfin.Constant', (['value'], {}), '(value)\n', (3440, 3447), False, 'import dolfin\n'), ((7405, 7495), 'ocellaris.utils.OcellarisCppExpression', 'OcellarisCppExpression', (['self.simulation', 'cpp_code', 'description', 'P'], {'return_updater': '(True)'}), '(self.simulation, cpp_code, description, P,\n return_updater=True)\n', (7427, 7495), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((11117, 11182), 'ocellaris.utils.verify_field_variable_definition', 'verify_field_variable_definition', (['simulation', 'vardef', 'description'], {}), '(simulation, vardef, description)\n', (11149, 11182), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((11453, 11473), 'dolfin.Constant', 'dolfin.Constant', (['(1.0)'], {}), '(1.0)\n', (11468, 11473), False, 'import dolfin\n'), ((12353, 12377), 'dolfin.FacetNormal', 'dolfin.FacetNormal', (['mesh'], {}), '(mesh)\n', (12371, 12377), False, 'import dolfin\n'), ((3257, 3363), 'ocellaris.utils.OcellarisError', 'OcellarisError', (["('Error in ConstantValue BC for %s' % var_name)", "('The value %r is not a number' % value)"], {}), "('Error in ConstantValue BC for %s' % var_name, \n 'The value %r is not a number' % value)\n", (3271, 3363), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((5200, 5246), 'ocellaris.utils.CodedExpression', 'CodedExpression', (['simulation', 'code', 'description'], {}), '(simulation, code, description)\n', (5215, 5246), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((9040, 9105), 'ocellaris.utils.verify_field_variable_definition', 'verify_field_variable_definition', (['simulation', 'vardef', 'description'], {}), '(simulation, vardef, description)\n', (9072, 9105), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((12828, 12862), 'dolfin.Constant', 'dolfin.Constant', (['(-flux / self.area)'], {}), '(-flux / self.area)\n', (12843, 12862), False, 'import dolfin\n'), ((4951, 5001), 'ocellaris.utils.CodedExpression', 'CodedExpression', (['simulation', 'sub_code', 'description'], {}), '(simulation, sub_code, description)\n', (4966, 5001), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((8898, 8959), 'ocellaris.utils.verify_field_variable_definition', 'verify_field_variable_definition', (['simulation', 'vd', 'description'], {}), '(simulation, vd, description)\n', (8930, 8959), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((12580, 12598), 'dolfin.assemble', 'dolfin.assemble', (['f'], {}), '(f)\n', (12595, 12598), False, 'import dolfin\n'), ((12513, 12541), 'dolfin.dot', 'dolfin.dot', (['self.velocity', 'n'], {}), '(self.velocity, n)\n', (12523, 12541), False, 'import dolfin\n')] |
abaldwin/algorithms | count_split_inversions/test_count_split_inversions.py | 8c8722394c9115c572dadcd8ab601885512fd494 | import unittest
from count_split_inversions import count_inversions
class TestCountSplitInversions(unittest.TestCase):
def test_count_inversions(self):
input = [1, 3, 5, 2, 4, 6]
result = count_inversions(input)
self.assertEqual(result, 3)
if __name__ == '__main__':
unittest.main()
| [((303, 318), 'unittest.main', 'unittest.main', ([], {}), '()\n', (316, 318), False, 'import unittest\n'), ((211, 234), 'count_split_inversions.count_inversions', 'count_inversions', (['input'], {}), '(input)\n', (227, 234), False, 'from count_split_inversions import count_inversions\n')] |
Forest216/BigDL | python/chronos/test/bigdl/chronos/forecaster/tf/test_seq2seq_keras_forecaster.py | 840da9a2eaf395978dd83730b02aa5e5dfbd7989 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import tempfile
import os
from unittest import TestCase
import numpy as np
import tensorflow as tf
def create_data(tf_data=False, batch_size=32):
train_num_samples = 1000
test_num_samples = 400
input_feature_num = 10
output_feature_num = 2
past_seq_len = 10
future_seq_len = 2
def get_x_y(num_sample):
x = np.random.randn(num_sample, past_seq_len, input_feature_num)
y = np.random.randn(num_sample, future_seq_len, output_feature_num)
return x, y
train_data = get_x_y(train_num_samples)
test_data = get_x_y(test_num_samples)
if tf_data:
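        # wrap the numpy arrays in shuffled, batched, prefetched tf.data pipelines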
from_tensor_slices = tf.data.Dataset.from_tensor_slices
train_data = from_tensor_slices(train_data).cache()\
.shuffle(train_num_samples)\
.batch(batch_size)\
.prefetch(tf.data.AUTOTUNE)
test_data = from_tensor_slices(test_data).cache()\
.batch(batch_size)\
.prefetch(tf.data.AUTOTUNE)
return train_data, test_data
@pytest.mark.skipif(tf.__version__ < '2.0.0', reason="Run only when tf > 2.0.0.")
class TestSeq2SeqForecaster(TestCase):
def setUp(self):
from bigdl.chronos.forecaster.tf.seq2seq_forecaster import Seq2SeqForecaster
self.forecaster = Seq2SeqForecaster(past_seq_len=10,
future_seq_len=2,
input_feature_num=10,
output_feature_num=2)
def tearDown(self):
pass
def test_seq2seq_fit_predict_evaluate(self):
train_data, test_data = create_data()
self.forecaster.fit(train_data,
epochs=2,
batch_size=32)
yhat = self.forecaster.predict(test_data[0])
assert yhat.shape == (400, 2, 2)
mse = self.forecaster.evaluate(test_data, multioutput="raw_values")
assert mse[0].shape == test_data[-1].shape[1:]
def test_seq2seq_fit_tf_data(self):
train_data, test_data = create_data(tf_data=True)
self.forecaster.fit(train_data,
epochs=2)
yhat = self.forecaster.predict(test_data)
assert yhat.shape == (400, 2, 2)
def test_seq2seq_save_load(self):
train_data, test_data = create_data()
self.forecaster.fit(train_data,
epochs=2,
batch_size=32)
yhat = self.forecaster.predict(test_data[0])
with tempfile.TemporaryDirectory() as tmp_dir_file:
tmp_dir_file = os.path.join(tmp_dir_file, 'seq2seq.ckpt')
self.forecaster.save(tmp_dir_file)
self.forecaster.load(tmp_dir_file)
from bigdl.chronos.model.tf2.Seq2Seq_keras import LSTMSeq2Seq
assert isinstance(self.forecaster.internal, LSTMSeq2Seq)
load_model_yhat = self.forecaster.predict(test_data[0])
assert yhat.shape == (400, 2, 2)
np.testing.assert_almost_equal(yhat, load_model_yhat, decimal=5)
if __name__ == '__main__':
pytest.main([__file__])
| [((1807, 1892), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(tf.__version__ < '2.0.0')"], {'reason': '"""Run only when tf > 2.0.0."""'}), "(tf.__version__ < '2.0.0', reason='Run only when tf > 2.0.0.'\n )\n", (1825, 1892), False, 'import pytest\n'), ((3882, 3905), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (3893, 3905), False, 'import pytest\n'), ((950, 1010), 'numpy.random.randn', 'np.random.randn', (['num_sample', 'past_seq_len', 'input_feature_num'], {}), '(num_sample, past_seq_len, input_feature_num)\n', (965, 1010), True, 'import numpy as np\n'), ((1023, 1086), 'numpy.random.randn', 'np.random.randn', (['num_sample', 'future_seq_len', 'output_feature_num'], {}), '(num_sample, future_seq_len, output_feature_num)\n', (1038, 1086), True, 'import numpy as np\n'), ((2064, 2164), 'bigdl.chronos.forecaster.tf.seq2seq_forecaster.Seq2SeqForecaster', 'Seq2SeqForecaster', ([], {'past_seq_len': '(10)', 'future_seq_len': '(2)', 'input_feature_num': '(10)', 'output_feature_num': '(2)'}), '(past_seq_len=10, future_seq_len=2, input_feature_num=10,\n output_feature_num=2)\n', (2081, 2164), False, 'from bigdl.chronos.forecaster.tf.seq2seq_forecaster import Seq2SeqForecaster\n'), ((3784, 3848), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['yhat', 'load_model_yhat'], {'decimal': '(5)'}), '(yhat, load_model_yhat, decimal=5)\n', (3814, 3848), True, 'import numpy as np\n'), ((3317, 3346), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (3344, 3346), False, 'import tempfile\n'), ((3391, 3433), 'os.path.join', 'os.path.join', (['tmp_dir_file', '"""seq2seq.ckpt"""'], {}), "(tmp_dir_file, 'seq2seq.ckpt')\n", (3403, 3433), False, 'import os\n')] |
nicolaikd/sl-ksp | examples/SubOrbitalFlight.py | cc1e239570e10428d11a41a26b33947b54f7f0ec | import time
import krpc
conn = krpc.connect(name='Sub-orbital flight')
vessel = conn.space_center.active_vessel
vessel.auto_pilot.target_pitch_and_heading(90, 90)
vessel.auto_pilot.engage()
vessel.control.throttle = 1
time.sleep(1)
print('Launch!')
vessel.control.activate_next_stage()
fuel_amount = conn.get_call(vessel.resources.amount, 'SolidFuel')
expr = conn.krpc.Expression.less_than(
conn.krpc.Expression.call(fuel_amount),
conn.krpc.Expression.constant_float(0.1))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Booster separation')
vessel.control.activate_next_stage()
mean_altitude = conn.get_call(getattr, vessel.flight(), 'mean_altitude')
expr = conn.krpc.Expression.greater_than(
conn.krpc.Expression.call(mean_altitude),
conn.krpc.Expression.constant_double(10000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Gravity turn')
vessel.auto_pilot.target_pitch_and_heading(60, 90)
apoapsis_altitude = conn.get_call(getattr, vessel.orbit, 'apoapsis_altitude')
expr = conn.krpc.Expression.greater_than(
conn.krpc.Expression.call(apoapsis_altitude),
conn.krpc.Expression.constant_double(100000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Launch stage separation')
vessel.control.throttle = 0
time.sleep(1)
vessel.control.activate_next_stage()
vessel.auto_pilot.disengage()
srf_altitude = conn.get_call(getattr, vessel.flight(), 'surface_altitude')
expr = conn.krpc.Expression.less_than(
conn.krpc.Expression.call(srf_altitude),
conn.krpc.Expression.constant_double(1000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
vessel.control.activate_next_stage()
while vessel.flight(vessel.orbit.body.reference_frame).vertical_speed < -0.1:
print('Altitude = %.1f meters' % vessel.flight().surface_altitude)
time.sleep(1)
print('Landed!')
| [((31, 70), 'krpc.connect', 'krpc.connect', ([], {'name': '"""Sub-orbital flight"""'}), "(name='Sub-orbital flight')\n", (43, 70), False, 'import krpc\n'), ((220, 233), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (230, 233), False, 'import time\n'), ((1337, 1350), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1347, 1350), False, 'import time\n'), ((1891, 1904), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1901, 1904), False, 'import time\n')] |
davide-butera/data-analysis-with-python | part02/part02-e11_rows_and_columns/src/rows_and_columns.py | 78ba3d3d060ddb305bfd84b9a122409c15c47006 | #!/usr/bin/env python3
import numpy as np
def get_rows(a):
return list(a)
def get_columns(a):
return list(a.T)
def main():
np.random.seed(0)
a=np.random.randint(0,10, (4,4))
print("a:", a)
print("Rows:", get_rows(a))
print("Columns:", get_columns(a))
if __name__ == "__main__":
main()
| [((138, 155), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (152, 155), True, 'import numpy as np\n'), ((162, 194), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(4, 4)'], {}), '(0, 10, (4, 4))\n', (179, 194), True, 'import numpy as np\n')] |
kegl/ramp-board | ramp-database/ramp_database/tools/leaderboard.py | 6373bf02efc096e02b26320e4f11edd00f9e5752 | from distutils.version import LooseVersion
from itertools import product
import numpy as np
import pandas as pd
from ..model.event import Event
from ..model.event import EventTeam
from ..model.submission import Submission
from ..model.team import Team
from .team import get_event_team_by_name
from .submission import get_bagged_scores
from .submission import get_scores
from .submission import get_submission_max_ram
from .submission import get_time
width = -1 if LooseVersion(pd.__version__) < LooseVersion("1.0.0") else None
pd.set_option('display.max_colwidth', width)
def _compute_leaderboard(session, submissions, leaderboard_type, event_name,
with_links=True):
"""Format the leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
submissions : list of :class:`ramp_database.model.Submission`
The submission to report in the leaderboard.
leaderboard_type : {'public', 'private'}
The type of leaderboard to built.
event_name : str
The name of the event.
with_links : bool
Whether or not the submission name should be clickable.
Returns
-------
leaderboard : dataframe
The leaderboard in a dataframe format.
"""
record_score = []
event = session.query(Event).filter_by(name=event_name).one()
map_score_precision = {score_type.name: score_type.precision
for score_type in event.score_types}
for sub in submissions:
# take only max n bag
df_scores_bag = get_bagged_scores(session, sub.id)
highest_level = df_scores_bag.index.get_level_values('n_bag').max()
df_scores_bag = df_scores_bag.loc[(slice(None), highest_level), :]
df_scores_bag.index = df_scores_bag.index.droplevel('n_bag')
df_scores_bag = df_scores_bag.round(map_score_precision)
df_scores = get_scores(session, sub.id)
df_scores = df_scores.round(map_score_precision)
df_time = get_time(session, sub.id)
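        # aggregate the per-fold timings into a single total per step (train/valid/test)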
df_time = df_time.stack().to_frame()
df_time.index = df_time.index.set_names(['fold', 'step'])
df_time = df_time.rename(columns={0: 'time'})
df_time = df_time.sum(axis=0, level="step").T
df_scores_mean = df_scores.groupby('step').mean()
df_scores_std = df_scores.groupby('step').std()
# select only the validation and testing steps and rename them to
# public and private
map_renaming = {'valid': 'public', 'test': 'private'}
df_scores_mean = (df_scores_mean.loc[list(map_renaming.keys())]
.rename(index=map_renaming)
.stack().to_frame().T)
df_scores_std = (df_scores_std.loc[list(map_renaming.keys())]
.rename(index=map_renaming)
.stack().to_frame().T)
df_scores_bag = (df_scores_bag.rename(index=map_renaming)
.stack().to_frame().T)
df = pd.concat([df_scores_bag, df_scores_mean, df_scores_std], axis=1,
keys=['bag', 'mean', 'std'])
df.columns = df.columns.set_names(['stat', 'set', 'score'])
# change the multi-index into a stacked index
df.columns = df.columns.map(lambda x: " ".join(x))
# add the aggregated time information
df_time.index = df.index
df_time = df_time.rename(
columns={'train': 'train time [s]',
'valid': 'validation time [s]',
'test': 'test time [s]'}
)
df = pd.concat([df, df_time], axis=1)
if leaderboard_type == 'private':
df['submission ID'] = sub.basename.replace('submission_', '')
df['team'] = sub.team.name
df['submission'] = sub.name_with_link if with_links else sub.name
df['contributivity'] = int(round(100 * sub.contributivity))
df['historical contributivity'] = int(round(
100 * sub.historical_contributivity))
df['max RAM [MB]'] = get_submission_max_ram(session, sub.id)
df['submitted at (UTC)'] = pd.Timestamp(sub.submission_timestamp)
record_score.append(df)
# stack all the records
df = pd.concat(record_score, axis=0, ignore_index=True, sort=False)
# keep only second precision for the time stamp
df['submitted at (UTC)'] = df['submitted at (UTC)'].astype('datetime64[s]')
# reordered the column
stats_order = (['bag', 'mean', 'std'] if leaderboard_type == 'private'
else ['bag'])
dataset_order = (['public', 'private'] if leaderboard_type == 'private'
else ['public'])
score_order = ([event.official_score_name] +
[score_type.name for score_type in event.score_types
if score_type.name != event.official_score_name])
score_list = [
'{} {} {}'.format(stat, dataset, score)
for dataset, score, stat in product(dataset_order,
score_order,
stats_order)
]
# Only display train and validation time for the public leaderboard
time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
if leaderboard_type == 'private'
else ['train time [s]', 'validation time [s]'])
col_ordered = (
['team', 'submission'] +
score_list +
['contributivity', 'historical contributivity'] +
time_list +
['max RAM [MB]', 'submitted at (UTC)']
)
if leaderboard_type == "private":
col_ordered = ["submission ID"] + col_ordered
df = df[col_ordered]
# check if the contributivity columns are null
contrib_columns = ['contributivity', 'historical contributivity']
if (df[contrib_columns] == 0).all(axis=0).all():
df = df.drop(columns=contrib_columns)
df = df.sort_values(
"bag {} {}".format(leaderboard_type, event.official_score_name),
ascending=event.get_official_score_type(session).is_lower_the_better
)
# rename the column name for the public leaderboard
if leaderboard_type == 'public':
df = df.rename(columns={
key: value for key, value in zip(score_list, score_order)
})
return df
def _compute_competition_leaderboard(session, submissions, leaderboard_type,
event_name):
"""Format the competition leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
submissions : list of :class:`ramp_database.model.Submission`
The submission to report in the leaderboard.
leaderboard_type : {'public', 'private'}
The type of leaderboard to built.
event_name : str
The name of the event.
Returns
-------
competition_leaderboard : dataframe
The competition leaderboard in a dataframe format.
"""
event = session.query(Event).filter_by(name=event_name).one()
score_type = event.get_official_score_type(session)
score_name = event.official_score_name
private_leaderboard = _compute_leaderboard(session, submissions, 'private',
event_name, with_links=False)
time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
if leaderboard_type == 'private'
else ['train time [s]', 'validation time [s]'])
col_selected_private = (['team', 'submission'] +
['bag private ' + score_name,
'bag public ' + score_name] +
time_list +
['submitted at (UTC)'])
leaderboard_df = private_leaderboard[col_selected_private]
leaderboard_df = leaderboard_df.rename(
columns={'bag private ' + score_name: 'private ' + score_name,
'bag public ' + score_name: 'public ' + score_name}
)
# select best submission for each team
best_df = (leaderboard_df.groupby('team').min()
if score_type.is_lower_the_better
else leaderboard_df.groupby('team').max())
best_df = best_df[['public ' + score_name]].reset_index()
best_df['best'] = True
# merge to get a best indicator column then select best
leaderboard_df = pd.merge(
leaderboard_df, best_df, how='left',
left_on=['team', 'public ' + score_name],
right_on=['team', 'public ' + score_name]
)
leaderboard_df = leaderboard_df.fillna(False)
leaderboard_df = leaderboard_df[leaderboard_df['best']]
leaderboard_df = leaderboard_df.drop(columns='best')
# dealing with ties: we need the lowest timestamp
best_df = leaderboard_df.groupby('team').min()
best_df = best_df[['submitted at (UTC)']].reset_index()
best_df['best'] = True
leaderboard_df = pd.merge(
leaderboard_df, best_df, how='left',
left_on=['team', 'submitted at (UTC)'],
right_on=['team', 'submitted at (UTC)'])
leaderboard_df = leaderboard_df.fillna(False)
leaderboard_df = leaderboard_df[leaderboard_df['best']]
leaderboard_df = leaderboard_df.drop(columns='best')
# sort by public score then by submission timestamp, compute rank
leaderboard_df = leaderboard_df.sort_values(
by=['public ' + score_name, 'submitted at (UTC)'],
ascending=[score_type.is_lower_the_better, True])
leaderboard_df['public rank'] = np.arange(len(leaderboard_df)) + 1
# sort by private score then by submission timestamp, compute rank
leaderboard_df = leaderboard_df.sort_values(
by=['private ' + score_name, 'submitted at (UTC)'],
ascending=[score_type.is_lower_the_better, True])
leaderboard_df['private rank'] = np.arange(len(leaderboard_df)) + 1
leaderboard_df['move'] = \
leaderboard_df['public rank'] - leaderboard_df['private rank']
leaderboard_df['move'] = [
'{:+d}'.format(m) if m != 0 else '-' for m in leaderboard_df['move']]
col_selected = (
[leaderboard_type + ' rank', 'team', 'submission',
leaderboard_type + ' ' + score_name] +
time_list +
['submitted at (UTC)']
)
if leaderboard_type == 'private':
col_selected.insert(1, 'move')
df = leaderboard_df[col_selected]
df = df.rename(columns={
leaderboard_type + ' ' + score_name: score_name,
leaderboard_type + ' rank': 'rank'
})
df = df.sort_values(by='rank')
return df
def get_leaderboard(session, leaderboard_type, event_name, user_name=None,
with_links=True):
"""Get a leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
leaderboard_type : {'public', 'private', 'failed', 'new', \
'public competition', 'private competition'}
The type of leaderboard to generate.
event_name : str
The event name.
user_name : None or str, default is None
The user name. If None, scores from all users will be queried. This
parameter is discarded when requesting the competition leaderboard.
with_links : bool, default is True
Whether or not the submission name should be clickable.
Returns
-------
leaderboard : str
The leaderboard in HTML format.
"""
q = (session.query(Submission)
.filter(Event.id == EventTeam.event_id)
.filter(Team.id == EventTeam.team_id)
.filter(EventTeam.id == Submission.event_team_id)
.filter(Event.name == event_name))
if user_name is not None:
q = q.filter(Team.name == user_name)
submissions = q.all()
submission_filter = {'public': 'is_public_leaderboard',
'private': 'is_private_leaderboard',
'failed': 'is_error',
'new': 'is_new',
'public competition': 'is_in_competition',
'private competition': 'is_in_competition'}
submissions = [sub for sub in submissions
if (getattr(sub, submission_filter[leaderboard_type]) and
sub.is_not_sandbox)]
if not submissions:
return None
if leaderboard_type in ['public', 'private']:
df = _compute_leaderboard(
session, submissions, leaderboard_type, event_name,
with_links=with_links
)
elif leaderboard_type in ['new', 'failed']:
if leaderboard_type == 'new':
columns = ['team', 'submission', 'submitted at (UTC)', 'state']
else:
columns = ['team', 'submission', 'submitted at (UTC)', 'error']
# we rely on the zip function ignore the submission state if the error
# column was not appended
data = [{
column: value for column, value in zip(
columns,
[sub.event_team.team.name,
sub.name_with_link,
pd.Timestamp(sub.submission_timestamp),
(sub.state_with_link if leaderboard_type == 'failed'
else sub.state)])
} for sub in submissions]
df = pd.DataFrame(data, columns=columns)
else:
# make some extra filtering
submissions = [sub for sub in submissions if sub.is_public_leaderboard]
if not submissions:
return None
competition_type = ('public' if 'public' in leaderboard_type
else 'private')
df = _compute_competition_leaderboard(
session, submissions, competition_type, event_name
)
df_html = df.to_html(escape=False, index=False, max_cols=None,
max_rows=None, justify='left')
df_html = '<thead> {} </tbody>'.format(
df_html.split('<thead>')[1].split('</tbody>')[0]
)
return df_html
def update_leaderboards(session, event_name, new_only=False):
"""Update the leaderboards for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event = session.query(Event).filter_by(name=event_name).one()
if not new_only:
event.private_leaderboard_html = get_leaderboard(
session, 'private', event_name
)
event.public_leaderboard_html_with_links = get_leaderboard(
session, 'public', event_name
)
event.public_leaderboard_html_no_links = get_leaderboard(
session, 'public', event_name, with_links=False
)
event.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name
)
event.public_competition_leaderboard_html = get_leaderboard(
session, 'public competition', event_name
)
event.private_competition_leaderboard_html = get_leaderboard(
session, 'private competition', event_name
)
event.new_leaderboard_html = get_leaderboard(
session, 'new', event_name
)
session.commit()
def update_user_leaderboards(session, event_name, user_name,
new_only=False):
"""Update the of a user leaderboards for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
user_name : str
The user name. If None, scores from all users will be queried.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event_team = get_event_team_by_name(session, event_name, user_name)
if not new_only:
event_team.leaderboard_html = get_leaderboard(
session, 'public', event_name, user_name
)
event_team.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name, user_name
)
event_team.new_leaderboard_html = get_leaderboard(
session, 'new', event_name, user_name
)
session.commit()
def update_all_user_leaderboards(session, event_name, new_only=False):
"""Update the leaderboards for all users for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event = session.query(Event).filter_by(name=event_name).one()
event_teams = session.query(EventTeam).filter_by(event=event).all()
for event_team in event_teams:
user_name = event_team.team.name
if not new_only:
event_team.leaderboard_html = get_leaderboard(
session, 'public', event_name, user_name
)
event_team.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name, user_name
)
event_team.new_leaderboard_html = get_leaderboard(
session, 'new', event_name, user_name
)
session.commit()
| [((532, 576), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', 'width'], {}), "('display.max_colwidth', width)\n", (545, 576), True, 'import pandas as pd\n'), ((4369, 4431), 'pandas.concat', 'pd.concat', (['record_score'], {'axis': '(0)', 'ignore_index': '(True)', 'sort': '(False)'}), '(record_score, axis=0, ignore_index=True, sort=False)\n', (4378, 4431), True, 'import pandas as pd\n'), ((8577, 8711), 'pandas.merge', 'pd.merge', (['leaderboard_df', 'best_df'], {'how': '"""left"""', 'left_on': "['team', 'public ' + score_name]", 'right_on': "['team', 'public ' + score_name]"}), "(leaderboard_df, best_df, how='left', left_on=['team', 'public ' +\n score_name], right_on=['team', 'public ' + score_name])\n", (8585, 8711), True, 'import pandas as pd\n'), ((9119, 9249), 'pandas.merge', 'pd.merge', (['leaderboard_df', 'best_df'], {'how': '"""left"""', 'left_on': "['team', 'submitted at (UTC)']", 'right_on': "['team', 'submitted at (UTC)']"}), "(leaderboard_df, best_df, how='left', left_on=['team',\n 'submitted at (UTC)'], right_on=['team', 'submitted at (UTC)'])\n", (9127, 9249), True, 'import pandas as pd\n'), ((469, 497), 'distutils.version.LooseVersion', 'LooseVersion', (['pd.__version__'], {}), '(pd.__version__)\n', (481, 497), False, 'from distutils.version import LooseVersion\n'), ((500, 521), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.0.0"""'], {}), "('1.0.0')\n", (512, 521), False, 'from distutils.version import LooseVersion\n'), ((3141, 3240), 'pandas.concat', 'pd.concat', (['[df_scores_bag, df_scores_mean, df_scores_std]'], {'axis': '(1)', 'keys': "['bag', 'mean', 'std']"}), "([df_scores_bag, df_scores_mean, df_scores_std], axis=1, keys=[\n 'bag', 'mean', 'std'])\n", (3150, 3240), True, 'import pandas as pd\n'), ((3726, 3758), 'pandas.concat', 'pd.concat', (['[df, df_time]'], {'axis': '(1)'}), '([df, df_time], axis=1)\n', (3735, 3758), True, 'import pandas as pd\n'), ((4260, 4298), 'pandas.Timestamp', 'pd.Timestamp', (['sub.submission_timestamp'], {}), '(sub.submission_timestamp)\n', (4272, 4298), True, 'import pandas as pd\n'), ((5109, 5157), 'itertools.product', 'product', (['dataset_order', 'score_order', 'stats_order'], {}), '(dataset_order, score_order, stats_order)\n', (5116, 5157), False, 'from itertools import product\n'), ((13513, 13548), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (13525, 13548), True, 'import pandas as pd\n'), ((13316, 13354), 'pandas.Timestamp', 'pd.Timestamp', (['sub.submission_timestamp'], {}), '(sub.submission_timestamp)\n', (13328, 13354), True, 'import pandas as pd\n')] |
SavantLogics/Visual_Studio_Python_Scripts-master | projects/boring_stuff/03_functions/ZigZag.py | 9e3c5f8a8f685f9ae51045af9260ccc28f89d72f | #Automate the Boring Stuff with Python
import time, sys
indent = 0 # How many spaces to indent
indent_Increasing = True # Whether the indentation is increasing or not
try:
while True: # The main program loop
print(' ' * indent, end='')
print('********')
time.sleep(0.1) # Pause for 1/10th of a second
if indent_Increasing:
indent = indent + 1
if indent == 20:
indent_Increasing = False
else:
indent = indent - 1
if indent == 0:
indent_Increasing = True
except KeyboardInterrupt:
sys.exit() | [((284, 299), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (294, 299), False, 'import time, sys\n'), ((613, 623), 'sys.exit', 'sys.exit', ([], {}), '()\n', (621, 623), False, 'import time, sys\n')] |
whitews/ReFlowRESTClient | examples/add_compensation_to_sample.py | 69369bbea501382291b71facea7a511ab8f7848b | import getpass
import sys
import json
from reflowrestclient.utils import *
host = raw_input('Host: ')
username = raw_input('Username: ')
password = getpass.getpass('Password: ')
token = get_token(host, username, password)
if token:
print "Authentication successful"
print '=' * 40
else:
print "No token for you!!!"
sys.exit()
def start():
# Projects
project_list = get_projects(host, token)
for i, result in enumerate(project_list['data']):
print i, ':', result['project_name']
project_choice = raw_input('Choose Project:')
project = project_list['data'][int(project_choice)]
# Subjects
subject_list = get_subjects(host, token, project_pk=project['id'])
for i, result in enumerate(subject_list['data']):
print i, ':', result['subject_id']
subject_choice = raw_input('Choose Subject (leave blank for all subjects): ')
subject = None
if subject_choice:
subject = subject_list['data'][int(subject_choice)]
# Sites
site_list = get_sites(host, token, project_pk=project['id'])
if not site_list:
sys.exit('There are no sites')
for i, result in enumerate(site_list['data']):
print i, ':', result['site_name']
site_choice = raw_input('Choose Site (required): ')
site = site_list['data'][int(site_choice)]
# Samples
sample_args = [host, token]
sample_kwargs = {'site_pk': site['id']}
if subject:
sample_kwargs['subject_pk'] = subject['id']
sample_list = get_samples(*sample_args, **sample_kwargs)
if not sample_list:
sys.exit('There are no samples')
for i, result in enumerate(sample_list['data']):
print i, ':', result['original_filename']
sample_choice = raw_input('Choose Sample (leave blank for all samples): ')
sample = None
if sample_choice:
sample = sample_list['data'][int(sample_choice)]
# Compensation
compensation_list = get_compensations(host, token, site_pk=site['id'], project_pk=project['id'])
if not compensation_list:
sys.exit('There are no compensations')
for i, result in enumerate(compensation_list['data']):
print i, ':', result['original_filename']
compensation_choice = raw_input('Choose Compensation (required): ')
compensation = compensation_list['data'][int(compensation_choice)]
# Now have user verify information
print '=' * 40
print 'You chose to add this compensation to these samples:'
    print 'Compensation: %s' % compensation['original_filename']
print 'Samples:'
if sample:
print '\t%s' % sample['original_filename']
else:
for s in sample_list['data']:
print '\t%s' % s['original_filename']
print '=' * 40
apply_choice = None
while apply_choice not in ['continue', 'exit']:
apply_choice = raw_input("Type 'continue' to upload, 'exit' abort: ")
if apply_choice == 'exit':
sys.exit()
print 'continue'
if sample:
response_dict = add_compensation_to_sample(
host,
token,
sample_pk=str(sample['id']),
compensation_pk=str(compensation['id'])
)
print "Response: ", response_dict['status'], response_dict['reason']
print 'Data: '
print json.dumps(response_dict['data'], indent=4)
else:
for sample in sample_list['data']:
response_dict = add_compensation_to_sample(
host,
token,
sample_pk=str(sample['id']),
compensation_pk=str(compensation['id']),
)
print "Response: ", response_dict['status'], response_dict['reason']
print 'Data: '
print json.dumps(response_dict['data'], indent=4)
while True:
start() | [] |
imcallister/accountifie | accountifie/toolkit/urls.py | 094834c9d632e0353e3baf8d924eeb10cba0add4 | from django.conf import settings
from django.conf.urls import url, static
from . import views
from . import jobs
urlpatterns = [
url(r'^choose_company/(?P<company_id>.*)/$', views.choose_company, name='choose_company'),
url(r'^cleanlogs/$', jobs.cleanlogs, name='cleanlogs'),
url(r'^primecache/$', jobs.primecache, name='primecache'),
url(r'^dump_fixtures/$', views.dump_fixtures),
]
| [((136, 229), 'django.conf.urls.url', 'url', (['"""^choose_company/(?P<company_id>.*)/$"""', 'views.choose_company'], {'name': '"""choose_company"""'}), "('^choose_company/(?P<company_id>.*)/$', views.choose_company, name=\n 'choose_company')\n", (139, 229), False, 'from django.conf.urls import url, static\n'), ((235, 288), 'django.conf.urls.url', 'url', (['"""^cleanlogs/$"""', 'jobs.cleanlogs'], {'name': '"""cleanlogs"""'}), "('^cleanlogs/$', jobs.cleanlogs, name='cleanlogs')\n", (238, 288), False, 'from django.conf.urls import url, static\n'), ((295, 351), 'django.conf.urls.url', 'url', (['"""^primecache/$"""', 'jobs.primecache'], {'name': '"""primecache"""'}), "('^primecache/$', jobs.primecache, name='primecache')\n", (298, 351), False, 'from django.conf.urls import url, static\n'), ((358, 402), 'django.conf.urls.url', 'url', (['"""^dump_fixtures/$"""', 'views.dump_fixtures'], {}), "('^dump_fixtures/$', views.dump_fixtures)\n", (361, 402), False, 'from django.conf.urls import url, static\n')] |
sequentialchaos/i3-workspace-swap | setup.py | 86646066b9f971c1ff130a642a914ab2db8f9ae6 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="i3-workspace-swap",
description='A python utility swap the content of two workplaces in i3wm',
long_description=long_description,
long_description_content_type="text/markdown",
version="1.1.0",
url='https://github.com/einzigartigername/i3-workspace-swap',
license='MIT',
author='Nelson Gillo',
author_email='[email protected]',
packages=setuptools.find_packages(),
scripts=['i3-workspace-swap'],
install_requires=['i3ipc'],
classifiers=[
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
'Programming Language :: Python :: 3'
],
python_requires='>=3.6',
)
| [((494, 520), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (518, 520), False, 'import setuptools\n')] |
sdnit-se/intersight-python | intersight/models/niaapi_version_regex_all_of.py | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from intersight.configuration import Configuration
class NiaapiVersionRegexAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'apic': 'NiaapiVersionRegexPlatform',
'dcnm': 'NiaapiVersionRegexPlatform',
'version': 'str'
}
attribute_map = {'apic': 'Apic', 'dcnm': 'Dcnm', 'version': 'Version'}
def __init__(self,
apic=None,
dcnm=None,
version=None,
local_vars_configuration=None): # noqa: E501
"""NiaapiVersionRegexAllOf - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._apic = None
self._dcnm = None
self._version = None
self.discriminator = None
if apic is not None:
self.apic = apic
if dcnm is not None:
self.dcnm = dcnm
if version is not None:
self.version = version
@property
def apic(self):
"""Gets the apic of this NiaapiVersionRegexAllOf. # noqa: E501
:return: The apic of this NiaapiVersionRegexAllOf. # noqa: E501
:rtype: NiaapiVersionRegexPlatform
"""
return self._apic
@apic.setter
def apic(self, apic):
"""Sets the apic of this NiaapiVersionRegexAllOf.
:param apic: The apic of this NiaapiVersionRegexAllOf. # noqa: E501
:type: NiaapiVersionRegexPlatform
"""
self._apic = apic
@property
def dcnm(self):
"""Gets the dcnm of this NiaapiVersionRegexAllOf. # noqa: E501
:return: The dcnm of this NiaapiVersionRegexAllOf. # noqa: E501
:rtype: NiaapiVersionRegexPlatform
"""
return self._dcnm
@dcnm.setter
def dcnm(self, dcnm):
"""Sets the dcnm of this NiaapiVersionRegexAllOf.
:param dcnm: The dcnm of this NiaapiVersionRegexAllOf. # noqa: E501
:type: NiaapiVersionRegexPlatform
"""
self._dcnm = dcnm
@property
def version(self):
"""Gets the version of this NiaapiVersionRegexAllOf. # noqa: E501
Version number for the Version Regex data, also used as identity. # noqa: E501
:return: The version of this NiaapiVersionRegexAllOf. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this NiaapiVersionRegexAllOf.
Version number for the Version Regex data, also used as identity. # noqa: E501
:param version: The version of this NiaapiVersionRegexAllOf. # noqa: E501
:type: str
"""
self._version = version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict()
if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NiaapiVersionRegexAllOf):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NiaapiVersionRegexAllOf):
return True
return self.to_dict() != other.to_dict()
| [((4607, 4640), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (4620, 4640), False, 'import six\n'), ((2358, 2373), 'intersight.configuration.Configuration', 'Configuration', ([], {}), '()\n', (2371, 2373), False, 'from intersight.configuration import Configuration\n')] |
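A minimal usage sketch for the generated model class above. The version string is made up, and the snippet assumes it runs where the intersight package (for Configuration) and the class definition are importable:
# Illustrative only: '5.2(1)' is a placeholder value, not real Version Regex data.
regex_info = NiaapiVersionRegexAllOf(version='5.2(1)')
print(regex_info.version)                                        # '5.2(1)'
print(regex_info.to_dict())                                      # {'apic': None, 'dcnm': None, 'version': '5.2(1)'}
print(regex_info == NiaapiVersionRegexAllOf(version='5.2(1)'))   # True, since __eq__ compares to_dict() output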
genemerewether/fprime | ROS/fprime_ws/src/genfprime/src/genfprime/generate_modmk.py | fcdd071b5ddffe54ade098ca5d451903daba9eed | #
# Copyright 2004-2016, by the California Institute of Technology.
# ALL RIGHTS RESERVED. United States Government Sponsorship
# acknowledged. Any commercial use must be negotiated with the Office
# of Technology Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws and
# regulations. By accepting this document, the user agrees to comply
# with all U.S. export laws and regulations. User has the
# responsibility to obtain export licenses, or other export authority
# as may be required before exporting such information to foreign
# countries or providing access to foreign persons.
#
from __future__ import print_function
import os
from genmsg import MsgGenerationException
#from . name import *
## :param str outdir: Full path to output directory
## :returns int: status. 0 if successful
def write_modmk(outdir): #, msg_types, srv_types):
if not os.path.isdir(outdir):
#TODO: warn?
return 0
xml_in_dir = set([f for f in os.listdir(outdir)
if f.endswith('.xml')])
_write_modmk(outdir, sorted(xml_in_dir))
# TODO(mereweth) if we want to independently specify the generated XML files
# generated_xml = [_msg_serializable_xml_name(f) for f in sorted(msg_types)]
# generated_xml.extend([_port_xml_name(f) for f in sorted(msg_types)]
# write_msg_modmk(outdir, generated_xml)
# generated_xml = [_srv_serializable_xml_name(f) for f in sorted(srv_types)]
# generated_xml.extend([_port_xml_name(f) for f in sorted(srv_types)]
# write_msg_modmk(outdir, generated_xml)
return 0
def _write_modmk(outdir, generated_xml):
if not os.path.exists(outdir):
os.makedirs(outdir)
elif not os.path.isdir(outdir):
        raise MsgGenerationException("file preventing the creation of Fprime directory: %s" % outdir)
p = os.path.join(outdir, 'mod.mk')
with open(p, 'w') as f:
f.write('SRC = \\\n')
if len(generated_xml) != 0:
for xml in generated_xml[:-1]:
f.write('%s \\\n'%xml)
f.write('%s\n'%generated_xml[-1])
return 0
| [((1892, 1922), 'os.path.join', 'os.path.join', (['outdir', '"""mod.mk"""'], {}), "(outdir, 'mod.mk')\n", (1904, 1922), False, 'import os\n'), ((948, 969), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (961, 969), False, 'import os\n'), ((1699, 1721), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (1713, 1721), False, 'import os\n'), ((1731, 1750), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (1742, 1750), False, 'import os\n'), ((1764, 1785), 'os.path.isdir', 'os.path.isdir', (['outdir'], {}), '(outdir)\n', (1777, 1785), False, 'import os\n'), ((1801, 1890), 'genmsg.MsgGenerationException', 'MsgGenerationException', (["('file preventing the creating of Fprime directory: %s' % dir)"], {}), "(\n 'file preventing the creating of Fprime directory: %s' % dir)\n", (1823, 1890), False, 'from genmsg import MsgGenerationException\n'), ((1043, 1061), 'os.listdir', 'os.listdir', (['outdir'], {}), '(outdir)\n', (1053, 1061), False, 'import os\n')] |
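A small sketch of what _write_modmk above produces; the output directory and XML file names are invented for illustration:
# Hypothetical paths and names; mirrors the loop above where every entry except the last gets a trailing backslash.
_write_modmk("/tmp/example_out", ["FooSerializableAi.xml", "BarPortAi.xml"])
# /tmp/example_out/mod.mk now contains:
#   SRC = \
#   FooSerializableAi.xml \
#   BarPortAi.xml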
fool65c/jupytext | tests/test_compare.py | 4b55d2e6ccc995c04679de0863234c60c3741a69 | import pytest
from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell
from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion
def test_raise_on_different_metadata():
ref = new_notebook(metadata={'kernelspec': {'language': 'python', 'name': 'python', 'display_name': 'Python'}},
cells=[new_markdown_cell('Cell one')])
test = new_notebook(metadata={'kernelspec': {'language': 'R', 'name': 'R', 'display_name': 'R'}},
cells=[new_markdown_cell('Cell one')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md')
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_type(raise_on_first_difference):
ref = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Cell two')])
test = new_notebook(cells=[new_markdown_cell('Cell one'), new_raw_cell('Cell two')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md', raise_on_first_difference=raise_on_first_difference)
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_content(raise_on_first_difference):
ref = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Cell two')])
test = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Modified cell two')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md', raise_on_first_difference=raise_on_first_difference)
def test_raise_on_incomplete_markdown_cell():
ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
test = new_notebook(cells=[new_markdown_cell('Cell one')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md')
def test_does_raise_on_split_markdown_cell():
ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
test = new_notebook(cells=[new_markdown_cell('Cell one'),
new_markdown_cell('second line')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md')
def test_raise_on_different_cell_metadata():
ref = new_notebook(cells=[new_code_cell('1+1')])
test = new_notebook(cells=[new_code_cell('1+1', metadata={'metakey': 'value'})])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'py:light')
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_count(raise_on_first_difference):
ref = new_notebook(cells=[new_code_cell('1')])
test = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'py:light', raise_on_first_difference=raise_on_first_difference)
with pytest.raises(NotebookDifference):
compare_notebooks(test, ref, 'py:light', raise_on_first_difference=raise_on_first_difference)
def test_does_not_raise_on_blank_line_removed():
ref = new_notebook(cells=[new_code_cell('1+1\n ')])
test = new_notebook(cells=[new_code_cell('1+1')])
compare_notebooks(ref, test, 'py:light')
def test_strict_raise_on_blank_line_removed():
ref = new_notebook(cells=[new_code_cell('1+1\n')])
test = new_notebook(cells=[new_code_cell('1+1')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'py:light', allow_expected_differences=False)
def test_dont_raise_on_different_outputs():
ref = new_notebook(cells=[new_code_cell('1+1')])
test = new_notebook(cells=[new_code_cell('1+1', outputs=[
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
])])
compare_notebooks(ref, test, 'md')
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_outputs(raise_on_first_difference):
ref = new_notebook(cells=[new_code_cell('1+1')])
test = new_notebook(cells=[new_code_cell('1+1', outputs=[
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
])])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md', compare_outputs=True, raise_on_first_difference=raise_on_first_difference)
def test_test_round_trip_conversion():
notebook = new_notebook(cells=[new_code_cell('1+1', outputs=[
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
])], metadata={'main_language': 'python'})
round_trip_conversion(notebook, {'extension': '.py'}, update=True)
def test_multiple_cells_differ():
nb1 = new_notebook(cells=[new_code_cell(''),
new_code_cell('2')])
nb2 = new_notebook(cells=[new_code_cell('1+1'),
new_code_cell('2\n2')])
with pytest.raises(NotebookDifference) as exception_info:
compare_notebooks(nb1, nb2, raise_on_first_difference=False)
assert 'Cells 1,2 differ' in exception_info.value.args[0]
def test_cell_metadata_differ():
nb1 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2', metadata={'additional': 'metadata1'})])
nb2 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2', metadata={'additional': 'metadata2'})])
with pytest.raises(NotebookDifference) as exception_info:
compare_notebooks(nb1, nb2, raise_on_first_difference=False)
assert "Cell metadata 'additional' differ" in exception_info.value.args[0]
def test_notebook_metadata_differ():
nb1 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2')])
nb2 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2')],
metadata={'kernelspec': {'language': 'python', 'name': 'python', 'display_name': 'Python'}})
with pytest.raises(NotebookDifference) as exception_info:
compare_notebooks(nb1, nb2, raise_on_first_difference=False, )
assert "Notebook metadata differ" in exception_info.value.args[0]
| [((701, 768), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raise_on_first_difference"""', '[True, False]'], {}), "('raise_on_first_difference', [True, False])\n", (724, 768), False, 'import pytest\n'), ((1156, 1223), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raise_on_first_difference"""', '[True, False]'], {}), "('raise_on_first_difference', [True, False])\n", (1179, 1223), False, 'import pytest\n'), ((2521, 2588), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raise_on_first_difference"""', '[True, False]'], {}), "('raise_on_first_difference', [True, False])\n", (2544, 2588), False, 'import pytest\n'), ((4046, 4113), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raise_on_first_difference"""', '[True, False]'], {}), "('raise_on_first_difference', [True, False])\n", (4069, 4113), False, 'import pytest\n'), ((3271, 3311), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""py:light"""'], {}), "(ref, test, 'py:light')\n", (3288, 3311), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((4008, 4042), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {}), "(ref, test, 'md')\n", (4025, 4042), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((5097, 5163), 'jupytext.compare.test_round_trip_conversion', 'round_trip_conversion', (['notebook', "{'extension': '.py'}"], {'update': '(True)'}), "(notebook, {'extension': '.py'}, update=True)\n", (5118, 5163), True, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((620, 653), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (633, 653), False, 'import pytest\n'), ((663, 697), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {}), "(ref, test, 'md')\n", (680, 697), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((1022, 1055), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (1035, 1055), False, 'import pytest\n'), ((1065, 1157), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {'raise_on_first_difference': 'raise_on_first_difference'}), "(ref, test, 'md', raise_on_first_difference=\n raise_on_first_difference)\n", (1082, 1157), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((1490, 1523), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (1503, 1523), False, 'import pytest\n'), ((1533, 1625), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {'raise_on_first_difference': 'raise_on_first_difference'}), "(ref, test, 'md', raise_on_first_difference=\n raise_on_first_difference)\n", (1550, 1625), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((1820, 1853), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (1833, 1853), False, 'import pytest\n'), ((1863, 1897), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {}), "(ref, test, 
'md')\n", (1880, 1897), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((2162, 2195), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (2175, 2195), False, 'import pytest\n'), ((2205, 2239), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {}), "(ref, test, 'md')\n", (2222, 2239), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((2434, 2467), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (2447, 2467), False, 'import pytest\n'), ((2477, 2517), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""py:light"""'], {}), "(ref, test, 'py:light')\n", (2494, 2517), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((2819, 2852), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (2832, 2852), False, 'import pytest\n'), ((2862, 2960), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""py:light"""'], {'raise_on_first_difference': 'raise_on_first_difference'}), "(ref, test, 'py:light', raise_on_first_difference=\n raise_on_first_difference)\n", (2879, 2960), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((2966, 2999), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (2979, 2999), False, 'import pytest\n'), ((3009, 3107), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['test', 'ref', '"""py:light"""'], {'raise_on_first_difference': 'raise_on_first_difference'}), "(test, ref, 'py:light', raise_on_first_difference=\n raise_on_first_difference)\n", (3026, 3107), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((3479, 3512), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (3492, 3512), False, 'import pytest\n'), ((3522, 3596), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""py:light"""'], {'allow_expected_differences': '(False)'}), "(ref, test, 'py:light', allow_expected_differences=False)\n", (3539, 3596), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((4548, 4581), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (4561, 4581), False, 'import pytest\n'), ((4591, 4704), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {'compare_outputs': '(True)', 'raise_on_first_difference': 'raise_on_first_difference'}), "(ref, test, 'md', compare_outputs=True,\n raise_on_first_difference=raise_on_first_difference)\n", (4608, 4704), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((5414, 5447), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (5427, 5447), False, 'import pytest\n'), ((5475, 5535), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['nb1', 'nb2'], {'raise_on_first_difference': '(False)'}), '(nb1, nb2, raise_on_first_difference=False)\n', (5492, 
5535), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((5920, 5953), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (5933, 5953), False, 'import pytest\n'), ((5981, 6041), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['nb1', 'nb2'], {'raise_on_first_difference': '(False)'}), '(nb1, nb2, raise_on_first_difference=False)\n', (5998, 6041), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((6487, 6520), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (6500, 6520), False, 'import pytest\n'), ((6548, 6608), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['nb1', 'nb2'], {'raise_on_first_difference': '(False)'}), '(nb1, nb2, raise_on_first_difference=False)\n', (6565, 6608), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((414, 443), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (431, 443), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((579, 608), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (596, 608), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((865, 894), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (882, 894), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((896, 921), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""Cell two"""'], {}), "('Cell two')\n", (909, 921), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((955, 984), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (972, 984), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((986, 1010), 'nbformat.v4.nbbase.new_raw_cell', 'new_raw_cell', (['"""Cell two"""'], {}), "('Cell two')\n", (998, 1010), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1323, 1352), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (1340, 1352), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1354, 1379), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""Cell two"""'], {}), "('Cell two')\n", (1367, 1379), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1413, 1442), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (1430, 1442), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1444, 1478), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""Modified cell two"""'], {}), "('Modified cell two')\n", (1457, 1478), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1699, 1746), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one\n\n\nsecond 
line"""'], {}), '("""Cell one\n\n\nsecond line""")\n', (1716, 1746), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1779, 1808), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (1796, 1808), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1976, 2023), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one\n\n\nsecond line"""'], {}), '("""Cell one\n\n\nsecond line""")\n', (1993, 2023), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2056, 2085), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (2073, 2085), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2118, 2150), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""second line"""'], {}), "('second line')\n", (2135, 2150), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2317, 2337), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (2330, 2337), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2371, 2422), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {'metadata': "{'metakey': 'value'}"}), "('1+1', metadata={'metakey': 'value'})\n", (2384, 2422), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2686, 2704), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (2699, 2704), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2738, 2756), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (2751, 2756), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2789, 2807), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {}), "('2')\n", (2802, 2807), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3184, 3210), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1\n """'], {}), "('1+1\\n ')\n", (3197, 3210), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3244, 3264), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (3257, 3264), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3391, 3413), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1\n"""'], {}), "('1+1\\n')\n", (3404, 3413), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3447, 3467), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (3460, 3467), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3673, 3693), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (3686, 3693), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3727, 3865), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {'outputs': 
"[{'data': {'text/plain': ['2']}, 'execution_count': 1, 'metadata': {},\n 'output_type': 'execute_result'}]"}), "('1+1', outputs=[{'data': {'text/plain': ['2']},\n 'execution_count': 1, 'metadata': {}, 'output_type': 'execute_result'}])\n", (3740, 3865), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((4208, 4228), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (4221, 4228), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((4262, 4400), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {'outputs': "[{'data': {'text/plain': ['2']}, 'execution_count': 1, 'metadata': {},\n 'output_type': 'execute_result'}]"}), "('1+1', outputs=[{'data': {'text/plain': ['2']},\n 'execution_count': 1, 'metadata': {}, 'output_type': 'execute_result'}])\n", (4275, 4400), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((4777, 4915), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {'outputs': "[{'data': {'text/plain': ['2']}, 'execution_count': 1, 'metadata': {},\n 'output_type': 'execute_result'}]"}), "('1+1', outputs=[{'data': {'text/plain': ['2']},\n 'execution_count': 1, 'metadata': {}, 'output_type': 'execute_result'}])\n", (4790, 4915), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5229, 5246), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['""""""'], {}), "('')\n", (5242, 5246), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5278, 5296), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {}), "('2')\n", (5291, 5296), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5329, 5349), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (5342, 5349), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5381, 5402), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2\n2"""'], {}), "('2\\n2')\n", (5394, 5402), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5663, 5681), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (5676, 5681), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5713, 5769), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {'metadata': "{'additional': 'metadata1'}"}), "('2', metadata={'additional': 'metadata1'})\n", (5726, 5769), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5802, 5820), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (5815, 5820), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5852, 5908), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {'metadata': "{'additional': 'metadata2'}"}), "('2', metadata={'additional': 'metadata2'})\n", (5865, 5908), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((6190, 6208), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (6203, 6208), False, 'from nbformat.v4.nbbase import 
new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((6240, 6258), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {}), "('2')\n", (6253, 6258), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((6291, 6309), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (6304, 6309), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((6341, 6359), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {}), "('2')\n", (6354, 6359), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n')] |
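A short sketch of the usage pattern the tests above exercise: with raise_on_first_difference=False, compare_notebooks collects all differences into a single NotebookDifference whose message can be inspected. The notebook contents are arbitrary examples:
from nbformat.v4.nbbase import new_notebook, new_code_cell
from jupytext.compare import compare_notebooks, NotebookDifference

a = new_notebook(cells=[new_code_cell('x = 1'), new_code_cell('x + 1')])
b = new_notebook(cells=[new_code_cell('x = 2'), new_code_cell('x + 2')])
try:
    compare_notebooks(a, b, raise_on_first_difference=False)
except NotebookDifference as err:
    print(err.args[0])  # expected to mention that both cells differ, as asserted in the tests above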
MrAngelDo6pa/MedBotS | Cogs/Actions.py | 89e19d831507e20d0898114502967b2ad8ecf957 | import asyncio
import discord
import random
import datetime
from discord.ext import commands
from Cogs import DisplayName
from Cogs import Nullify
def setup(bot):
# Add the bot
bot.add_cog(Actions(bot))
class Actions(commands.Cog):
## class that handles storing and computing action messages
class actionable:
## these should be filled in the override class. any {} are replaced with target member's name
nothingList = [] # when you call without any arguments
botList = [] # when the action is done at the bot
selfList = [] # when the action is done at the user who called it
memberList = [] # when the action is done toward another member
itemList = [] # when the action is done on a string of text that is not a member
def computeAction(self, bot, ctx, target):
'''return a message based on the context and argument of the command'''
mesg = ""
if not target: # no arguments
mesg = random.choice(self.nothingList)
else:
targetMember = DisplayName.memberForName(target, ctx.message.guild)
if targetMember:
if self.botList and targetMember.id == bot.user.id: # actioning the bot
mesg = random.choice(self.botList) # if botList is empty we fail over to the member list
elif self.selfList and targetMember.id == ctx.message.author.id: # actioning themselves
mesg = random.choice(self.selfList)
else: # actioning another user
mesg = random.choice(self.memberList).replace("{}",DisplayName.name(targetMember))
else: # actioning an item
mesg = random.choice(self.itemList)
if '{}' in mesg:
mesg = mesg.format(target)
mesgFull = '*{}*, {}'.format(DisplayName.name(ctx.message.author), mesg)
mesgFull = Nullify.clean(mesgFull)
return mesgFull
## static definitions of all the action messages
class eating(actionable):
nothingList = [ 'you sit quietly and eat *nothing*...',
'you\'re *sure* there was something to eat, so you just chew on nothingness...',
'there comes a time when you need to realize that you\'re just chewing nothing for the sake of chewing. That time is now.']
botList = [ 'you try to eat *me* - but unfortunately, I saw it coming - your jaw hangs open as I deftly sidestep.',
'your mouth hangs open for a brief second before you realize that *I\'m* eating *you*.',
'I\'m a bot. You can\'t eat me.',
'your jaw clamps down on... wait... on nothing, because I\'m *digital!*.',
'what kind of bot would I be if I let you eat me?']
selfList = ['you clamp down on your own forearm - not surprisingly, it hurts.',
'you place a finger into your mouth, but *just can\'t* force yourself to bite down.',
'you happily munch away, but can now only wave with your left hand.',
'wait - you\'re not a sandwich!',
'you might not be the smartest...']
memberList = [ 'you unhinge your jaw and consume *{}* in one bite.',
'you try to eat *{}*, but you just can\'t quite do it - you spit them out, the taste of failure hanging in your mouth...',
'you take a quick bite out of *{}*. They probably didn\'t even notice.',
'you sink your teeth into *{}\'s* shoulder - they turn to face you, eyes wide as you try your best to scurry away and hide.',
'your jaw clamps down on *{}* - a satisfying *crunch* emanates as you finish your newest meal.']
itemList = [ 'you take a big chunk out of *{}*. *Delicious.*',
'your teeth sink into *{}* - it tastes satisfying.',
'you rip hungrily into *{}*, tearing it to bits!',
'you just can\'t bring yourself to eat *{}* - so you just hold it for awhile...',
'you attempt to bite into *{}*, but you\'re clumsier than you remember - and fail...']
class drinking(actionable):
nothingList = [ 'you stare at your glass full of *nothing*...',
'that cup must\'ve had something in it, so you drink *nothing*...',
'you should probably just go get a drink.',
'that desk looks pretty empty',
'are you sure you know what drinking is?',
'you desperatly search for something to drink']
botList = [ 'you try to drink *me*, but I dodge your straw.',
'You search for me, only to realise that *I* am already drinking you!',
'I\'m a bot. You can\'t drink me.',
'you stick a straw in... wait... in nothing, because I\'m *digital!*.',
'what do you think I am to let you drink me?',
'I don\'t think you would like the taste of me.',
'you can\'t drink me, I\'m a machine!']
selfList = ['you stab yourself with a straw - not surprisingly, it hurts.',
'you fit yourself in to a cup, but you just can\'t do it.',
'you happily drink away, but you are now very floppy.',
'wait - you\'re not a drink!',
'you might not be the smartest...',
'you might have some issues.',
'you try to drink yourself.',
'why would you drink yourself?']
memberList = [ 'you grab your lucky straw and empty *{}* in one sip.',
'you try to drink *{}*, but you just can\'t quite do it - you spit them out, the taste of failure hanging in your mouth...',
'you drink a small sip of *{}*. They probably didn\'t even notice.',
'you stab your straw into *{}\'s* shoulder - You run away as they run after you.',
'you happily drink away - *{}* starts to look like an empty Capri Sun package.',
'you are thirsty - *{}* sacrifices themself involuntarily.',
'somehow you end up emptying *{}*.']
itemList = ['you take a big sip of *{}*. *Delicious.*',
'your straw sinks into *{}* - it tastes satisfying.',
'you thirstly guzzle *{}*, it\'s lovely!',
'you just can\'t bring yourself to drink *{}* - so you just hold it for awhile...',
'you attempt to drain *{}*, but you\'re clumsier than you remember - and fail...',
'you drink *{}*.',
'*{}* dries up from your drinking.',
'*{}* starts resembling the Aral Sea.']
class booping(actionable):
nothingList = [ 'you stretch out your hand in the air, but there\'s nothing there...',
'you try and find someone to boop, but there\'s no one there.',
'you look around the channel for someone to boop.',
'you eye all the heads in the room, just waiting to be booped.',
'are you sure you have someone to boop?',
'I get it. You want to boop *someone*.']
selfList = ['you boop yourself on the nose with your finger.',
'you try to boop your head, but your hand gets lost along the way.',
'you happily boop yourself, but you are now very giddy.',
'wait - are you sure you want to boop yourself?',
'you might not be the smartest...',
'you might have some issues.',
'you try to boop yourself.',
'why would you boop yourself?']
memberList = [ 'you outstretch your lucky finger and boop *{}* in one go.',
'you try to boop *{}*, but you just can\'t quite do it - you miss their head, the taste of failure hanging stuck to your hand...',
'you sneak a boop onto *{}*. They probably didn\'t even notice.',
'you poke your hand onto *{}\'s* hand - You run away as they run after you.',
'you happily drum your fingers away - *{}* starts to look annoyed.',
'you\'re feeling boopy - *{}* sacrifices themself involuntarily.',
'somehow you end up booping *{}*.',
'you climb *{}*\'s head and use it as a bouncy castle... they feel amused.']
itemList = ['you put your hand onto *{}*\'s head. *Bliss.*',
'your hand touches *{}*\'s snoot - it feels satisfying.',
'you happily boop *{}*, it\'s lovely!',
'you just can\'t bring yourself to boop *{}* - so you just let your hand linger...',
'you attempt to boop *{}*, but you\'re clumsier than you remember - and fail...',
'you boop *{}*.',
'*{}* feels annoyed from your booping.',
'*{}* starts resembling a happy pupper.']
class spooky(actionable):
nothingList = [ 'you spook no one but yourself',
'you spook nothing, sp00py...',
'sadly, no one got spooked',
'it is sp00... you can\t spook air']
botList = [ 'you scared the living pumpkin out of me!',
'you spooked me so hard, I got the Heebie-jeebies...', # https://www.myenglishteacher.eu/blog/idioms-for-being-afraid/
'you sp00p me? But I\'m a bot... I can\'t be spooked!',
            'sorry, but I cannot let you spook me; My digital emotions will get all messed up!',
            'aaaaaaaaaah! Don\'t you scare me like that again!']
selfList = ['go watch a scary movie to be absolutely sp00ped!',
'boo! Did you scare you?',
'you look yourself in the mirror and get a little scared...',
'get spooked by... yourself?',
'sp00py, but why spook yourself?']
memberList = [ 'you sp00p *{}* so hard that they start screaming!',
'you tried to sneak up on *{}*, but they heard you sneakin\' and fail...',
'it is sp00py time! Hey *{}*, boo!',
'congrats, *{}* dun sp00ked.',
'get spook3d *{}*!']
itemList = ['you spook *{}* with no reaction, leaving you looking weird...',
'*{}* got sp00p3d so hard, it ran away!',
'you trick or treat *{}* without any reaction...',
'you do your best to sp00p *{}*, but fail...',
'sp00py time! *{}* gets sp00ped harder than you thought and starts crying!']
class highfives(actionable):
nothingList = [ 'you stand alone for an eternity, hand raised up - desperate for any sort of recognition...',
'with a wild swing you throw your hand forward - the momentum carries you to the ground and you just lay there - high fiveless...',
'the only sound you hear as a soft *whoosh* as your hand connects with nothing...']
botList = [ 'the sky erupts with 1\'s and 0\'s as our hands meet in an epic high five of glory!',
'you beam up to the cloud and receive a quick high five from me before downloading back to Earth.',
'I unleash a fork-bomb of high five processes!',
'01001000011010010110011101101000001000000100011001101001011101100110010100100001']
selfList = ['ahh - high fiving yourself, classy...',
'that\'s uh... that\'s just clapping...',
'you run in a large circle - *totally* high fiving all your friends...',
'now you\'re at both ends of a high five!']
memberList = [ 'you and *{}* jump up for an epic high five - freeze-framing as the credits roll and some wicked 80s synth plays out.',
'you and *{}* elevate to a higher plane of existence in wake of that tremendous high five!',
'a 2 hour, 3 episode anime-esque fight scene unfolds as you and *{}* engage in a world-ending high five!',
'it *was* tomorrow - before you and *{}* high fived with enough force to spin the Earth in reverse!',
'like two righteous torpedoes - you and *{}* connect palms, subsequently deafening everyone in a 300-mile radius!']
itemList = ['neat... you just high fived *{}*.',
'your hand flops through the air - hitting *{}* with a soft thud.',
'you reach out a hand, gently pressing your palm to *{}*. A soft *"high five"* escapes your lips as a tear runs down your cheek...',
'like an open-handed piston of ferocity - you drive your palm into *{}*.']
class petting(actionable): # meow
nothingList = [ 'you absentmindedly wave your hand in the air.',
'you could have sworn there was a cat there!',
'you remember that there are no cats here.',
'you try to pet the cat, but miss because the cat is gone.']
botList = [ 'I may be electronic but I still appreciate pets.',
'*purrrrrrrrrrrrrrr*.',
'you electrocute yourself trying to pet a computer.']
selfList = ['you give yourself a nice pat on the head.',
'too bad there\'s no one else to pet you.',
'in lieu of anything else to pet, you pet yourself.',
'your hair is warm and soft.']
memberList = [ 'you give *{}* a pat on the head.',
'you rub your hand through *{}\'s* hair.',
'*{}* smiles from your petting.',
'you try to pet *{}*, but miss because they hid under the bed.',
'*{}* purrs from your petting.',
'you pet *{}* but they bite your hand',
'you try to pet *{}* but they hiss and run away.']
itemList = ['you rub *{}* but it doesn\'t feel like a cat.',
'you don\'t hear any purring from *{}*.',
'you hurt your hand trying to pet *{}*.']
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot):
self.bot = bot
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
@commands.command(pass_context=True)
async def eat(self, ctx, *, member : str = None):
"""Eat like a boss."""
msg = self.eating.computeAction(self.eating, self.bot, ctx, member) #python is silly and makes me do this for uninitialized classes
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def drink(self, ctx, *, member : str = None):
"""Drink like a boss."""
msg = self.drinking.computeAction(self.drinking, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def boop(self, ctx, *, member : str = None):
"""Boop da snoot."""
msg = self.booping.computeAction(self.booping, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def spook(self, ctx, *, member : str = None):
"""sp00ktober by camiel."""
if datetime.date.today().month == 10:
# make it extra sp00py because it is spooktober
await ctx.message.add_reaction("🎃")
msg = self.spooky.computeAction(self.spooky, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def highfive(self, ctx, *, member : str = None):
"""High five like a boss."""
msg = self.highfives.computeAction(self.highfives, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def pet(self, ctx, *, member : str = None):
"""pet kitties."""
msg = self.petting.computeAction(self.petting, self.bot, ctx, member)
await ctx.channel.send(msg)
return
| [((12454, 12489), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (12470, 12489), False, 'from discord.ext import commands\n'), ((12743, 12778), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (12759, 12778), False, 'from discord.ext import commands\n'), ((12976, 13011), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (12992, 13011), False, 'from discord.ext import commands\n'), ((13202, 13237), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (13218, 13237), False, 'from discord.ext import commands\n'), ((13564, 13599), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (13580, 13599), False, 'from discord.ext import commands\n'), ((13806, 13841), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (13822, 13841), False, 'from discord.ext import commands\n'), ((1708, 1731), 'Cogs.Nullify.clean', 'Nullify.clean', (['mesgFull'], {}), '(mesgFull)\n', (1721, 1731), False, 'from Cogs import Nullify\n'), ((924, 955), 'random.choice', 'random.choice', (['self.nothingList'], {}), '(self.nothingList)\n', (937, 955), False, 'import random\n'), ((984, 1036), 'Cogs.DisplayName.memberForName', 'DisplayName.memberForName', (['target', 'ctx.message.guild'], {}), '(target, ctx.message.guild)\n', (1009, 1036), False, 'from Cogs import DisplayName\n'), ((1650, 1686), 'Cogs.DisplayName.name', 'DisplayName.name', (['ctx.message.author'], {}), '(ctx.message.author)\n', (1666, 1686), False, 'from Cogs import DisplayName\n'), ((13327, 13348), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (13346, 13348), False, 'import datetime\n'), ((1533, 1561), 'random.choice', 'random.choice', (['self.itemList'], {}), '(self.itemList)\n', (1546, 1561), False, 'import random\n'), ((1149, 1176), 'random.choice', 'random.choice', (['self.botList'], {}), '(self.botList)\n', (1162, 1176), False, 'import random\n'), ((1337, 1365), 'random.choice', 'random.choice', (['self.selfList'], {}), '(self.selfList)\n', (1350, 1365), False, 'import random\n'), ((1459, 1489), 'Cogs.DisplayName.name', 'DisplayName.name', (['targetMember'], {}), '(targetMember)\n', (1475, 1489), False, 'from Cogs import DisplayName\n'), ((1415, 1445), 'random.choice', 'random.choice', (['self.memberList'], {}), '(self.memberList)\n', (1428, 1445), False, 'import random\n')] |
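A standalone sketch of the message-selection logic computeAction implements above, without any Discord objects; the caller and target names are placeholders:
import random

member_list = ['you give *{}* a pat on the head.',
               "you rub your hand through *{}'s* hair."]
target_name = 'SomeUser'    # stands in for DisplayName.name(targetMember)
caller_name = 'Caller'      # stands in for DisplayName.name(ctx.message.author)
mesg = random.choice(member_list).replace('{}', target_name)
print('*{}*, {}'.format(caller_name, mesg))   # same final formatting as computeAction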
tobiasbaumann1/amd | marltoolbox/examples/tune_function_api/lola_pg_official.py | cb6190be92dea54db04ef9202d381b96f6f6218b | ##########
# Additional dependencies are needed:
# Follow the LOLA installation described in the tune_class_api/lola_pg_official.py file
##########
import os
import ray
from ray import tune
import marltoolbox.algos.lola.envs as lola_envs
import marltoolbox.algos.lola_dice.envs as lola_dice_envs
from marltoolbox.algos.lola import train_cg, train_exact, train_pg
from marltoolbox.envs.vectorized_coin_game import CoinGame, AsymCoinGame
from marltoolbox.utils import log
def trainer_fn(exp_name, num_episodes, trace_length, exact, pseudo, grid_size,
lr, lr_correction, batch_size, bs_mul, simple_net, hidden, reg,
gamma, lola_update, opp_model, mem_efficient, seed, set_zero,
warmup, changed_config, ac_lr, summary_len, use_MAE,
use_toolbox_env, clip_lola_update_norm, clip_loss_norm, entropy_coeff,
weigth_decay, **kwargs):
# Instantiate the environment
if exp_name == "IPD":
env = lola_envs.IPD(trace_length)
elif exp_name == "IMP":
env = lola_envs.IMP(trace_length)
elif exp_name == "CoinGame":
if use_toolbox_env:
env = CoinGame(config={
"batch_size": batch_size,
"max_steps": trace_length,
"grid_size": grid_size,
"get_additional_info": True,
"add_position_in_epi": False,
})
else:
env = lola_dice_envs.CG(trace_length, batch_size, grid_size)
env.seed(seed)
elif exp_name == "AsymCoinGame":
if use_toolbox_env:
env = AsymCoinGame(config={
"batch_size": batch_size,
"max_steps": trace_length,
"grid_size": grid_size,
"get_additional_info": True,
"add_position_in_epi": False,
})
else:
env = lola_dice_envs.AsymCG(trace_length, batch_size, grid_size)
env.seed(seed)
else:
raise ValueError(f"exp_name: {exp_name}")
# Import the right training function
if exact:
train_exact.train(env,
num_episodes=num_episodes,
trace_length=trace_length,
simple_net=simple_net,
corrections=lola_update,
pseudo=pseudo,
num_hidden=hidden,
reg=reg,
lr=lr,
lr_correction=lr_correction,
gamma=gamma)
elif exp_name in ("IPD", "IMP"):
train_pg.train(env,
num_episodes=num_episodes,
trace_length=trace_length,
batch_size=batch_size,
gamma=gamma,
set_zero=set_zero,
lr=lr,
corrections=lola_update,
simple_net=simple_net,
hidden=hidden,
mem_efficient=mem_efficient)
elif exp_name in ("CoinGame", "AsymCoinGame"):
train_cg.train(env,
num_episodes=num_episodes,
trace_length=trace_length,
batch_size=batch_size,
bs_mul=bs_mul,
gamma=gamma,
grid_size=grid_size,
lr=lr,
corrections=lola_update,
opp_model=opp_model,
hidden=hidden,
mem_efficient=mem_efficient,
asymmetry=exp_name == "AsymCoinGame",
warmup=warmup,
changed_config=changed_config,
ac_lr=ac_lr,
summary_len=summary_len,
use_MAE=use_MAE,
use_toolbox_env=use_toolbox_env,
clip_lola_update_norm=clip_lola_update_norm,
clip_loss_norm=clip_loss_norm,
entropy_coeff=entropy_coeff,
weigth_decay=weigth_decay,
)
else:
raise ValueError(f"exp_name: {exp_name}")
def lola_training(config):
trainer_fn(**config)
def get_tune_config(full_config: dict) -> dict:
# Sanity
assert full_config['exp_name'] in {"CoinGame", "IPD", "IMP", "AsymCoinGame"}
if full_config['exact']:
assert full_config['exp_name'] != "CoinGame", "Can't run CoinGame with --exact."
assert full_config['exp_name'] != "AsymCoinGame", "Can't run AsymCoinGame with --exact."
# Resolve default parameters
if full_config['exact']:
full_config['num_episodes'] = 50 if full_config['num_episodes'] is None else full_config['num_episodes']
full_config['trace_length'] = 200 if full_config['trace_length'] is None else full_config['trace_length']
full_config['lr'] = 1. if full_config['lr'] is None else full_config['lr']
elif full_config['exp_name'] in {"IPD", "IMP"}:
full_config['num_episodes'] = 600000 if full_config['num_episodes'] is None else full_config['num_episodes']
full_config['trace_length'] = 150 if full_config['trace_length'] is None else full_config['trace_length']
full_config['batch_size'] = 4000 if full_config['batch_size'] is None else full_config['batch_size']
full_config['lr'] = 1. if full_config['lr'] is None else full_config['lr']
elif full_config['exp_name'] == "CoinGame" or full_config['exp_name'] == "AsymCoinGame":
full_config['num_episodes'] = 100000 if full_config['num_episodes'] is None else full_config['num_episodes']
full_config['trace_length'] = 150 if full_config['trace_length'] is None else full_config['trace_length']
full_config['batch_size'] = 4000 if full_config['batch_size'] is None else full_config['batch_size']
full_config['lr'] = 0.005 if full_config['lr'] is None else full_config['lr']
if full_config['exp_name'] in ("IPD", "CoinGame", "AsymCoinGame"):
full_config['gamma'] = 0.96 if full_config['gamma'] is None else full_config['gamma']
elif full_config['exp_name'] == "IMP":
full_config['gamma'] = 0.9 if full_config['gamma'] is None else full_config['gamma']
return full_config
def main(debug):
exp_name, _ = log.log_in_current_day_dir(f"LOLA_PG")
tune_hparams = {
"exp_name": exp_name,
# Dynamically set
"num_episodes": 3 if debug else None,
"trace_length": 6 if debug else None,
"lr": None,
"gamma": None,
"batch_size": 12 if debug else None,
# "exp_name": "IPD",
# "exp_name": "IMP",
"exp_name": "CoinGame",
# "exp_name": "AsymCoinGame",
"pseudo": False,
"grid_size": 3,
"lola_update": True,
"opp_model": False,
"mem_efficient": True,
"lr_correction": 1,
"bs_mul": 1 / 10,
"simple_net": True,
"hidden": 32,
"reg": 0,
"set_zero": 0,
"exact": False,
"warmup": 1,
"seed": 1,
"changed_config": False,
"ac_lr": 1.0,
"summary_len": 1,
"use_MAE": False,
"use_toolbox_env": True,
"clip_loss_norm": False,
"clip_lola_update_norm": False,
"clip_lola_correction_norm": 3.0,
"clip_lola_actor_norm": 10.0,
"entropy_coeff": 0.001,
"weigth_decay": 0.03,
}
tune_config = get_tune_config(tune_hparams)
ray.init(num_cpus=os.cpu_count(), num_gpus=0)
tune_analysis = tune.run(lola_training, name=tune_hparams["exp_name"], config=tune_config)
ray.shutdown()
return tune_analysis
if __name__ == "__main__":
debug_mode = True
main(debug_mode)
| [((6377, 6415), 'marltoolbox.utils.log.log_in_current_day_dir', 'log.log_in_current_day_dir', (['f"""LOLA_PG"""'], {}), "(f'LOLA_PG')\n", (6403, 6415), False, 'from marltoolbox.utils import log\n'), ((7639, 7713), 'ray.tune.run', 'tune.run', (['lola_training'], {'name': "tune_hparams['exp_name']", 'config': 'tune_config'}), "(lola_training, name=tune_hparams['exp_name'], config=tune_config)\n", (7647, 7713), False, 'from ray import tune\n'), ((7718, 7732), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (7730, 7732), False, 'import ray\n'), ((947, 974), 'marltoolbox.algos.lola.envs.IPD', 'lola_envs.IPD', (['trace_length'], {}), '(trace_length)\n', (960, 974), True, 'import marltoolbox.algos.lola.envs as lola_envs\n'), ((2057, 2286), 'marltoolbox.algos.lola.train_exact.train', 'train_exact.train', (['env'], {'num_episodes': 'num_episodes', 'trace_length': 'trace_length', 'simple_net': 'simple_net', 'corrections': 'lola_update', 'pseudo': 'pseudo', 'num_hidden': 'hidden', 'reg': 'reg', 'lr': 'lr', 'lr_correction': 'lr_correction', 'gamma': 'gamma'}), '(env, num_episodes=num_episodes, trace_length=trace_length,\n simple_net=simple_net, corrections=lola_update, pseudo=pseudo,\n num_hidden=hidden, reg=reg, lr=lr, lr_correction=lr_correction, gamma=gamma\n )\n', (2074, 2286), False, 'from marltoolbox.algos.lola import train_cg, train_exact, train_pg\n'), ((1017, 1044), 'marltoolbox.algos.lola.envs.IMP', 'lola_envs.IMP', (['trace_length'], {}), '(trace_length)\n', (1030, 1044), True, 'import marltoolbox.algos.lola.envs as lola_envs\n'), ((2579, 2818), 'marltoolbox.algos.lola.train_pg.train', 'train_pg.train', (['env'], {'num_episodes': 'num_episodes', 'trace_length': 'trace_length', 'batch_size': 'batch_size', 'gamma': 'gamma', 'set_zero': 'set_zero', 'lr': 'lr', 'corrections': 'lola_update', 'simple_net': 'simple_net', 'hidden': 'hidden', 'mem_efficient': 'mem_efficient'}), '(env, num_episodes=num_episodes, trace_length=trace_length,\n batch_size=batch_size, gamma=gamma, set_zero=set_zero, lr=lr,\n corrections=lola_update, simple_net=simple_net, hidden=hidden,\n mem_efficient=mem_efficient)\n', (2593, 2818), False, 'from marltoolbox.algos.lola import train_cg, train_exact, train_pg\n'), ((7591, 7605), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (7603, 7605), False, 'import os\n'), ((3096, 3672), 'marltoolbox.algos.lola.train_cg.train', 'train_cg.train', (['env'], {'num_episodes': 'num_episodes', 'trace_length': 'trace_length', 'batch_size': 'batch_size', 'bs_mul': 'bs_mul', 'gamma': 'gamma', 'grid_size': 'grid_size', 'lr': 'lr', 'corrections': 'lola_update', 'opp_model': 'opp_model', 'hidden': 'hidden', 'mem_efficient': 'mem_efficient', 'asymmetry': "(exp_name == 'AsymCoinGame')", 'warmup': 'warmup', 'changed_config': 'changed_config', 'ac_lr': 'ac_lr', 'summary_len': 'summary_len', 'use_MAE': 'use_MAE', 'use_toolbox_env': 'use_toolbox_env', 'clip_lola_update_norm': 'clip_lola_update_norm', 'clip_loss_norm': 'clip_loss_norm', 'entropy_coeff': 'entropy_coeff', 'weigth_decay': 'weigth_decay'}), "(env, num_episodes=num_episodes, trace_length=trace_length,\n batch_size=batch_size, bs_mul=bs_mul, gamma=gamma, grid_size=grid_size,\n lr=lr, corrections=lola_update, opp_model=opp_model, hidden=hidden,\n mem_efficient=mem_efficient, asymmetry=exp_name == 'AsymCoinGame',\n warmup=warmup, changed_config=changed_config, ac_lr=ac_lr, summary_len=\n summary_len, use_MAE=use_MAE, use_toolbox_env=use_toolbox_env,\n clip_lola_update_norm=clip_lola_update_norm, clip_loss_norm=\n clip_loss_norm, 
entropy_coeff=entropy_coeff, weigth_decay=weigth_decay)\n", (3110, 3672), False, 'from marltoolbox.algos.lola import train_cg, train_exact, train_pg\n'), ((1124, 1285), 'marltoolbox.envs.vectorized_coin_game.CoinGame', 'CoinGame', ([], {'config': "{'batch_size': batch_size, 'max_steps': trace_length, 'grid_size':\n grid_size, 'get_additional_info': True, 'add_position_in_epi': False}"}), "(config={'batch_size': batch_size, 'max_steps': trace_length,\n 'grid_size': grid_size, 'get_additional_info': True,\n 'add_position_in_epi': False})\n", (1132, 1285), False, 'from marltoolbox.envs.vectorized_coin_game import CoinGame, AsymCoinGame\n'), ((1405, 1459), 'marltoolbox.algos.lola_dice.envs.CG', 'lola_dice_envs.CG', (['trace_length', 'batch_size', 'grid_size'], {}), '(trace_length, batch_size, grid_size)\n', (1422, 1459), True, 'import marltoolbox.algos.lola_dice.envs as lola_dice_envs\n'), ((1566, 1731), 'marltoolbox.envs.vectorized_coin_game.AsymCoinGame', 'AsymCoinGame', ([], {'config': "{'batch_size': batch_size, 'max_steps': trace_length, 'grid_size':\n grid_size, 'get_additional_info': True, 'add_position_in_epi': False}"}), "(config={'batch_size': batch_size, 'max_steps': trace_length,\n 'grid_size': grid_size, 'get_additional_info': True,\n 'add_position_in_epi': False})\n", (1578, 1731), False, 'from marltoolbox.envs.vectorized_coin_game import CoinGame, AsymCoinGame\n'), ((1851, 1909), 'marltoolbox.algos.lola_dice.envs.AsymCG', 'lola_dice_envs.AsymCG', (['trace_length', 'batch_size', 'grid_size'], {}), '(trace_length, batch_size, grid_size)\n', (1872, 1909), True, 'import marltoolbox.algos.lola_dice.envs as lola_dice_envs\n')] |
true7/srt | src/cut_link/utils.py | d5accd411e73ade4ed40a41759e95cb20fbda98d | import string
import random
import json
from calendar import month_name
from django.conf import settings
SHORTLINK_MIN = getattr(settings, "SHORTLINK_MIN", 6)
def code_generator(size=SHORTLINK_MIN):
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for _ in range(size))
def create_shortlink(instance):
new_link = code_generator()
class_ = instance.__class__
query_set = class_.objects.filter(shortlink=new_link)
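    # a collision with an existing shortlink triggers another attempt with a fresh random code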
if query_set.exists():
        return create_shortlink(instance)
return new_link
def json_data_func(instance):
''' Return json format data, ready for passing into AmCharts.
Contains 2 items - name of the month and count of distinct
links, which were cut on the website.
'''
class_ = instance.__class__
    # FIXME: results are filtered by month only, so every following year's links are added onto the same month buckets
result = []
for month in range(1, len(month_name)):
count_use = class_.objects.filter(pub_date__month=month).count()
data = dict(month=month_name[month], count=count_use)
result.append(data)
json_data = json.dumps(result)
return json_data
| [((1110, 1128), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (1120, 1128), False, 'import json\n'), ((270, 290), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (283, 290), False, 'import random\n')] |
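An illustrative run of the helpers above, assuming a configured Django project so the module imports; the shortlink code is random and the month counts depend on the stored links, so the values shown are examples only:
print(code_generator())   # e.g. 'a3FkQ9', SHORTLINK_MIN random letters and digits
# json_data_func(instance) returns a JSON string shaped like:
# [{"month": "January", "count": 3}, {"month": "February", "count": 0}, ...,
#  {"month": "December", "count": 7}]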
blankenberg/galaxy-data-resource | lib/tool_shed/scripts/bootstrap_tool_shed/bootstrap_util.py | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | #!/usr/bin/python
import argparse
import ConfigParser
import os
import sys
new_path = [ os.path.join( os.getcwd(), "lib" ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path
from galaxy import eggs
eggs.require( "SQLAlchemy >= 0.4" )
import galaxy.webapps.tool_shed.model.mapping as tool_shed_model
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.exc import OperationalError
from tool_shed.util import xml_util
def check_db( config_parser ):
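    # Bootstrapping is only allowed against a fresh install: a new (unmigrated) database and an empty hgweb.config.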
dburi = None
if config_parser.has_option( 'app:main', 'database_connection' ):
dburi = config_parser.get( 'app:main', 'database_connection' )
elif config_parser.has_option( 'app:main', 'database_file' ):
db_file = config_parser.get( 'app:main', 'database_file' )
dburi = "sqlite:///%s?isolation_level=IMMEDIATE" % db_file
else:
print 'The database configuration setting is missing from the tool_shed.ini file. Add this setting before attempting to bootstrap.'
exit(1)
sa_session = None
database_exists_message = 'The database configured for this Tool Shed is not new, so bootstrapping is not allowed. '
database_exists_message += 'Create a new database that has not been migrated before attempting to bootstrap.'
try:
model = tool_shed_model.init( config_parser.get( 'app:main', 'file_path' ), dburi, engine_options={}, create_tables=False )
sa_session = model.context.current
print database_exists_message
exit(1)
except ProgrammingError, e:
pass
except OperationalError, e:
pass
try:
if sa_session is not None:
result = sa_session.execute( 'SELECT version FROM migrate_version' ).first()
if result[0] >= 2:
print database_exists_message
exit(1)
else:
pass
except ProgrammingError, e:
pass
if config_parser.has_option( 'app:main', 'hgweb_config_dir' ):
hgweb_config_parser = ConfigParser.ConfigParser()
hgweb_dir = config_parser.get( 'app:main', 'hgweb_config_dir' )
hgweb_config_file = os.path.join( hgweb_dir, 'hgweb.config' )
if not os.path.exists( hgweb_config_file ):
exit(0)
hgweb_config_parser.read( hgweb_config_file )
configured_repos = hgweb_config_parser.items( 'paths' )
if len( configured_repos ) >= 1:
message = "This Tool Shed's hgweb.config file contains entries, so bootstrapping is not allowed. Delete"
message += " the current hgweb.config file along with all associated repositories in the configured "
message += "location before attempting to boostrap."
print
exit(1)
else:
exit(0)
else:
exit(0)
exit(0)
def admin_user_info( config_parser ):
user_info_config = os.path.abspath( os.path.join( os.getcwd(), 'lib/tool_shed/scripts/bootstrap_tool_shed', 'user_info.xml' ) )
tree, error_message = xml_util.parse_xml( user_info_config )
if tree is None:
print "The XML file ", user_info_config, " seems to be invalid, using defaults."
email = '[email protected]'
password = 'testuser'
username = 'admin'
else:
root = tree.getroot()
for elem in root:
if elem.tag == 'email':
email = elem.text
elif elem.tag == 'password':
password = elem.text
elif elem.tag == 'username':
username = elem.text
print '%s__SEP__%s__SEP__%s' % ( username, email, password )
return 0
def get_local_tool_shed_url( config_parser ):
port = '9009'
if config_parser.has_section( 'server:main' ):
if config_parser.has_option( 'server:main', 'port' ):
port = config_parser.get( 'server:main', 'port' )
host = '127.0.0.1'
print 'http://%s:%s' % ( host, port )
return 0
def main( args ):
config_parser = ConfigParser.ConfigParser()
if os.path.exists( args.config ):
config_parser.read( args.config )
else:
return 1
if args.method == 'check_db':
return check_db( config_parser )
elif args.method == 'admin_user_info':
return admin_user_info( config_parser )
elif args.method == 'get_url':
return get_local_tool_shed_url( config_parser )
else:
return 1
parser = argparse.ArgumentParser()
parser.add_argument( '-c', '--config_file', dest='config', action='store', default='config/tool_shed.ini.sample' )
parser.add_argument( '-e', '--execute', dest='method', action='store', default='check_db' )
args = parser.parse_args()
if __name__ == '__main__':
exit( main( args ) )
| [] |
jonnangle/moto-1 | moto/dynamodbstreams/responses.py | 40b4e299abb732aad7f56cc0f680c0a272a46594 | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import dynamodbstreams_backends
from six import string_types
class DynamoDBStreamsHandler(BaseResponse):
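    # Each handler parses parameters from the request and delegates to the region's dynamodbstreams backend.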
@property
def backend(self):
return dynamodbstreams_backends[self.region]
def describe_stream(self):
arn = self._get_param("StreamArn")
return self.backend.describe_stream(arn)
def list_streams(self):
table_name = self._get_param("TableName")
return self.backend.list_streams(table_name)
def get_shard_iterator(self):
arn = self._get_param("StreamArn")
shard_id = self._get_param("ShardId")
shard_iterator_type = self._get_param("ShardIteratorType")
sequence_number = self._get_param("SequenceNumber")
        # Per the documentation, the SequenceNumber parameter arrives as a string, so convert it for the backend.
if isinstance(sequence_number, string_types):
sequence_number = int(sequence_number)
return self.backend.get_shard_iterator(
arn, shard_id, shard_iterator_type, sequence_number
)
def get_records(self):
arn = self._get_param("ShardIterator")
limit = self._get_param("Limit")
if limit is None:
limit = 1000
return self.backend.get_records(arn, limit)
| [] |
ytorzuk-altran/openvino | tools/mo/openvino/tools/mo/front/mxnet/zeros_ext.py | 68d460a3bb578a738ba0e4d0e1f2e321afa73ab0 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.const import Const
class ZerosFrontExtractor(FrontExtractorOp):
op = '_zeros'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
shape = list(attrs.tuple('shape', int, None))
zero_shapes = []
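        # Dimensions given as 0 are replaced with 1; their indices are recorded in zero_shapes.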
for i, s in enumerate(shape):
if s == 0:
shape[i] = 1
zero_shapes.append(i)
update_attrs = {
'shape': np.ndarray(shape),
'value': np.zeros(shape),
'zero_shapes': zero_shapes
}
# update the attributes of the node
Const.update_node_stat(node, update_attrs)
return cls.enabled
| [((439, 478), 'openvino.tools.mo.front.mxnet.extractors.utils.get_mxnet_layer_attrs', 'get_mxnet_layer_attrs', (['node.symbol_dict'], {}), '(node.symbol_dict)\n', (460, 478), False, 'from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs\n'), ((892, 934), 'openvino.tools.mo.ops.const.Const.update_node_stat', 'Const.update_node_stat', (['node', 'update_attrs'], {}), '(node, update_attrs)\n', (914, 934), False, 'from openvino.tools.mo.ops.const import Const\n'), ((733, 750), 'numpy.ndarray', 'np.ndarray', (['shape'], {}), '(shape)\n', (743, 750), True, 'import numpy as np\n'), ((773, 788), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (781, 788), True, 'import numpy as np\n')] |
Jumpscale/jumpscale_portal8 | tools/jslib_builder.py | 3a4d56a1ba985b68fe9b525aed2486a54808332f |
from JumpScale import j
class builder():
# @property
# def buildDir(self):
# return j.sal.fs.joinPaths(j.dirs.tmpDir, "jsbuilder")
@property
def cuisine(self):
return j.tools.cuisine.local
# ALL NOT NEEDED ANY LONGER USE bower
# def angular(self):
# version = "1.5.9"
# url = "http://code.angularjs.org/%s/angular-%s.zip" % (version, version)
# path = j.do.download(url, to='', overwrite=False, retry=3, timeout=0)
# dpath = j.sal.fs.joinPaths(self.buildDir, "angular")
# j.sal.fs.removeDirTree(dpath)
# z = j.tools.zipfile.get(path)
# z.extract(self.buildDir)
# z.close()
# j.sal.fs.renameDir(j.sal.fs.joinPaths(self.buildDir, "angular-%s" % sversion), dpath)
# # self._removeMapFiles(dpath)
#
# def _removeMapFiles(self, path):
# for item in j.sal.fs.find(path, "*.js.map"):
# item = "%s/%s" % (path, item)
# # print(item)
# j.sal.fs.remove(item)
#
# def bootstrap(self):
# version = "3.3.7"
# url = "https://github.com/twbs/bootstrap/releases/download/v%s/bootstrap-%s-dist.zip" % (version, version)
# path = j.do.download(url, to='', overwrite=False, retry=3, timeout=0)
# dpath = j.sal.fs.joinPaths(self.buildDir, "bootstrap")
# j.sal.fs.removeDirTree(dpath)
# z = j.tools.zipfile.get(path)
# z.extract(self.buildDir)
# z.close()
# j.sal.fs.renameDir(j.sal.fs.joinPaths(self.buildDir, "bootstrap-%s-dist" % version), dpath)
# # self._removeMapFiles(dpath)
#
# def codemirror(self):
#
# version = "5.9"
# url = "http://codemirror.net/codemirror-%s.zip" % version
# path = j.do.download(url, to='', overwrite=False, retry=3, timeout=0)
# dpath = j.sal.fs.joinPaths(self.buildDir, "codemirror")
# j.sal.fs.removeDirTree(dpath)
# z = j.tools.zipfile.get(path)
# z.extract(self.buildDir)
# z.close()
# j.sal.fs.renameDir(j.sal.fs.joinPaths(self.buildDir, "codemirror-%s" % version), dpath)
# @property
# def npm(self):
# if self._npm == False:
# if j.sal.fs.exists("%s/npm" % j.dirs.binDir, followlinks=True) == False:
# self.cuisine.apps.nodejs.install()
# self._npm = "%snpm" % j.dirs.binDir
# return self._npm
# @property
# def bower(self):
# if self._bower == False:
# if j.sal.fs.exists("%s/bower" % j.dirs.binDir, followlinks=True) == False:
# self.cuisine.apps.nodejs.install()
# self._bower = "%sbower" % j.dirs.binDir
# return self._bower
# def famous(self):
# url = "https://github.com/Famous/engine-seed"
# cdest = j.do.pullGitRepo(url)
# res = j.sal.process.executeWithoutPipe("cd %s;%s install" % (cdest, self.npm))
#
# def flatui(self):
# url = "https://github.com/designmodo/Flat-UI.git"
# cdest = j.do.pullGitRepo(url)
# print("npm/bower install")
# res = j.sal.process.executeWithoutPipe("cd %s;%s install;%s install" % (cdest, self.npm, self.bower))
#
# def do1(self):
# j.sal.fs.createDir(j.sal.fs.joinPaths(j.dirs.tmpDir, "jsbuilder"))
# if self.checkIPFS == False:
# self.getIPFS()
# # self.angular()
# # self.bootstrap()
# # self.codemirror()
# # self.famous()
# self.flatui()
def do(self):
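        # Install IPFS if needed, then add the bower components directory to the local IPFS node.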
        if not self.checkIPFS():
self.getIPFS()
# self.cuisine.apps.nodejs.bowerInstall(["jquery", "flatui", "bootstrap", "famous", "codemirror", "font-awesome", "jqplot",
# "underscore", "spin", "moment", "http://DlhSoft.com/Packages/DlhSoft.KanbanLibrary.zip", "jqwidgets", "d3", "angular-latest"])
cmd = "cd $tmpDir/bower;ipfs -c $cfgDir/ipfs/main/ add -r bower_components"
print("IPFS upload, can take couple of minutes")
res = self.cuisine.core.run(cmd)
def checkIPFS(self):
return j.sal.nettools.checkUrlReachable("http://localhost:5001/webui") == True
def getIPFS(self):
j.tools.cuisine.local.apps.ipfs.install()
j.tools.cuisine.local.apps.ipfs.start()
b = builder()
b.do()
| [((4246, 4287), 'JumpScale.j.tools.cuisine.local.apps.ipfs.install', 'j.tools.cuisine.local.apps.ipfs.install', ([], {}), '()\n', (4285, 4287), False, 'from JumpScale import j\n'), ((4296, 4335), 'JumpScale.j.tools.cuisine.local.apps.ipfs.start', 'j.tools.cuisine.local.apps.ipfs.start', ([], {}), '()\n', (4333, 4335), False, 'from JumpScale import j\n'), ((4142, 4205), 'JumpScale.j.sal.nettools.checkUrlReachable', 'j.sal.nettools.checkUrlReachable', (['"""http://localhost:5001/webui"""'], {}), "('http://localhost:5001/webui')\n", (4174, 4205), False, 'from JumpScale import j\n')] |
Anindya-Prithvi/CO_M21_Assignment | SimpleSimulator/samuelator.py | 524bd2b866dd58a6358354cda65e2136ecd46e50 | import sys
import warnings
import matplotlib.pyplot as plt
from parsets import IMACC, IMG, PROGC, REGFLPC, ExecE, plot
warnings.filterwarnings("ignore")
MEM = IMACC(sys.stdin.read()) # Load memory from stdin
PC = PROGC(0) # Start from the first instruction
RF = REGFLPC() # initialize register and flags
EE = ExecE(MEM)
IM = IMG()
halted = False
cycle = 0
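# Nothing to execute if the instruction memory is entirely zeroed.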
if MEM.inst_mem == ["0" * 16 for i in range(256)]:
halted = True
while not halted:
Instruction = MEM.getData(PC) # Get current instruction
IM.imgx.append(cycle)
IM.imgy.append(PC.PC)
halted, new_PC, new_regs = EE.execute(Instruction, RF.asdct(), IM, cycle)
# Update RF compute new_PC
RF.update(new_regs, new_PC)
PC.dump()
# Print PC
RF.dump()
# Print RF state
PC.update(new_PC)
# Update PC
cycle += 1
MEM.dump() # Print memory state
# plotting
plot(plt, IM)
| [((121, 154), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (144, 154), False, 'import warnings\n'), ((217, 225), 'parsets.PROGC', 'PROGC', (['(0)'], {}), '(0)\n', (222, 225), False, 'from parsets import IMACC, IMG, PROGC, REGFLPC, ExecE, plot\n'), ((267, 276), 'parsets.REGFLPC', 'REGFLPC', ([], {}), '()\n', (274, 276), False, 'from parsets import IMACC, IMG, PROGC, REGFLPC, ExecE, plot\n'), ((315, 325), 'parsets.ExecE', 'ExecE', (['MEM'], {}), '(MEM)\n', (320, 325), False, 'from parsets import IMACC, IMG, PROGC, REGFLPC, ExecE, plot\n'), ((331, 336), 'parsets.IMG', 'IMG', ([], {}), '()\n', (334, 336), False, 'from parsets import IMACC, IMG, PROGC, REGFLPC, ExecE, plot\n'), ((868, 881), 'parsets.plot', 'plot', (['plt', 'IM'], {}), '(plt, IM)\n', (872, 881), False, 'from parsets import IMACC, IMG, PROGC, REGFLPC, ExecE, plot\n'), ((168, 184), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (182, 184), False, 'import sys\n')] |
LiReNa00/JDBot | utils/converters.py | c85b31e272d5394ba5debc26b8b5357fb9d3d844 | import discord
import re
import emoji
import contextlib
import typing
import datetime
from discord.ext import commands
from discord.http import Route
class BetterMemberConverter(commands.Converter):
async def convert(self, ctx, argument):
try:
user = await commands.MemberConverter().convert(ctx, argument)
except commands.MemberNotFound:
user = None
if user is None:
tag = re.match(r"#?(\d{4})", argument)
if tag:
if ctx.guild:
test = discord.utils.get(ctx.guild.members, discriminator=tag.group(1))
user = test or ctx.author
if ctx.guild is None:
user = await BetterUserconverter().convert(ctx, argument)
user = user or ctx.author
return user
class BetterUserconverter(commands.Converter):
async def convert(self, ctx, argument):
try:
user = await commands.UserConverter().convert(ctx, argument)
except commands.UserNotFound:
user = None
if not user and ctx.guild:
try:
user = await commands.MemberConverter().convert(ctx, argument)
except commands.MemberNotFound:
user = None
if user is None:
role = None
with contextlib.suppress(commands.RoleNotFound, commands.NoPrivateMessage):
role = await commands.RoleConverter().convert(ctx, argument)
if role:
if role.is_bot_managed():
user = role.tags.bot_id
user = await ctx.bot.try_user(user)
if user is None:
tag = re.match(r"#?(\d{4})", argument)
            if tag and ctx.bot.users:
test = discord.utils.get(ctx.bot.users, discriminator=tag.group(1))
user = test or ctx.author
return user
class EmojiBasic:
def __init__(self, id: int, url: str):
self.id = id
self.url = url
@classmethod
async def convert(cls, ctx, argument):
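        # Treat the argument as a raw emoji ID and probe the Discord CDN for a matching gif or png asset.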
match = re.match(r"(?P<id>[0-9]{15,21})", argument)
if match:
emoji_id = match.group(0)
extentions = ["gif", "png"]
for x in extentions:
response = await ctx.bot.session.get(f"https://cdn.discordapp.com/emojis/{emoji_id}.{x}")
if response.ok:
return cls(emoji_id, response.real_url)
else:
return None
class EmojiConverter(commands.Converter):
async def convert(self, ctx: commands.Context, arg: str):
emojis = emoji.unicode_codes.EMOJI_UNICODE["en"].values()
try:
return await commands.PartialEmojiConverter().convert(ctx, arg)
except commands.PartialEmojiConversionFailure:
pass
if arg.rstrip("\N{variation selector-16}") in emojis or arg in emojis:
return discord.PartialEmoji(name=arg)
else:
raise commands.BadArgument(f"{arg} is not an emoji")
class ColorConverter(commands.Converter):
async def convert(self, ctx, argument):
try:
color = await commands.ColourConverter().convert(ctx, argument)
except commands.BadColourArgument:
color = None
if not color and not argument.isdigit():
argument = list(s for s in argument.split(" ") if s)
if color and argument.isdigit():
argument = int(argument)
if isinstance(argument, int):
if argument > 16777215:
await ctx.send(f"{argument} is not valid color, 16777215 will be used instead.")
argument = 16777215
color = discord.Colour(argument)
if isinstance(argument, list):
argument = sorted(filter(lambda x: x.isdigit(), argument))
argument = [int(n) for n in argument][:3]
try:
color = discord.Colour.from_rgb(*argument)
except TypeError:
color = None
if color:
if color.value > 16777215:
color = discord.Colour(16777215)
return color
def generate_snowflake(dt: typing.Optional[datetime.datetime] = None) -> int:
"""Returns a numeric snowflake pretending to be created at the given date but more accurate and random than time_snowflake.
    If no dt is passed, one is created from the current time using utcnow.
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
dt = dt or discord.utils.utcnow()
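    # Milliseconds since the Discord epoch fill the upper bits; the low 22 bits (worker, process, increment) are all set.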
return int(dt.timestamp() * 1000 - 1420070400000) << 22 | 0x3FFFFF
class ObjectPlus(discord.Object):
@property
def worker_id(self) -> int:
""":class:`int`: Returns the worker id that made the snowflake."""
return (self.id & 0x3E0000) >> 17
@property
def process_id(self) -> int:
""":class:`int`: Returns the process id that made the snowflake."""
return (self.id & 0x1F000) >> 12
@property
def increment_id(self) -> int:
""":class:`int`: Returns the increment id that made the snowflake."""
return self.id & 0xFFF
class ObjectPlusConverter(commands.converter.IDConverter[commands.Converter]):
async def convert(self, ctx: commands.Context, argument: str) -> ObjectPlus:
match = self._get_id_match(argument) or re.match(r"<(?:@(?:!|&)?|#)([0-9]{15,20})>$", argument)
if match is None:
raise discord.errors.ObjectNotFound(argument)
result = int(match.group(1))
return ObjectPlus(id=result)
# remove if edpy adds my pull request into the master.
| [((2119, 2161), 're.match', 're.match', (['"""(?P<id>[0-9]{15,21})"""', 'argument'], {}), "('(?P<id>[0-9]{15,21})', argument)\n", (2127, 2161), False, 'import re\n'), ((4783, 4805), 'discord.utils.utcnow', 'discord.utils.utcnow', ([], {}), '()\n', (4803, 4805), False, 'import discord\n'), ((441, 473), 're.match', 're.match', (['"""#?(\\\\d{4})"""', 'argument'], {}), "('#?(\\\\d{4})', argument)\n", (449, 473), False, 'import re\n'), ((1714, 1746), 're.match', 're.match', (['"""#?(\\\\d{4})"""', 'argument'], {}), "('#?(\\\\d{4})', argument)\n", (1722, 1746), False, 'import re\n'), ((2961, 2991), 'discord.PartialEmoji', 'discord.PartialEmoji', ([], {'name': 'arg'}), '(name=arg)\n', (2981, 2991), False, 'import discord\n'), ((3024, 3070), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['f"""{arg} is not an emoji"""'], {}), "(f'{arg} is not an emoji')\n", (3044, 3070), False, 'from discord.ext import commands\n'), ((3742, 3766), 'discord.Colour', 'discord.Colour', (['argument'], {}), '(argument)\n', (3756, 3766), False, 'import discord\n'), ((5610, 5664), 're.match', 're.match', (['"""<(?:@(?:!|&)?|#)([0-9]{15,20})>$"""', 'argument'], {}), "('<(?:@(?:!|&)?|#)([0-9]{15,20})>$', argument)\n", (5618, 5664), False, 'import re\n'), ((5711, 5750), 'discord.errors.ObjectNotFound', 'discord.errors.ObjectNotFound', (['argument'], {}), '(argument)\n', (5740, 5750), False, 'import discord\n'), ((1358, 1427), 'contextlib.suppress', 'contextlib.suppress', (['commands.RoleNotFound', 'commands.NoPrivateMessage'], {}), '(commands.RoleNotFound, commands.NoPrivateMessage)\n', (1377, 1427), False, 'import contextlib\n'), ((3976, 4010), 'discord.Colour.from_rgb', 'discord.Colour.from_rgb', (['*argument'], {}), '(*argument)\n', (3999, 4010), False, 'import discord\n'), ((4153, 4177), 'discord.Colour', 'discord.Colour', (['(16777215)'], {}), '(16777215)\n', (4167, 4177), False, 'import discord\n'), ((283, 309), 'discord.ext.commands.MemberConverter', 'commands.MemberConverter', ([], {}), '()\n', (307, 309), False, 'from discord.ext import commands\n'), ((977, 1001), 'discord.ext.commands.UserConverter', 'commands.UserConverter', ([], {}), '()\n', (999, 1001), False, 'from discord.ext import commands\n'), ((2740, 2772), 'discord.ext.commands.PartialEmojiConverter', 'commands.PartialEmojiConverter', ([], {}), '()\n', (2770, 2772), False, 'from discord.ext import commands\n'), ((3199, 3225), 'discord.ext.commands.ColourConverter', 'commands.ColourConverter', ([], {}), '()\n', (3223, 3225), False, 'from discord.ext import commands\n'), ((1168, 1194), 'discord.ext.commands.MemberConverter', 'commands.MemberConverter', ([], {}), '()\n', (1192, 1194), False, 'from discord.ext import commands\n'), ((1458, 1482), 'discord.ext.commands.RoleConverter', 'commands.RoleConverter', ([], {}), '()\n', (1480, 1482), False, 'from discord.ext import commands\n')] |
AJK-dev/kissim | kissim/cli/encode.py | 15375000d47b5d5485322fc725809f853a3659de | """
kissim.cli.encode
Encode structures (generate fingerprints) from CLI arguments.
"""
import numpy as np
from kissim.api import encode
from kissim.cli.utils import configure_logger
def encode_from_cli(args):
"""
Encode structures.
Parameters
----------
args : argsparse.Namespace
CLI arguments.
"""
configure_logger(args.output)
structure_klifs_ids = _parse_structure_klifs_ids(args.input)
encode(structure_klifs_ids, args.output, args.local, args.ncores)
def _parse_structure_klifs_ids(args_input):
"""
Parse structure KLIFS IDs.
Parameters
----------
args_input : list of str
Either path to txt file with structure KLIFS ID (one ID per row) or one or more structure
KLIFS IDs.
Returns
-------
list of int
List of structure KLIFS IDs.
"""
if len(args_input) == 1:
try:
structure_klifs_ids = [int(args_input[0])]
except ValueError:
structure_klifs_ids = np.genfromtxt(fname=args_input[0], dtype=int).tolist()
else:
structure_klifs_ids = [int(i) for i in args_input]
return structure_klifs_ids
| [((344, 373), 'kissim.cli.utils.configure_logger', 'configure_logger', (['args.output'], {}), '(args.output)\n', (360, 373), False, 'from kissim.cli.utils import configure_logger\n'), ((443, 508), 'kissim.api.encode', 'encode', (['structure_klifs_ids', 'args.output', 'args.local', 'args.ncores'], {}), '(structure_klifs_ids, args.output, args.local, args.ncores)\n', (449, 508), False, 'from kissim.api import encode\n'), ((1016, 1061), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': 'args_input[0]', 'dtype': 'int'}), '(fname=args_input[0], dtype=int)\n', (1029, 1061), True, 'import numpy as np\n')] |
ZiyaoWei/pyMatrixProfile | distanceProfile.py | 1c88e1558e2bc5210d328d253572f5ff7fab1a5e | import numpy as np
from util import *
def naiveDistanceProfile(tsA, idx, m, tsB = None):
"""Return the distance profile of query against ts. Use the naive all pairs comparison algorithm.
>>> np.round(naiveDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
distanceProfile = []
n = len(tsB)
for i in range(n - m + 1):
distanceProfile.append(zNormalizedEuclideanDistance(query, tsB[i : i + m]))
if selfJoin:
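        # Mask the trivial match zone around the query so the subsequence does not match itself.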
        distanceProfile = np.asarray(distanceProfile)
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
        distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
def stampDistanceProfile(tsA, idx, m, tsB = None):
"""
>>> np.round(stampDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
n = len(tsB)
distanceProfile = mass(query, tsB)
if selfJoin:
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
if __name__ == "__main__":
import doctest
doctest.testmod()
| [((1672, 1689), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (1687, 1689), False, 'import doctest\n'), ((887, 923), 'numpy.full', 'np.full', (['(n - m + 1)', 'idx'], {'dtype': 'float'}), '(n - m + 1, idx, dtype=float)\n', (894, 923), True, 'import numpy as np\n'), ((1581, 1617), 'numpy.full', 'np.full', (['(n - m + 1)', 'idx'], {'dtype': 'float'}), '(n - m + 1, idx, dtype=float)\n', (1588, 1617), True, 'import numpy as np\n')] |
theo-dim/cash-gels-thesis | test_0000.py | de8c1b20f766aa1c58d8f692373c76683d165a66 | import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
| [] |