id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses 1 value)
---|---|---|
63122
|
<gh_stars>0
import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
@app.route("/")
@app.route("/get_exercises")
def get_exercises():
exercises = list(mongo.db.exercises.find({}))
print(exercises)
return render_template("exercises.html", exercises=exercises)
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "POST":
# check if username already exists in db
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
flash("Username already exists")
return redirect(url_for("register"))
register = {
"username": request.form.get("username").lower(),
"password": generate_password_hash(request.form.get("password"))
}
mongo.db.users.insert_one(register)
# put the new user into 'session' cookie
session["user"] = request.form.get("username").lower()
flash("Registration Successful!")
return redirect(url_for("profile", username=session["user"]))
return render_template("register.html")
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "POST":
# check if username exists in db
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
# ensure hashed password matches user input
if check_password_hash(
existing_user["password"], request.form.get("password")):
session["user"] = request.form.get("username").lower()
flash("Welcome, {}".format(
request.form.get("username")))
return redirect(url_for(
"profile", username=session["user"]))
else:
# invalid password match
flash("Incorrect Username and/or Password")
# return redirect(url_for("profile", username=session["user"]))
return redirect(url_for("login"))
else:
# username doesn't exist
flash("Incorrect Username and/or Password")
return redirect(url_for("login"))
return render_template("login.html")
@app.route("/profile/<username>", methods=["GET", "POST"])
def profile(username):
# get the session user's username from db
username = mongo.db.users.find_one(
{"username": session["user"]})["username"]
if session["user"]:
return render_template("profile.html", username=username)
return redirect(url_for("login"))
@app.route("/logout")
def logout():
# remove user from session cookie
flash("You have been logged out")
session.pop("user")
return redirect(url_for("login"))
@app.route("/add_exercises", methods=["GET", "POST"])
def add_exercises():
if request.method == "POST":
exercises = {
"category_name": request.form.get("category_name"),
"exercise_name": request.form.get("exercise_name"),
"exercise_description": request.form.get("exercise_description"),
"sets_description": request.form.get("sets_description"),
"reps_description": request.form.get("reps_description"),
"created_by": session["user"]
}
mongo.db.exercises.insert_one(exercises)
flash("Exercise Successfully Added")
return redirect(url_for("get_exercises"))
categories = mongo.db.categories.find().sort("category_name", 1)
return render_template("add_exercises.html", categories=categories)
if __name__ == "__main__":
app.run(host=os.environ.get("IP"),
port=int(os.environ.get("PORT")),
debug=True)
|
StarcoderdataPython
|
3258730
|
import inspect
import types
from . import _main as main
from . import _writer as writer
from ._manager import LogManager, LogSimpleManager
from . import settings
def loggable(
log_addr='*',
*,
log_args=True,
log_results=True,
log_enter=True,
log_exit=True,
log_path=True,
short=None,
long=None,
short_det = None,
long_det = None,
hidden=False,
):
""" All-purpose decorator for setting up logging for "loggables"
It is used as a decorator for functions, methods, properties and classes (it must be used for classes which have
decorated methods or else the methods will be "dangling"!).
"""
verbosity_settings = dict(
log_args = log_args,
log_results = log_results,
log_enter = log_enter,
log_exit = log_exit,
log_path = log_path,
)
def loggable_decorator(x):
""" Actual decorator for "loggables"
It decorates functions/methods/property accessors, but also classes containing any of the above.
"""
nonlocal log_addr, verbosity_settings, log_path, hidden
# If it is a function or method
if isinstance(x, types.FunctionType):
# replace the wildcard `*` in the given name with the actual name of the function
log_addr = log_addr.replace('*', x.__name__.strip('_'), 1)
if x.__qualname__ != x.__name__:
# It is a method, so a manager is created with its parent temporarily set to `None`; it will be sorted
# out when the class is decorated. The manager is also temporarily attached to the `_methodtolog`
# attribute of the method. It hangs there until the class is decorated -- then all the `_methodtolog`
# attributes will be cleaned up. If not, the logger is "dangling" -- meaning the class of this method
# was not decorated as it should have been.
x._methodtolog = LogSimpleManager(
addr = log_addr,
log_path = log_path,
func_parent = None,
func_name = x.__name__,
verbosity_settings = verbosity_settings,
hidden = hidden,
)
else:
# standalone function, so module can be given for a parent
lfm = LogSimpleManager(
addr = log_addr,
log_path = log_path,
func_parent = inspect.getmodule(x),
func_name = x.__name__,
verbosity_settings = verbosity_settings,
hidden = hidden,
)
# That's it for a function, so it can be added to the registry
lfm.add(auto_on=main._logging_enabled)
elif isinstance(x, classmethod):
# if it is a class method, the manager is created similarly to the plain method case, only the name must be
# dug one step deeper
log_addr = log_addr.replace('*', x.__func__.__name__.strip('_'), 1)
x._methodtolog = LogSimpleManager(
addr = log_addr,
log_path = log_path,
func_parent = None,
func_name = x.__func__.__name__,
verbosity_settings = verbosity_settings,
hidden = hidden,
)
elif isinstance(x, type):
# Finally a class is decorated.
if issubclass(x, LogManager):
# If it is an "aunt" class, the decorator performes a singlenton semantic That is it creates a single
# object, and registers it in the registry
manager = x(log_addr, log_path, hidden)
manager.add(auto_on=main._logging_enabled)
else:
# It is a regular user's class. Now we collect all the managers that were temporarily
# attached to the methods' `_methodtolog` attributes
log_addr = log_addr.replace('*', x.__name__.strip('_'), 1)
for prop_name in dir(x):
# for each member of the class we try...
# First we must find the member, and that means we must traverse the Method Resolution Order
for mro_class in x.__mro__:
try:
member = x.__getattribute__(mro_class, prop_name)
except AttributeError:
# The member is not in this class so we move one step in MRO.
pass
else:
# We found the member, so we can break from the loop
break
else:
# The loop was never broken.
# So we haven't found the member anywhere in the `__mro__` -- this should never happen, because
# the member was returned by `dir(x)`, so it should exist somewhere. To fail safely (quietly) we
# assign `None` to the member, which will fail in an expected manner down the line at
# `member._methodtolog.log_path`.
member = None
if isinstance(member, property):
# if it is an actual property we will have potentially three managers to sort out
members = ((member.fget, 'fget'), (member.fset, 'fset'), (member.fdel, 'fdel'))
else:
# if it is a regular method we have just one manager
members = ((member, None),)
for member, subname in members:
try:
# Now we just try to update the manager that is hanging on the function. If it is not
# hanging there, that means we have something other than a decorated method here and the
# exception occurs.
#
# The `log_path` value is really only meaningful in the class decorator, but it is needed in
# all method managers, hence it is copied here
member._methodtolog.log_path = log_path
# New name for the wrapper is created from the name given in the class decorator, and the
# name obtained when the method was decorated
member._methodtolog.addr = log_addr + '.' + member._methodtolog.addr
# the parent is finally known and can be assigned to the manager
member._methodtolog.func_parent = x
# if `subname` we are in a property
if subname:
# what was stored in the manager as the function name was in fact the name of the property,
# so it has to be rewritten
member._methodtolog.set_as_property_manager(member._methodtolog.func_name, subname)
# Function name is now one of the accessor functions: `fget`, `fset` or `fdel`
# The method is finally properly set up and can be added to the registry
member._methodtolog.add(auto_on=main._logging_enabled)
# This temporary member is no longer needed
del member._methodtolog
except Exception:
# It was not a decorated method (most of the time it is not), so we do nothing
pass
# When we decorate a class we can assign logging "repr"s here. One is "short" and one is "long". For
# description see docstring of `enh_repr` function.
loggable_class(x, short=short, long=long, short_det=short_det, long_det=long_det)
# After decoration we return the original method/function, so the class/module has exactly the same structure as
# it would have if it wasn't decorated at all. All the information needed is stored in the managers now. When the
# logging is turned on, the wrappers are created and the module/class is altered
return x
return loggable_decorator
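# A minimal usage sketch (hypothetical names, not part of this module). A standalone function is decorated
# directly, while a class whose methods are decorated must itself be decorated too, otherwise the method
# managers are left "dangling":
@loggable()
def example_function(a, b):
    return a + b
@loggable()
class ExampleEngine:
    @loggable()
    def run(self):
        return 'done'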
def loggable_class(class_, *,
short = None,
long = None,
short_det = None,
long_det = None,
):
""" Kind-of decorator for classes that are not in our control
It is used for so-called outer classes (i.e. classes imported from other modules that still should have nice
representation in logs).
"""
# A class that is not in our control (like a class imported from another module) that can be an argument or result
# of our code, can be assigned a "short" and "long" "repr". For description see docstring of `enh_repr`. This kind
# of class is called "outer".
if short is not None:
# Assigning a short repr to an outer class is meaningless because an outer class can never be a producer of a
# log (it has no decorated methods). But maybe short and long will be used somewhere else.
if not settings.deterministic_identificators:
writer.short_class_repr[class_] = short
elif short_det is not None:
writer.short_class_repr[class_] = short_det
else:
writer.short_class_repr[class_] = lambda y: "<>"
if long is not None:
if not settings.deterministic_identificators:
writer.long_class_repr[class_] = long
elif long_det is not None:
writer.long_class_repr[class_] = long_det
else:
writer.long_class_repr[class_] = lambda y: f"<obj of {str(type(y)).strip('<>')}>"
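# A minimal usage sketch (hypothetical choice of class, not part of this module): register log reprs for an
# "outer" class that we cannot decorate, here `pathlib.Path`.
from pathlib import Path
loggable_class(Path, short=lambda p: '<Path>', long=lambda p: '<Path ' + str(p) + '>')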
|
StarcoderdataPython
|
3235669
|
from django.contrib.admin import helpers
from django.core.urlresolvers import reverse
from django.db import transaction
from django.shortcuts import render, redirect
from django.template.response import TemplateResponse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from orchestra.admin.utils import get_object_from_url
@transaction.atomic
def update_orders(modeladmin, request, queryset, extra_context=None):
if not queryset:
return
if request.POST.get('post') == 'confirmation':
num = 0
services = []
for service in queryset:
updates = service.update_orders()
num += len(updates)
services.append(str(service.pk))
modeladmin.log_change(request, service, _("Orders updated"))
if num == 1:
url = reverse('admin:orders_order_change', args=(updates[0][0].pk,))
msg = _('<a href="%s">One related order</a> has benn updated') % url
else:
url = reverse('admin:orders_order_changelist')
url += '?service__in=%s' % ','.join(services)
msg = _('<a href="%s">%s related orders</a> have been updated') % (url, num)
modeladmin.message_user(request, mark_safe(msg))
return
updates = []
for service in queryset:
updates += service.update_orders(commit=False)
opts = modeladmin.model._meta
context = {
'title': _("Update orders will cause the following."),
'action_name': 'Update orders',
'action_value': 'update_orders',
'updates': updates,
'queryset': queryset,
'opts': opts,
'app_label': opts.app_label,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
'obj': get_object_from_url(modeladmin, request),
}
return render(request, 'admin/services/service/update_orders.html', context)
update_orders.url_name = 'update-orders'
update_orders.short_description = _("Update orders")
def view_help(modeladmin, request, queryset):
opts = modeladmin.model._meta
context = {
'title': _("Need some help?"),
'opts': opts,
'queryset': queryset,
'obj': queryset.get(),
'action_name': _("help"),
'app_label': opts.app_label,
}
return TemplateResponse(request, 'admin/services/service/help.html', context)
view_help.url_name = 'help'
view_help.tool_description = _("Help")
def clone(modeladmin, request, queryset):
service = queryset.get()
fields = modeladmin.get_fields(request)
query = []
for field in fields:
model_field = type(service)._meta.get_field(field)
if model_field.rel:
value = getattr(service, field + '_id')
elif 'Boolean' in model_field.__class__.__name__:
value = 'True' if getattr(service, field) else ''
else:
value = getattr(service, field)
query.append('%s=%s' % (field, value))
opts = service._meta
url = reverse('admin:%s_%s_add' % (opts.app_label, opts.model_name))
url += '?%s' % '&'.join(query)
return redirect(url)
|
StarcoderdataPython
|
1764629
|
import numpy as np
import probtorch
import torch
from torchvision.utils import make_grid
from base import BaseTrainer
from utils import inf_loop, MetricTracker
class Trainer(BaseTrainer):
"""
Trainer class
"""
def __init__(self, model, criterion, metric_ftns, optimizer, config, data_loader,
valid_data_loader=None, lr_scheduler=None, len_epoch=None,
num_particles=1):
super().__init__(model, criterion, metric_ftns, optimizer, config)
self.config = config
self.data_loader = data_loader
if len_epoch is None:
# epoch-based training
self.len_epoch = len(self.data_loader)
else:
# iteration-based training
self.data_loader = inf_loop(data_loader)
self.len_epoch = len_epoch
self.valid_data_loader = valid_data_loader
self.do_validation = self.valid_data_loader is not None
self.lr_scheduler = lr_scheduler
self.log_step = int(np.sqrt(data_loader.batch_size))
self.num_particles = num_particles
self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
def validate(self, epochs=1):
"""
Full validation logic
"""
metrics = []
for epoch in range(epochs):
result = self._valid_epoch(epoch)
# save logged information into log dict
log = {'epoch': epoch}
log.update(result)
# print logged information to the screen
for key, value in log.items():
self.logger.info(' {:15s}: {}'.format(str(key), value))
metrics.append(result)
return metrics
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains average loss and metric in this epoch.
"""
self.model.train()
self.train_metrics.reset()
for batch_idx, (data, target) in enumerate(self.data_loader):
data, target = data.to(self.device), target.to(self.device)
self.optimizer.zero_grad()
output = self.model(data)
loss = self.criterion(output, target)
loss.backward()
self.optimizer.step()
self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
self.train_metrics.update('loss', loss.item())
for met in self.metric_ftns:
self.train_metrics.update(met.__name__, met(output, target))
if batch_idx % self.log_step == 0:
self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(
epoch,
self._progress(batch_idx),
loss))
self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
if batch_idx == self.len_epoch:
break
log = self.train_metrics.result()
if self.do_validation:
val_log = self._valid_epoch(epoch)
log.update(**{'val_'+k : v for k, v in val_log.items()})
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
self.model.eval()
self.valid_metrics.reset()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(self.valid_data_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')
self.valid_metrics.update('loss', loss.item())
valid_metric_metadata = {
'dataset': self.valid_data_loader.dataset,
'model': self.model,
'device': self.device
}
for met in self.metric_ftns:
self.valid_metrics.update(met.__name__, met(output, target, metadata=valid_metric_metadata))
self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
# add histogram of model parameters to the tensorboard
for name, p in self.model.named_parameters():
self.writer.add_histogram(name, p, bins='auto')
return self.valid_metrics.result()
def _progress(self, batch_idx):
base = '[{}/{} ({:.0f}%)]'
if hasattr(self.data_loader, 'n_samples'):
current = batch_idx * self.data_loader.batch_size
total = self.data_loader.n_samples
else:
current = batch_idx
total = self.len_epoch
return base.format(current, total, 100.0 * current / total)
|
StarcoderdataPython
|
3263573
|
import time
import pyautogui
# Prevents pyautogui.FailSafeException from being raised if the
# mouse is in one of the screen corners during move(),
# see https://pyautogui.readthedocs.io/en/latest/#fail-safes
pyautogui.FAILSAFE = False
SECONDS_BETWEEN_MOVEMENTS = 60 * 3
def move():
pyautogui.move(+1, +1) # 1 pixel down and to the right
pyautogui.move(-1, -1) # 1 pixel up and to the left
print("Wiggled mouse, current time: " + time.ctime())
while(True):
move()
time.sleep(SECONDS_BETWEEN_MOVEMENTS)
|
StarcoderdataPython
|
195643
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test base estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators import base
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import log_loss
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
class BaseTest(tf.test.TestCase):
"""Test base estimators."""
def testOneDim(self):
random.seed(42)
x = np.random.rand(1000)
y = 2 * x + 3
feature_columns = learn.infer_real_valued_columns_from_input(x)
regressor = learn.LinearRegressor(feature_columns=feature_columns)
regressor.fit(x, y, max_steps=100)
score = mean_squared_error(y, np.array(list(regressor.predict(x))))
self.assertLess(score, 1.0, "Failed with score = {0}".format(score))
def testIris(self):
iris = datasets.load_iris()
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris.data, [x for x in iris.target], max_steps=100)
score = accuracy_score(iris.target, list(classifier.predict(iris.data)))
self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))
def testIrisAllVariables(self):
iris = datasets.load_iris()
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris.data, [x for x in iris.target], max_steps=100)
self.assertEqual(
classifier.get_variable_names(),
["centered_bias_weight",
"centered_bias_weight/Adagrad",
"global_step",
# Double slashes appear because the column name is empty. If it was not
# empty, the variable names would be "linear/column_name/weight" etc.
"linear//weight",
"linear//weight/Ftrl",
"linear//weight/Ftrl_1",
"linear/bias_weight",
"linear/bias_weight/Ftrl",
"linear/bias_weight/Ftrl_1"])
def testIrisSummaries(self):
iris = datasets.load_iris()
output_dir = tempfile.mkdtemp() + "learn_tests/"
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3, model_dir=output_dir)
classifier.fit(iris.data, iris.target, max_steps=100)
score = accuracy_score(iris.target, list(classifier.predict(iris.data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
# TODO(ipolosukhin): Check that summaries are correctly written.
def testIrisContinueTraining(self):
iris = datasets.load_iris()
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris.data, iris.target, steps=100)
score1 = accuracy_score(iris.target, list(classifier.predict(iris.data)))
classifier.fit(iris.data, iris.target, steps=500)
score2 = accuracy_score(iris.target, list(classifier.predict(iris.data)))
self.assertGreater(
score2, score1,
"Failed with score2 {0} <= score1 {1}".format(score2, score1))
def testIrisStreaming(self):
iris = datasets.load_iris()
def iris_data():
while True:
for x in iris.data:
yield x
def iris_predict_data():
for x in iris.data:
yield x
def iris_target():
while True:
for y in iris.target:
yield y
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris_data(), iris_target(), max_steps=500)
score1 = accuracy_score(iris.target,
list(classifier.predict(iris.data)))
score2 = accuracy_score(iris.target,
list(classifier.predict(iris_predict_data())))
self.assertGreater(score1, 0.5, "Failed with score = {0}".format(score1))
self.assertEqual(score2, score1, "Scores from {0} iterator doesn't "
"match score {1} from full "
"data.".format(score2, score1))
def testIris_proba(self):
# If sklearn available.
if log_loss:
random.seed(42)
iris = datasets.load_iris()
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris.data, iris.target, max_steps=250)
score = log_loss(iris.target, list(classifier.predict_proba(iris.data)))
self.assertLess(score, 0.8, "Failed with score = {0}".format(score))
def testBoston(self):
random.seed(42)
boston = datasets.load_boston()
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(boston.data))
regressor.fit(boston.data, boston.target, max_steps=500)
score = mean_squared_error(
boston.target, np.array(list(regressor.predict(boston.data))))
self.assertLess(score, 150, "Failed with score = {0}".format(score))
def testUnfitted(self):
estimator = learn.TensorFlowEstimator(model_fn=None, n_classes=1)
with self.assertRaises(base.NotFittedError):
estimator.predict([1, 2, 3])
with self.assertRaises(base.NotFittedError):
estimator.save("/tmp/path")
if __name__ == "__main__":
tf.test.main()
|
StarcoderdataPython
|
15974
|
<reponame>ZhengyangXu/Algorithm-Daily-Practice
#
# @lc app=leetcode.cn id=461 lang=python3
#
# [461] Hamming Distance
#
# https://leetcode-cn.com/problems/hamming-distance/description/
#
# algorithms
# Easy (79.21%)
# Likes: 459
# Dislikes: 0
# Total Accepted: 137K
# Total Submissions: 170K
# Testcase Example: '1\n4'
#
# The Hamming distance between two integers is the number of bit positions at which the corresponding bits differ.
#
# Given two integers x and y, compute the Hamming distance between them.
#
# Note:
# 0 ≤ x, y < 2^31.
#
# Example:
#
#
# Input: x = 1, y = 4
#
# Output: 2
#
# Explanation:
# 1   (0 0 0 1)
# 4   (0 1 0 0)
#        ↑   ↑
#
# The arrows above point to the positions where the corresponding bits differ.
#
#
#
# @lc code=start
class Solution:
def hammingDistance(self, x: int, y: int) -> int:
count = 0
z = x ^ y
while z:
z &= z-1
count += 1
return count
# @lc code=end
# def hammingDistance(self, x: int, y: int) -> int:
# count = 0
# for i in range(32):
# if x & 1 != y & 1:
# count += 1
# x >>= 1
# y >>= 1
# return count
# def hammingDistance(self, x: int, y: int) -> int:
# count = 0
# z = x ^ y
# for i in range(32):
# if z & 1:
# count += 1
# z >>= 1
# return count
# def hammingDistance(self, x: int, y: int) -> int:
# count = 0
# z = x ^ y
# while z:
# count += z & 1
# z >>= 1
# return count
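# Quick self-check against the example above (not part of the LeetCode submission):
if __name__ == '__main__':
    assert Solution().hammingDistance(1, 4) == 2  # 0001 ^ 0100 = 0101 has two differing bits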
|
StarcoderdataPython
|
108178
|
<reponame>raccoongang/askup<filename>askup_lti/urls.py
from django.conf.urls import url
from .provider import lti_launch
urlpatterns = [
url(r'^launch/?(?:/qset/(?P<qset_id>\d+)/?)?$', lti_launch, name='launch'),
]
|
StarcoderdataPython
|
10560
|
class IOEngine(object):
def __init__(self, node):
self.node = node
self.inputs = []
self.outputs = []
def release(self):
self.inputs = None
self.outputs = None
self.node = None
def updateInputs(self, names):
# remove this node from the outputs of inputs that are no longer used
for inputNode in self.inputs:
if not inputNode in names:
if self.node.model.existNode(inputNode):
self.node.model.getNode(inputNode).ioEngine.removeOutput(
self.node.identifier)
newInputs = []
for nodeId in names:
if self.node.model.existNode(nodeId):
newInputs.append(nodeId)
if not nodeId in self.inputs:
self.node.model.getNode(nodeId).ioEngine.addOutput(
self.node.identifier)
self.inputs = newInputs
def removeOutput(self, nodeId):
if nodeId in self.outputs:
self.outputs.remove(nodeId)
def removeInput(self, nodeId):
if nodeId in self.inputs:
self.inputs.remove(nodeId)
def addOutput(self, nodeId):
self.outputs.append(nodeId)
def updateNodeId(self, oldId, newId):
for inputNode in self.inputs:
if self.node.model.existNode(inputNode):
self.node.model.getNode(
inputNode).ioEngine.updateOutputId(oldId, newId)
for outputNode in self.outputs:
if self.node.model.existNode(outputNode):
self.node.model.getNode(
outputNode).ioEngine.updateInputId(oldId, newId)
def updateOnDeleteNode(self):
for inputNode in self.inputs:
if self.node.model.existNode(inputNode):
self.node.model.getNode(inputNode).ioEngine.removeOutput(
self.node.identifier)
for outputNode in self.outputs:
if self.node.model.existNode(outputNode):
self.node.model.getNode(outputNode).ioEngine.removeInput(
self.node.identifier)
def updateOutputId(self, oldId, newId):
if oldId in self.outputs:
self.outputs.remove(oldId)
self.outputs.append(newId)
def updateInputId(self, oldId, newId):
if oldId in self.inputs:
self.inputs.remove(oldId)
self.inputs.append(newId)
self.node.updateDefinitionForChangeId(oldId, newId)
|
StarcoderdataPython
|
148021
|
<reponame>tschoonj/cgat-daisy<filename>daisy/tasks/FASTQMetrics.py
from .BAMMetrics import run_metric_bam_fastqc
class run_metric_fastq_fastqc(run_metric_bam_fastqc):
name = "fastq_fastqc"
|
StarcoderdataPython
|
1612847
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ...tf3d.object_detection.model."""
import functools
import tensorflow as tf
from tf3d import standard_fields
from tf3d.losses import box_prediction_losses
from tf3d.losses import classification_losses
from tf3d.object_detection import model
class ObjectDetectionModelTest(tf.test.TestCase):
def get_inputs(self, num_voxels, num_classes):
return {
standard_fields.InputDataFields.num_valid_voxels:
tf.constant([num_voxels - 20], dtype=tf.int32),
standard_fields.InputDataFields.voxel_positions:
tf.random.uniform(
shape=(1, num_voxels, 3), minval=-10.0, maxval=10.0),
standard_fields.InputDataFields.voxel_features:
tf.random.uniform(
shape=(1, num_voxels, 10), minval=-2.0, maxval=2.0),
standard_fields.InputDataFields.voxel_xyz_indices:
tf.random.uniform(
shape=(1, num_voxels, 3),
minval=-100,
maxval=100,
dtype=tf.int32),
standard_fields.InputDataFields.object_center_voxels:
tf.random.uniform(
shape=(1, num_voxels, 3), minval=-10.0, maxval=10.0),
standard_fields.InputDataFields.object_length_voxels:
tf.random.uniform(
shape=(1, num_voxels, 1), minval=0.01, maxval=10.0),
standard_fields.InputDataFields.object_height_voxels:
tf.random.uniform(
shape=(1, num_voxels, 1), minval=0.01, maxval=10.0),
standard_fields.InputDataFields.object_width_voxels:
tf.random.uniform(
shape=(1, num_voxels, 1), minval=0.01, maxval=10.0),
standard_fields.InputDataFields.object_rotation_matrix_voxels:
tf.random.uniform(
shape=(1, num_voxels, 3, 3), minval=-1.0, maxval=1.0),
standard_fields.InputDataFields.object_class_voxels:
tf.random.uniform(
shape=(1, num_voxels, 1),
minval=0,
maxval=num_classes,
dtype=tf.int32),
standard_fields.InputDataFields.object_instance_id_voxels:
tf.random.uniform(
shape=(1, num_voxels, 1), minval=0, maxval=10, dtype=tf.int32),
}
def test_call_train(self):
num_classes = 5
loss_fn_box_corner_distance_on_voxel_tensors = functools.partial(
box_prediction_losses.box_corner_distance_loss_on_voxel_tensors,
is_intermediate=False,
loss_type='absolute_difference',
is_balanced=True)
loss_fn_box_classification_using_center_distance = functools.partial(
classification_losses.box_classification_using_center_distance_loss,
is_intermediate=False,
is_balanced=True,
max_positive_normalized_distance=0.3)
loss_fn_hard_negative_classification = functools.partial(
classification_losses.hard_negative_classification_loss,
is_intermediate=False,
gamma=1.0)
loss_names_to_functions = {
'box_corner_distance_loss_on_voxel_tensors':
loss_fn_box_corner_distance_on_voxel_tensors,
'box_classification_using_center_distance_loss':
loss_fn_box_classification_using_center_distance,
'hard_negative_classification_loss':
loss_fn_hard_negative_classification,
}
loss_names_to_weights = {
'box_corner_distance_loss_on_voxel_tensors': 5.0,
'box_classification_using_center_distance_loss': 1.0,
'hard_negative_classification_loss': 1.0,
}
object_detection_model = model.ObjectDetectionModel(
loss_names_to_functions=loss_names_to_functions,
loss_names_to_weights=loss_names_to_weights,
num_classes=num_classes,
predict_rotation_x=True,
predict_rotation_y=True,
predict_rotation_z=True)
num_voxels = 100
inputs = self.get_inputs(num_voxels=num_voxels, num_classes=num_classes)
outputs = object_detection_model(inputs, training=True)
self.assertEqual(
outputs[standard_fields.DetectionResultFields.object_semantic_voxels]
.get_shape(), (1, num_voxels, num_classes))
self.assertEqual(
outputs[standard_fields.DetectionResultFields.object_center_voxels]
.get_shape(), (1, num_voxels, 3))
self.assertEqual(
outputs[standard_fields.DetectionResultFields.object_length_voxels]
.get_shape(), (1, num_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields.object_height_voxels]
.get_shape(), (1, num_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields.object_width_voxels]
.get_shape(), (1, num_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_x_cos_voxels].get_shape(),
(1, num_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_x_sin_voxels].get_shape(),
(1, num_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_y_cos_voxels].get_shape(),
(1, num_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_y_sin_voxels].get_shape(),
(1, num_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_z_cos_voxels].get_shape(),
(1, num_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_z_sin_voxels].get_shape(),
(1, num_voxels, 1))
def test_call_eval(self):
num_classes = 5
object_detection_model = model.ObjectDetectionModel(
num_classes=num_classes,
predict_rotation_x=True,
predict_rotation_y=True,
predict_rotation_z=True)
num_voxels = 100
inputs = self.get_inputs(num_voxels=num_voxels, num_classes=num_classes)
outputs = object_detection_model(inputs, training=False)
num_valid_voxels = inputs[
standard_fields.InputDataFields.num_valid_voxels].numpy()[0]
self.assertEqual(
outputs[standard_fields.DetectionResultFields.object_semantic_voxels]
.get_shape(), (num_valid_voxels, num_classes))
self.assertEqual(
outputs[standard_fields.DetectionResultFields.object_center_voxels]
.get_shape(), (num_valid_voxels, 3))
self.assertEqual(
outputs[standard_fields.DetectionResultFields.object_length_voxels]
.get_shape(), (num_valid_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields.object_height_voxels]
.get_shape(), (num_valid_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields.object_width_voxels]
.get_shape(), (num_valid_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_x_cos_voxels].get_shape(),
(num_valid_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_x_sin_voxels].get_shape(),
(num_valid_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_y_cos_voxels].get_shape(),
(num_valid_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_y_sin_voxels].get_shape(),
(num_valid_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_z_cos_voxels].get_shape(),
(num_valid_voxels, 1))
self.assertEqual(
outputs[standard_fields.DetectionResultFields
.object_rotation_z_sin_voxels].get_shape(),
(num_valid_voxels, 1))
self.assertIn(standard_fields.DetectionResultFields.objects_center, outputs)
self.assertIn(standard_fields.DetectionResultFields.objects_length, outputs)
self.assertIn(standard_fields.DetectionResultFields.objects_height, outputs)
self.assertIn(standard_fields.DetectionResultFields.objects_width, outputs)
self.assertIn(standard_fields.DetectionResultFields.objects_rotation_matrix,
outputs)
self.assertIn(standard_fields.DetectionResultFields.objects_rotation_x_cos,
outputs)
self.assertIn(standard_fields.DetectionResultFields.objects_rotation_x_sin,
outputs)
self.assertIn(standard_fields.DetectionResultFields.objects_rotation_y_cos,
outputs)
self.assertIn(standard_fields.DetectionResultFields.objects_rotation_y_sin,
outputs)
self.assertIn(standard_fields.DetectionResultFields.objects_rotation_z_cos,
outputs)
self.assertIn(standard_fields.DetectionResultFields.objects_rotation_z_sin,
outputs)
if __name__ == '__main__':
tf.test.main()
|
StarcoderdataPython
|
3285375
|
#!/usr/bin/env python
"""
This Script does the preprocessing of a data set in form
of a shifting window over the data. This is needed for
the Saliency and the Word Influence Calculations.
"""
import os
import sys
import argparse
import numpy
import h5py
import itertools
__author__ = '<NAME>'
class Indexer:
def __init__(self):
self.counter = 1
self.d = {}
self.rev = {}
self._lock = False
def convert(self, w):
if w not in self.d:
if self._lock:
return self.d["<unk>"]
self.d[w] = self.counter
self.rev[self.counter] = w
self.counter += 1
return self.d[w]
def lock(self):
self._lock = True
def write(self, outfile):
out = open(outfile, "w")
items = [(v, k) for k, v in self.d.items()]
items.sort()
for v, k in items:
print(k, v, file=out)
out.close()
def get_data(args):
target_indexer = Indexer()
#add special words to indices in the target_indexer
target_indexer.convert("<s>")
target_indexer.convert("<unk>")
target_indexer.convert("</s>")
def convert(targetfile, batchsize, seqlength, outfile):
words = []
for i, targ_orig in enumerate(targetfile):
targ_orig = targ_orig.replace("<eos>", "")
targ = targ_orig.strip().split() + ["</s>"]
target_sent = [target_indexer.convert(w) for w in targ]
words += target_sent
#Don't let the shifting window get too big for memory reasons.
words = words[:1200000]
# plus 1 for torch.
targ_output = numpy.array(words[1:] + \
[target_indexer.convert("</s>")])
words = numpy.array(words)
print (words.shape, "shape of the word array before preprocessing")
# Write output.
f = h5py.File(outfile, "w")
#number of batches of windows
size = words.shape[0] // (batchsize*seqlength)
size = size * seqlength - seqlength + 1
print (size, "number of blocks after conversion")
original_index = numpy.array([i+1 for i, v in enumerate(words)])
f["target"] = numpy.zeros((size, batchsize, seqlength), dtype=int)
f["indices"] = numpy.zeros((size, batchsize, seqlength), dtype=int)
f["target_output"] = numpy.zeros((size, batchsize, seqlength), dtype=int)
pos = 0
for row in range(batchsize):
for batch in range(size):
f["target"][batch, row] = words[pos:pos+seqlength]
f["indices"][batch, row] = original_index[pos:pos+seqlength]
f["target_output"][batch, row] = targ_output[pos:pos+seqlength]
pos = pos + 1
print((row+1)/float(batchsize)*100, "% processed")
f["target_size"] = numpy.array([target_indexer.counter])
f["words"] = words
f["set_size"] = words.shape[0]
convert(args.targetfile, args.batchsize, args.seqlength, args.outputfile + ".hdf5")
target_indexer.lock()
convert(args.targetvalfile, args.batchsize, args.seqlength, args.outputfile + "val" + ".hdf5")
target_indexer.write(args.outputfile + ".targ.dict")
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('targetfile', help="Target Input file",
type=argparse.FileType('r'))
parser.add_argument('targetvalfile', help="Target Input file",
type=argparse.FileType('r'))
parser.add_argument('batchsize', help="Batchsize",
type=int)
parser.add_argument('seqlength', help="Sequence length",
type=int)
parser.add_argument('outputfile', help="HDF5 output file",
type=str)
args = parser.parse_args(arguments)
get_data(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
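# Example invocation (script and file names are hypothetical; batch size 32, sequence length 50):
#   python preprocess.py train.txt valid.txt 32 50 data
# This writes data.hdf5 and dataval.hdf5 plus the vocabulary file data.targ.dict.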
|
StarcoderdataPython
|
1671270
|
import sublime
import sublime_plugin
import os
import sys
import subprocess
import locale
class NotFoundError(Exception): pass
class MailtoCommand(sublime_plugin.TextCommand):
def run(self, edit):
try:
settings = sublime.load_settings('Mailto.sublime-settings')
self.run_bin(settings.get('command'))
except Exception as exception:
sublime.error_message(__name__ + ': ' + str(exception))
def run_bin(self, parameters):
try:
if not parameters:
raise NotFoundError('parameters not found')
for k, v in enumerate(parameters):
parameters[k] = v.replace('%d', self.getCurrentDirectory())
parameters[k] = parameters[k].replace('%f', self.getCurrentFileName())
args = parameters
encoding = locale.getpreferredencoding(do_setlocale=True)
subprocess.Popen(args, cwd=self.getCurrentDirectory())
except OSError as exception:
print(str(exception))
sublime.error_message(__name__ + ': command was not found')
except Exception as exception:
sublime.error_message(__name__ + ': ' + str(exception))
def getCurrentFileName(self):
return self.view.file_name()
def getCurrentDirectory(self):
return os.path.dirname(self.view.file_name())
|
StarcoderdataPython
|
3322281
|
from collections import Iterable
from django.http import HttpResponse, JsonResponse
from xm_s_common.decorator import common_ajax_response
from xm_s_common.utils import format_return, generate_date_range
from xm_s_common.page import Page
from explorer.interface import ExplorerBase
@common_ajax_response
def stat_auth_user_for_chart(request):
return format_return(0)
|
StarcoderdataPython
|
1645573
|
from os import path as op
from sys import path as pat
pa = op.abspath(op.join(op.dirname(__file__), '..', '..'))
pa = pa + "/common"
pat.insert(1, pa)
import general_log_switch
# I struggle with file navigation, especially in C++, and it slows me down considerably; that's why this script is used as a redirector to the general_log_switch module imported above.
|
StarcoderdataPython
|
3393545
|
import numpy as np
import eolearn
from eolearn.core import FeatureType, EOTask
from pathlib import Path
from osgeo import gdal
import os
import shutil
import subprocess
import rasterio
from eolearn.io.local_io import ExportToTiffTask, ImportFromTiffTask
class MultitempSpeckleFiltering(EOTask):
def __init__(self, otb_path, feature_name = "BANDS-S1-IW", path_in = './', window = 3):
'''
Multitemporal filtering ONLY for Sentinel-1 data using OTB
Parameters:
otb_path (str) : Path where bin from Orfeo Toolbox package is installed
path_in (str) : Path to write the temporary files (removed at the end of the process)
window (int) : window to apply for Quegan filter for SAR data
'''
self.feature_name = feature_name
self.otb_path = otb_path
self.path_in = path_in
self.window = window
@staticmethod
def _refactor_dates(t):
# Add dates as suffix
year, d, m = str(t.year), str(t.day), str(t.month)
if len(d)==1 :
d = '0'+d
if len(m)==1 :
m = '0'+m
return '{0}{1}{2}'.format(year, m, d)
def _apply_OTB_cmd(self, pol, ram = 8):
path_in = os.path.join(self.path_in, 'S1_'+pol)
s1_images = os.listdir(path_in)
infiles = [os.path.join(path_in, k) for k in s1_images]
infiles.sort()
cmd = [os.path.join(self.otb_path, "otbcli_MultitempFilteringOutcore"), "-inl"]
cmd += infiles
cmd += ['-wr', str(self.window), '-oc', os.path.join(path_in, 'outcore.tif'), '-ram', str(ram)]
outdir = Path(path_in+'_filtered')
if not outdir.exists() :
os.mkdir(outdir)
subprocess.call(cmd, shell=False)
cmd = [os.path.join(self.otb_path, "otbcli_MultitempFilteringFilter"), "-inl"]
cmd += infiles
cmd += ['-enl', os.path.join(outdir, 'enl.tif'),
'-wr', str(self.window),
'-filtpath', outdir,
'-oc', os.path.join(path_in, 'outcore.tif'),
'-ram', str(ram)]
subprocess.call(cmd, shell=False)
outfiles = [os.path.join(outdir, k.split('.')[0]+'_filtered.tif') for k in s1_images]
outfiles.sort()
return infiles, outdir, outfiles
def _save_temporary_geotiff(self, i, date, eopatch):
## TODO : Find a way to write temporary file without writing on disk using ExportToTiffTask to make the process faster
export = ExportToTiffTask(feature=self.feature_name,
folder=os.path.join(self.path_in, 'S1_VV/S1_VV_' + date),
band_indices=[0],
date_indices=[i])
export.execute(eopatch)
export = ExportToTiffTask(feature=self.feature_name,
folder=os.path.join(self.path_in, 'S1_VH/S1_VH_' + date),
band_indices=[1],
date_indices=[i])
export.execute(eopatch)
def execute(self, eopatch, ram = 8):
if os.path.exists(os.path.join(self.path_in, 'S1_VV')):
shutil.rmtree(os.path.join(self.path_in, 'S1_VV'))
shutil.rmtree(os.path.join(self.path_in, 'S1_VH'))
os.mkdir(os.path.join(self.path_in, 'S1_VV'))
os.mkdir(os.path.join(self.path_in, 'S1_VH'))
times = list(eopatch.timestamp)
for i, t in enumerate(times):
date = self._refactor_dates(t)
self._save_temporary_geotiff(i,date,eopatch)
########################################################################################################
for pol in ['VV', 'VH']:
infiles, outdir, outfiles = self._apply_OTB_cmd(pol,ram)
##########################################################################
reference_file = infiles[0]
with rasterio.open(reference_file) as src0:
meta = src0.meta
meta['nodata'] = 0.0
meta['dtype'] = 'float32'
meta['count'] = len(times)
path_tif = outfiles[0].split('_2017')[0] + '.tif'
if 'outcore_filtered.tif' in os.listdir(outdir):
outfiles.remove(os.path.join(outdir, 'outcore_filtered.tif'))
outfiles.sort()
with rasterio.open(path_tif, 'w', **meta) as dst:
for i in range(1, len(times) + 1):
img = gdal.Open(outfiles[i - 1]).ReadAsArray()
dst.write_band(i, img)
import_tif = ImportFromTiffTask((FeatureType.DATA, pol + '_filtered'), path_tif)
eopatch = import_tif.execute(eopatch)
shutil.rmtree(os.path.join(self.path_in, 'S1_VV_filtered'))
shutil.rmtree(os.path.join(self.path_in, 'S1_VH_filtered'))
shutil.rmtree(os.path.join(self.path_in, 'S1_VV'))
shutil.rmtree(os.path.join(self.path_in, 'S1_VH'))
return eopatch
class PanSharpening(EOTask):
def __init__(self, fname = 'BANDS',
otb_path = '/home/s999379/git-repo/OTB-7.4.0-Linux64/bin',
path_temporary_files = './tempo'):
'''
Pan-sharpening of the multispectral bands with the panchromatic band using OTB
Parameters:
fname (str) : Name of the feature stored in data that gathers the bands
otb_path (str) : Path where bin from Orfeo Toolbox package is installed
path_temporary_files (str) : path to save the temporary geotiff file to call OTB
'''
self.fname = fname
self.otb_path = otb_path
self.path_temporary_files = path_temporary_files
@staticmethod
def _refactor_dates(t):
# Add dates as suffix
year, d, m = str(t.year), str(t.day), str(t.month)
if len(d)==1 :
d = '0'+d
if len(m)==1 :
m = '0'+m
return '{0}{1}{2}'.format(year, m, d)
def _extracted_from__save_temporary_geotiff(self, date, i, eopatch, band_indices=None):
if band_indices is None :
band_indices = list(range(4))
export = ExportToTiffTask(feature=self.fname,
folder=os.path.join(self.path_temporary_files, 'PAN_' + date),
band_indices=[-1],
date_indices=[i])
export.execute(eopatch)
export = ExportToTiffTask(feature=self.fname,
folder=os.path.join(self.path_temporary_files, 'BANDS_' + date),
band_indices=band_indices,
date_indices=[i])
export.execute(eopatch)
def _apply_OTB_cmd(self, date):
cm = [os.path.join(self.otb_path, 'otbcli_Pansharpening'),
'-inp',os.path.join(self.path_temporary_files,'PAN_' + date +'.tif'),
'-inxs', os.path.join(self.path_temporary_files,'BANDS_' + date +'.tif'),
'-method', 'lmvm',
'-out', os.path.join(self.path_temporary_files, 'Pansharpened_' + date +'.tif'),
'float']
subprocess.call(cm, shell=False)
def _clean_temporary_files(self):
shutil.rmtree(self.path_temporary_files)
def execute(self, eopatch, band_indices=None):
times = list(eopatch.timestamp)
pan_bands = []
for i, t in enumerate(times):
date = self._refactor_dates(t)
self._extracted_from__save_temporary_geotiff(date, i, eopatch, band_indices)
self._apply_OTB_cmd(date)
img = gdal.Open(os.path.join(self.path_temporary_files, 'Pansharpened_' + date +'.tif')).ReadAsArray()
img = np.moveaxis(img, 0, -1)
pan_bands.append(img)
pan_bands = np.stack(pan_bands, axis =0)
self._clean_temporary_files()
eopatch.add_feature(eolearn.core.FeatureType.DATA, 'BANDS-PAN', pan_bands)
return eopatch
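# A minimal usage sketch (hypothetical paths, not part of this module): load an EOPatch that already holds the
# "BANDS-S1-IW" feature, run the MultitempSpeckleFiltering task defined above, and save the result.
if __name__ == '__main__':
    from eolearn.core import EOPatch
    patch = EOPatch.load('/data/eopatches/patch_0')  # hypothetical EOPatch location
    task = MultitempSpeckleFiltering(otb_path='/opt/OTB/bin', path_in='/tmp/s1_filter', window=3)
    patch = task.execute(patch, ram=8)
    patch.save('/data/eopatches/patch_0_filtered')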
|
StarcoderdataPython
|
3273667
|
<gh_stars>0
from sprite import FacingSprite
from sprite import VEC
STARTING_HEALTH = 3
KNOCKBACK_DISTANCE = 20
class Enemy(FacingSprite):
def __init__(self,
x,
y,
width,
height,
walking_path,
image_dir,
vel=VEC(0, 0)):
super().__init__(x, y, height, width, image_dir, vel)
self.bounds = walking_path
self.health = STARTING_HEALTH
def die(self):
self.kill()
def knockback(self, left):
if left:
self.pos.x -= KNOCKBACK_DISTANCE
else:
self.pos.x += KNOCKBACK_DISTANCE
self.update_rectangle()
def hit(self):
self.health -= 1
if self.health <= 0:
self.die()
|
StarcoderdataPython
|
1741182
|
from django.db.models import Case, When, IntegerField
from rest_framework import mixins, viewsets, generics
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from rest_auth.registration.views import SocialLoginView
from rest_framework import filters
from rest_framework.permissions import IsAuthenticated
from cities_light.models import City
from good_spot.users import serializers as users_serializers
from good_spot.users import models as users_models
from good_spot.users.openapidoc import documentation
from good_spot.users.permissions import IsOwnerPermission
class FacebookLogin(
documentation.FacebookLoginJsonSchema,
SocialLoginView):
adapter_class = FacebookOAuth2Adapter
class UserViewSet(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
documentation.UserViewSetJsonSchema,
viewsets.GenericViewSet):
queryset = users_models.User.objects.all()
serializer_class = users_serializers.UserModelSerializer
permission_classes = (IsAuthenticated, IsOwnerPermission)
class SearchCityFilter(filters.SearchFilter):
def filter_queryset(self, request, queryset, view):
qs = super(SearchCityFilter, self).filter_queryset(request, queryset, view)
if self.get_search_terms(request):
qs = qs.annotate(
priority=Case(
When(
name__istartswith=self.get_search_terms(request)[0], then=1
),
default=0,
output_field=IntegerField()
)
).order_by('-priority')
return qs
class SearchCityListAPIView(generics.ListAPIView):
queryset = City.objects.select_related('country')
serializer_class = users_serializers.SearchCitySerializer
filter_backends = (SearchCityFilter,)
search_fields = ('^name', '^alternate_names')
|
StarcoderdataPython
|
1743420
|
<reponame>xyleey/SNH48Live
import base64
import email.mime.text
import auth
gmail_client = None # Uninitialized
# Optionally called from command line scripts to pass in args. If not
# called manually, the client is still initialized on first use.
def init_gmail_client(args=None, scopes='gmail.send'):
global gmail_client
if gmail_client is None:
gmail_client = auth.get_gmail_client(args, scopes)
def send_mail(subject, body, to):
init_gmail_client()
msg = email.mime.text.MIMEText(body)
msg['to'] = to
msg['subject'] = subject
encoded_msg = base64.urlsafe_b64encode(msg.as_bytes()).decode('utf-8')
# https://developers.google.com/resources/api-libraries/documentation/gmail/v1/python/latest/gmail_v1.users.messages.html#send
gmail_client.users().messages().send( # pylint: disable=no-member
userId='me',
body={'raw': encoded_msg},
).execute()
|
StarcoderdataPython
|
4809430
|
from data_collection.management.commands import (
BaseXpressDCCsvInconsistentPostcodesImporter,
)
class Command(BaseXpressDCCsvInconsistentPostcodesImporter):
council_id = "E06000038"
addresses_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019Reading.tsv"
stations_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019Reading.tsv"
elections = ["local.2019-05-02"]
csv_delimiter = "\t"
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn == "310078632":
rec["postcode"] = "RG2 0NS"
if uprn in [
"310080475", # RG304RX -> RG304RY : 75 St. Michael`s Road, Tilehurst, Reading
"310077642", # RG27EX -> RG27EZ : Flat 2, The Farmhouse, Sherfield Drive, Sherfield Drive, Reading
]:
rec["accept_suggestion"] = True
if uprn in [
"310015114", # RG315AN -> RG48LP : Flat 9, 52 School Road, Tilehurst, Reading
"310013330", # RG20GA -> RG304RU : Flat 1, 25 Commercial Road, Reading
"310081622", # RG20FU -> RG13FF : Flat 16 Harlequin House, Padworth Avenue, Reading
"310008459", # RG303ES -> RG303QN : Flat 17 Priory Point, 36 Southcote Lane, Southcote, Reading
"310055368", # RG27RU -> RG303HY : White Barn, 45A Cressingham Road, Reading
"310023684", # RG17YJ -> RG17YG : 1 Prospect Cottages, Prospect Mews, Reading
"310023686", # RG17YJ -> RG17YG : 2 Prospect Cottages, Prospect Mews, Reading
"310023687", # RG17YJ -> RG17YG : 3 Prospect Cottages, Prospect Mews, Reading
]:
rec["accept_suggestion"] = False
if (
record.addressline1 == "Flat 1"
and record.addressline2 == "6 Gosbrook Road"
and record.addressline3 == "Caversham"
and record.addressline4 == "Reading"
):
return None
return rec
|
StarcoderdataPython
|
3314641
|
<reponame>allenai/zest
"""Tests for zest.modeling.tasks."""
|
StarcoderdataPython
|
3226662
|
import optparse
import tokenize
import warnings
# Polyfill stdin loading/reading lines
# https://gitlab.com/pycqa/flake8-polyfill/blob/1.0.1/src/flake8_polyfill/stdin.py#L52-57
try:
from flake8.engine import pep8
stdin_get_value = pep8.stdin_get_value
readlines = pep8.readlines
except ImportError:
from flake8 import utils
import pycodestyle
stdin_get_value = utils.stdin_get_value
readlines = pycodestyle.readlines
from flake8_quotes.__about__ import __version__
from flake8_quotes.docstring_detection import get_docstring_tokens
class QuoteChecker(object):
name = __name__
version = __version__
INLINE_QUOTES = {
# When user wants only single quotes
"'": {
'good_single': "'",
'bad_single': '"',
'single_error_message': 'Double quotes found but single quotes preferred',
},
# When user wants only double quotes
'"': {
'good_single': '"',
'bad_single': "'",
'single_error_message': 'Single quotes found but double quotes preferred',
},
}
# Provide aliases for Windows CLI support
# https://github.com/zheller/flake8-quotes/issues/49
INLINE_QUOTES['single'] = INLINE_QUOTES["'"]
INLINE_QUOTES['double'] = INLINE_QUOTES['"']
MULTILINE_QUOTES = {
"'": {
'good_multiline': "'''",
'good_multiline_ending': '\'"""',
'bad_multiline': '"""',
'multiline_error_message': 'Double quote multiline found but single quotes preferred',
},
'"': {
'good_multiline': '"""',
'good_multiline_ending': '"\'\'\'',
'bad_multiline': "'''",
'multiline_error_message': 'Single quote multiline found but double quotes preferred',
},
}
# Provide Windows CLI and multi-quote aliases
MULTILINE_QUOTES['single'] = MULTILINE_QUOTES["'"]
MULTILINE_QUOTES['double'] = MULTILINE_QUOTES['"']
MULTILINE_QUOTES["'''"] = MULTILINE_QUOTES["'"]
MULTILINE_QUOTES['"""'] = MULTILINE_QUOTES['"']
DOCSTRING_QUOTES = {
"'": {
'good_docstring': "'''",
'bad_docstring': '"""',
'docstring_error_message': 'Double quote docstring found but single quotes preferred',
},
'"': {
'good_docstring': '"""',
'bad_docstring': "'''",
'docstring_error_message': 'Single quote docstring found but double quotes preferred',
},
}
# Provide Windows CLI and docstring-quote aliases
DOCSTRING_QUOTES['single'] = DOCSTRING_QUOTES["'"]
DOCSTRING_QUOTES['double'] = DOCSTRING_QUOTES['"']
DOCSTRING_QUOTES["'''"] = DOCSTRING_QUOTES["'"]
DOCSTRING_QUOTES['"""'] = DOCSTRING_QUOTES['"']
def __init__(self, tree, lines=None, filename='(none)'):
self.filename = filename
self.lines = lines
@staticmethod
def _register_opt(parser, *args, **kwargs):
"""
Handler to register an option for both Flake8 3.x and 2.x.
This is based on:
https://github.com/PyCQA/flake8/blob/3.0.0b2/docs/source/plugin-development/cross-compatibility.rst#option-handling-on-flake8-2-and-3
It only supports `parse_from_config` from the original function and it
uses the `Option` object returned to get the string.
"""
try:
# Flake8 3.x registration
parser.add_option(*args, **kwargs)
except (optparse.OptionError, TypeError):
# Flake8 2.x registration
parse_from_config = kwargs.pop('parse_from_config', False)
option = parser.add_option(*args, **kwargs)
if parse_from_config:
parser.config_options.append(option.get_opt_string().lstrip('-'))
@classmethod
def add_options(cls, parser):
cls._register_opt(parser, '--quotes', action='store',
parse_from_config=True, type='choice',
choices=sorted(cls.INLINE_QUOTES.keys()),
help='Deprecated alias for `--inline-quotes`')
cls._register_opt(parser, '--inline-quotes', default="'",
action='store', parse_from_config=True, type='choice',
choices=sorted(cls.INLINE_QUOTES.keys()),
help="Quote to expect in all files (default: ')")
cls._register_opt(parser, '--multiline-quotes', default=None, action='store',
parse_from_config=True, type='choice',
choices=sorted(cls.MULTILINE_QUOTES.keys()),
help='Quote to expect in all files (default: """)')
cls._register_opt(parser, '--docstring-quotes', default=None, action='store',
parse_from_config=True, type='choice',
choices=sorted(cls.DOCSTRING_QUOTES.keys()),
help='Quote to expect in all files (default: """)')
cls._register_opt(parser, '--avoid-escape', default=None, action='store_true',
parse_from_config=True,
help='Avoiding escaping same quotes in inline strings (enabled by default)')
cls._register_opt(parser, '--no-avoid-escape', dest='avoid_escape', default=None, action='store_false',
parse_from_config=False,
help='Disable avoiding escaping same quotes in inline strings')
@classmethod
def parse_options(cls, options):
# Define our default config
# cls.config = {good_single: ', good_multiline: ''', bad_single: ", bad_multiline: """}
cls.config = {}
cls.config.update(cls.INLINE_QUOTES["'"])
cls.config.update(cls.MULTILINE_QUOTES['"""'])
cls.config.update(cls.DOCSTRING_QUOTES['"""'])
# If `options.quotes` was specified, then use it
if hasattr(options, 'quotes') and options.quotes is not None:
# https://docs.python.org/2/library/warnings.html#warnings.warn
warnings.warn('flake8-quotes has deprecated `quotes` in favor of `inline-quotes`. '
'Please update your configuration')
cls.config.update(cls.INLINE_QUOTES[options.quotes])
# Otherwise, use the supported `inline_quotes`
else:
# cls.config = {good_single: ', good_multiline: """, bad_single: ", bad_multiline: '''}
# -> {good_single: ", good_multiline: """, bad_single: ', bad_multiline: '''}
cls.config.update(cls.INLINE_QUOTES[options.inline_quotes])
# If multiline quotes was specified, overload our config with those options
if hasattr(options, 'multiline_quotes') and options.multiline_quotes is not None:
# cls.config = {good_single: ', good_multiline: """, bad_single: ", bad_multiline: '''}
# -> {good_single: ', good_multiline: ''', bad_single: ", bad_multiline: """}
cls.config.update(cls.MULTILINE_QUOTES[options.multiline_quotes])
# If docstring quotes was specified, overload our config with those options
if hasattr(options, 'docstring_quotes') and options.docstring_quotes is not None:
cls.config.update(cls.DOCSTRING_QUOTES[options.docstring_quotes])
# If avoid escaped specified, add to config
if hasattr(options, 'avoid_escape') and options.avoid_escape is not None:
cls.config.update({'avoid_escape': options.avoid_escape})
else:
cls.config.update({'avoid_escape': True})
def get_file_contents(self):
if self.filename in ('stdin', '-', None):
return stdin_get_value().splitlines(True)
else:
if self.lines:
return self.lines
else:
return readlines(self.filename)
def run(self):
file_contents = self.get_file_contents()
noqa_line_numbers = self.get_noqa_lines(file_contents)
errors = self.get_quotes_errors(file_contents)
for error in errors:
if error.get('line') not in noqa_line_numbers:
yield (error.get('line'), error.get('col'), error.get('message'), type(self))
def get_noqa_lines(self, file_contents):
tokens = [Token(t) for t in tokenize.generate_tokens(lambda L=iter(file_contents): next(L))]
return [token.start_row
for token in tokens
if token.type == tokenize.COMMENT and token.string.endswith('noqa')]
def get_quotes_errors(self, file_contents):
tokens = [Token(t) for t in tokenize.generate_tokens(lambda L=iter(file_contents): next(L))]
docstring_tokens = get_docstring_tokens(tokens)
for token in tokens:
if token.type != tokenize.STRING:
# ignore non strings
continue
# Remove any prefixes in strings like `u` from `u"foo"`
# DEV: `last_quote_char` is 1 character, even for multiline strings
# `"foo"` -> `"foo"`
# `b"foo"` -> `"foo"`
# `br"foo"` -> `"foo"`
# `b"""foo"""` -> `"""foo"""`
last_quote_char = token.string[-1]
first_quote_index = token.string.index(last_quote_char)
prefix = token.string[:first_quote_index].lower()
unprefixed_string = token.string[first_quote_index:]
# Determine if our string is multiline-based
# "foo"[0] * 3 = " * 3 = """
# "foo"[0:3] = "fo
# """foo"""[0:3] = """
is_docstring = token in docstring_tokens
is_multiline_string = unprefixed_string[0] * 3 == unprefixed_string[0:3]
start_row, start_col = token.start
# If our string is a docstring
            # DEV: The docstring check must come before the multiline check because a docstring is also a multiline string
if is_docstring:
if self.config['good_docstring'] in unprefixed_string:
continue
yield {
'message': 'Q002 ' + self.config['docstring_error_message'],
'line': start_row,
'col': start_col,
}
# Otherwise if our string is multiline
elif is_multiline_string:
                # If our string is, or contains, the known good quote style, then ignore it
# (""")foo""" -> good (continue)
# '''foo(""")''' -> good (continue)
# (''')foo''' -> possibly bad
if self.config['good_multiline'] in unprefixed_string:
continue
# If our string ends with a known good ending, then ignore it
# '''foo("''') -> good (continue)
# Opposite, """foo"""", would break our parser (cannot handle """" ending)
if unprefixed_string.endswith(self.config['good_multiline_ending']):
continue
# Output our error
yield {
'message': 'Q001 ' + self.config['multiline_error_message'],
'line': start_row,
'col': start_col,
}
# Otherwise (string is inline quote)
else:
# 'This is a string' -> Good
# 'This is a "string"' -> Good
# 'This is a \"string\"' -> Good
# 'This is a \'string\'' -> Bad (Q003) Escaped inner quotes
# '"This" is a \'string\'' -> Good Changing outer quotes would not avoid escaping
# "This is a string" -> Bad (Q000)
# "This is a 'string'" -> Good Avoids escaped inner quotes
# "This is a \"string\"" -> Bad (Q000)
# "\"This\" is a 'string'" -> Good
string_contents = unprefixed_string[1:-1]
# If string preferred type, check for escapes
if last_quote_char == self.config['good_single']:
if not self.config['avoid_escape'] or 'r' in prefix:
continue
if (self.config['good_single'] in string_contents and
not self.config['bad_single'] in string_contents):
yield {
'message': 'Q003 Change outer quotes to avoid escaping inner quotes',
'line': start_row,
'col': start_col,
}
continue
                # If not the preferred type, only allow its use when it avoids escaping.
if not self.config['good_single'] in string_contents:
yield {
'message': 'Q000 ' + self.config['single_error_message'],
'line': start_row,
'col': start_col,
}
class Token:
"""Python 2 and 3 compatible token"""
def __init__(self, token):
self.token = token
@property
def type(self):
return self.token[0]
@property
def string(self):
return self.token[1]
@property
def start(self):
return self.token[2]
@property
def start_row(self):
return self.token[2][0]
@property
def start_col(self):
return self.token[2][1]
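# A minimal, self-contained sketch (not part of the checker itself) showing how
# the Token wrapper above maps onto the tuples produced by the standard-library
# tokenize module; the sample source string is purely illustrative.
if __name__ == '__main__':
    import io
    import tokenize as _tokenize

    _sample = 'x = "double"\ny = \'single\'  # noqa\n'
    _tokens = [Token(t) for t in _tokenize.generate_tokens(io.StringIO(_sample).readline)]
    for _tok in _tokens:
        if _tok.type == _tokenize.STRING:
            print(_tok.start_row, _tok.start_col, _tok.string)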
|
StarcoderdataPython
|
111924
|
<reponame>AndreHenkel/dl_visualisation_comparison
#loads images from a predefined folder and puts it into a torch batch
import torch
import utils
import torchvision as tv
from torchvision import transforms
import urllib.request
import tarfile
import os, random
class DataLoader:
def __init__(self, path, data_set_url, batch_size=10):
self.path = path
self.data_set_url = data_set_url
self.download_and_unpack_dataset(self.data_set_url,path)
self.resize = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
print("Using Dataset: ", path+"/imagenette2-320/val")
self.dataset = tv.datasets.ImageFolder(root=path+"/imagenette2-320/val",transform=self.resize)
        # NOTE!
        # Expects the folder structure and class ordering of imagenette2-320/val, see: https://github.com/fastai/imagenette
        # Expected ordering: tench, English springer, cassette player, chain saw, church, French horn, garbage truck, gas pump, golf ball, parachute
self.idx_map = [0, 217, 482, 491, 497, 566, 569, 571, 574, 701]
#self.idx_map = [217,566,482,491,497,569,571,574,701,0]
self.data_loader = torch.utils.data.DataLoader(self.dataset,batch_size=batch_size, shuffle=True)
# print("classes:")
# print(self.dataset.class_to_idx)
def _select_random_image(self, path):
im_path = path+"/"+random.choice(os.listdir(path))
        if os.path.isdir(im_path):
im_path = self._select_random_image(im_path) #recursive
return im_path
def download_and_unpack_dataset(self,url, dest):
if not os.path.isdir(self.path+"/imagenette2-320"):
print("No Dataset was detected. Downloading and extracting: " + self.data_set_url)
print("Into destination: ",dest)
print("...")
tar_file = url
ftpstream = urllib.request.urlopen(tar_file)
tar_file = tarfile.open(fileobj=ftpstream, mode="r|gz")
tar_file.extractall(path=dest)
tar_file.close()
else:
print(self.path+"/imagenette2-320"+" was detected. No new download required!")
print("Dataset is ready for use!")
def get_image_batch(self):
"""
        Return a batch together with labels already mapped to ImageNet class ids via idx_map.
"""
batch,idx = next(iter(self.data_loader))
label = []
for i in idx:
label.append(self.idx_map[i])
return batch, label
def get_data_loader(self):
return self.data_loader
# """
# also resizes it already in advance for AlexNet
# """
#
# image_batch = torch.Tensor((batch_size,3,224,224))
#
# for i in range(batch_size):
# im_path = self._select_random_image(self.path)
# image = utils.load_image(im_path) #Tensor (3,224,224)
# image_batch[i] = image
#
# return image_batch
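# Illustrative usage sketch (kept commented out because constructing the loader
# downloads and extracts the dataset on first run; the URL below is the fastai
# imagenette2-320 mirror commonly used with this layout and is an assumption):
# loader = DataLoader(path='./data',
#                     data_set_url='https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz',
#                     batch_size=10)
# batch, labels = loader.get_image_batch()
# print(batch.shape, labels)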
|
StarcoderdataPython
|
196240
|
#!/usr/bin/env python3
import json
import threading
from urllib.request import urlopen
#nodemcu v2 running espeasy
nodemcu='http://192.168.0.70/json'
class json_noise:
def __init__(self, callback):
self.is_alive=True
self._noise=0
self.thread=None
        self._callback = callback
        # store the callback before starting the reader thread, otherwise
        # print_msg() could run before _callback exists
        self.startreading()
def print_msg(self,txt):
        self._callback(txt)
def get_noise(self):
return self._noise
def getdata(self):
# print("getdata: ...")
n=0
try:
while self.is_alive:
with urlopen(nodemcu) as r:
result = json.loads(r.read().decode(r.headers.get_content_charset('utf-8')))
#print(result, "\n=================")
noise=result['Sensors'][0]["Analog"]
#print ("noise: ", noise)
# print ("noise int: ", int(noise*1024/1000*3/10))
n=int(noise*1024/1000*3/10)
self._noise=n
self.print_msg("noise; {0}".format(n))
        except Exception:
self.print_msg("exception json read")
# print("getdata done.")
        return
def startreading(self):
self.thread = threading.Thread(target=self.getdata)
        self.thread.daemon = True  # lets us terminate with CTRL+C in main
self.thread.start()
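# Minimal usage sketch (assumes an ESPEasy node is reachable at the `nodemcu`
# URL above; without one the reader simply reports a read exception):
if __name__ == '__main__':
    import time
    reader = json_noise(print)  # use print() as the message callback
    time.sleep(5)
    print("last noise level:", reader.get_noise())
    reader.is_alive = False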
|
StarcoderdataPython
|
3369409
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
"""Tests for client.py covering NotebookClient."""
# Third-party imports
import pytest
from qtpy.QtWidgets import QWidget
import requests
# Local imports
from spyder_notebook.widgets.client import NotebookClient
class MockPlugin(QWidget):
def get_plugin_actions(self):
return []
@pytest.fixture
def plugin(qtbot):
"""
Construct mock plugin with NotebookClient for use in tests.
Use `plugin.client` to access the client.
"""
plugin = MockPlugin()
qtbot.addWidget(plugin)
client = NotebookClient(plugin, '/path/notebooks/ham.ipynb')
plugin.client = client
server_info = {'notebook_dir': '/path/notebooks',
'url': 'fake_url',
'token': '<PASSWORD>'}
client.register(server_info)
return plugin
def test_notebookclient_get_kernel_id(plugin, mocker):
"""Basic unit test for NotebookClient.get_kernel_id()."""
response = mocker.Mock()
content = b'[{"kernel": {"id": "42"}, "notebook": {"path": "ham.ipynb"}}]'
response.content = content
response.status_code = requests.codes.ok
mocker.patch('requests.get', return_value=response)
kernel_id = plugin.client.get_kernel_id()
assert kernel_id == '42'
def test_notebookclient_get_kernel_id_with_fields_missing(plugin, mocker):
"""Test NotebookClient.get_kernel_id() if response has fields missing."""
response = mocker.Mock()
content = (b'[{"kernel": {"id": "1"}, "notebook": {"spam": "eggs"}},'
b' {"kernel": {"id": "2"}},'
b' {"kernel": {"id": "3"}, "notebook": {"path": "ham.ipynb"}}]')
response.content = content
response.status_code = requests.codes.ok
mocker.patch('requests.get', return_value=response)
kernel_id = plugin.client.get_kernel_id()
assert kernel_id == '3'
def test_notebookclient_get_kernel_id_with_error_status(plugin, mocker):
"""Test NotebookClient.get_kernel_id() when response has error status."""
response = mocker.Mock()
content = b'{"message": "error"}'
response.content = content
response.status_code = requests.codes.forbidden
mocker.patch('requests.get', return_value=response)
MockMessageBox = mocker.patch('spyder_notebook.widgets.client.QMessageBox')
plugin.client.get_kernel_id()
MockMessageBox.warning.assert_called()
def test_notebookclient_get_kernel_id_with_exception(plugin, mocker):
"""Test NotebookClient.get_kernel_id() when request raises an exception."""
exception = requests.exceptions.ProxyError('kaboom')
mocker.patch('requests.get', side_effect=exception)
MockMessageBox = mocker.patch('spyder_notebook.widgets.client.QMessageBox')
plugin.client.get_kernel_id()
MockMessageBox.warning.assert_called()
|
StarcoderdataPython
|
120483
|
<gh_stars>0
assert __name__ == "__main__"
from roca import *
from revoice import *
from roca.common import *
import sys, os, getopt
dbPath = os.environ.get("ROCA_VOICEDB_PATH", None)
wavPath = None
outName = None
analyzeF0, analyzeHNM, analyzeSinEnv = False, False, False
optList, args = getopt.getopt(sys.argv[1:], "", [
    "db=",
    "wav=",
    "out=",
    "f0",
    "hnm",
    "sinenv",
    "all",
])
for opt, arg in optList:
if(opt == "--db"):
dbPath = arg
elif(opt == "--wav"):
wavPath = arg
if(outName is None):
outName = os.path.splitext(os.path.basename(wavPath))[0]
elif(opt == "--out"):
        outName = arg
elif(opt == "--f0"):
analyzeF0 = True
elif(opt == "--hnm"):
analyzeHNM = True
elif(opt == "--sinenv"):
analyzeSinEnv = True
elif(opt == "--all"):
analyzeF0, analyzeHNM, analyzeSinEnv = True, True, True
else:
assert False, "Invalid option %s" % (opt)
assert dbPath is not None, "Database path is not specified."
assert wavPath is not None, "Wave file path is not specified."
voiceDB = voicedb.DevelopmentVoiceDB(dbPath)
w, sr = loadWav(wavPath)
f0Object, hnmObject, envObject = None, None, None
f0ObjectPath, hnmObjectPath, sinEnvObjectPath = "%s.f0Obj" % (outName,), "%s.hnmObj" % (outName,), "%s.sinEnvObj" % (outName,)
if(w.ndim != 1):
print("Warning: Multichannel audio is not supported, use left channel only", file = sys.stderr)
w = w.T[0]
if(sr not in (44100, 48000)):
print("Warning: Samprate of 44100 or 48000 is recommended", file = sys.stderr)
if(analyzeF0):
print("F0...")
pyinProc = pyin.Processor(sr)
obsProbList = pyinProc(w)
monopitchProc = monopitch.Processor(*monopitch.parameterFromPYin(pyinProc))
f0List = monopitchProc(w, obsProbList)
f0Object = objects.F0Object(f0List, monopitchProc.hopSize, sr)
del pyinProc, obsProbList, monopitchProc, f0List
voiceDB.saveObject(f0ObjectPath, f0Object)
if((analyzeHNM or analyzeSinEnv) and f0Object is None):
assert voiceDB.hasObject(f0ObjectPath), "No f0 object available."
f0Object = voiceDB.loadObject(f0ObjectPath)
if(analyzeHNM):
print("HNM...")
hnmProc = hnm.Analyzer(sr, harmonicAnalysisMethod = "qfft")
assert f0Object.hopSize == hnmProc.hopSize
f0List, hFreqList, hAmpList, hPhaseList, sinusoidEnergyList, noiseEnvList, noiseEnergyList = hnmProc(w, f0Object.f0List)
# update f0
f0Object.f0List = f0List
voiceDB.saveObject(f0ObjectPath, f0Object)
hnmObject = objects.HNMObject(sr, hnmProc.hopSize, hFreqList, hAmpList, hPhaseList, sinusoidEnergyList, noiseEnvList, noiseEnergyList)
voiceDB.saveObject(hnmObjectPath, hnmObject)
del hnmProc, f0List, hFreqList, hAmpList, hPhaseList, sinusoidEnergyList, noiseEnvList, noiseEnergyList
if(analyzeSinEnv):
print("SinEnv...")
if(hnmObject is None):
assert voiceDB.hasObject(hnmObjectPath), "No hnm object available."
hnmObject = voiceDB.loadObject(hnmObjectPath)
synProc = hnm.Synther(sr)
envProc = mfienvelope.Processor(sr)
assert hnmObject.hopSize == envProc.hopSize
assert hnmObject.hopSize == f0Object.hopSize
assert synProc.hopSize == envProc.hopSize
synthed = synProc(f0Object.f0List, hnmObject.hFreqList, hnmObject.hAmpList, hnmObject.hPhaseList, hnmObject.sinusoidEnergyList, hnmObject.noiseEnvList, hnmObject.noiseEnergyList)
sinEnv = envProc(w, f0Object.f0List)
sinEnvObject = objects.EnvObject(sinEnv, envProc.hopSize, sr)
voiceDB.saveObject(sinEnvObjectPath, sinEnvObject)
|
StarcoderdataPython
|
3320809
|
from kodexa import Assistant, AssistantResponse, AssistantContext
from kodexa.model.model import BaseEvent
class ExampleAssistant(Assistant):
"""
This is an example of an assistant
"""
def __init__(self, my_param: str):
self.my_param = my_param
def process_event(self, event: BaseEvent, context: AssistantContext) -> AssistantResponse:
pass
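# Minimal usage sketch: in practice the Kodexa platform builds the event and
# context objects and invokes process_event; here we only show constructing the
# assistant (the parameter value is a placeholder).
if __name__ == '__main__':
    assistant = ExampleAssistant(my_param='example-value')
    print(assistant.my_param)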
|
StarcoderdataPython
|
1714281
|
"""C190701Permission URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from app01 import views
urlpatterns = [
path('admin/', admin.site.urls),
path('login/', views.login),
path('customers/list/', views.customers),
path('customers/add/', views.add_customers),
    re_path(r'updatecustomers/(\d+)', views.updatecustomers),
    re_path(r'delecustomers/(\d+)', views.delecustomers),
path('order/list/', views.orders),
path('order/add/', views.add_order),
    re_path(r'updateorder/(\d+)', views.updateorder),
    re_path(r'deleorder/(\d+)', views.deleorder),
]
|
StarcoderdataPython
|
3394004
|
<filename>api/routes/v3/evaluationroutes.py<gh_stars>1-10
from flask import Blueprint
from flask import request
from api.utils.helpers import ServerMethods
from api.utils.vsae.formater import EquationFormating
# import modules for binary tree parsing
from api.modules.binarytree.stacktotree import Index as buildTreeFromExpression
from api.modules.binarytree.treeevaulute import Evaulute
# import modules for shunting yard algorithm
from api.modules.shuntingyardalgorithm.algorithm import Index as shuntingYardAlgorithmEvaluation
evalRoutes = Blueprint("evaluation_routes", __name__, url_prefix='/api/v3/eval')
@evalRoutes.route('/sya', methods=['POST']) # shunting yard algorithm evaluate
def sya():
data = request.json
expression = data['expression']
res = f'{shuntingYardAlgorithmEvaluation(expression)}'
return ServerMethods.dispatchJSON({'result': res, 'algorithm': 'shunting yard algorithm'})
@evalRoutes.route('/bte', methods=['POST']) # binary tree evaluate
def bte():
data = request.json
expression = data['expression']
fExpression, baseFormat = EquationFormating.defFormat(expression)
bTree = buildTreeFromExpression(fExpression)
res = f'{Evaulute._evaluteAndGetOnlyResult(bTree)}'
return ServerMethods.dispatchJSON({'result': res, 'algorithm': 'binary tree evaluation'})
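# Illustrative client sketch (kept commented out: it assumes the blueprint is
# registered in a Flask app served on localhost:5000, which this module does
# not do by itself):
# import requests
# resp = requests.post('http://localhost:5000/api/v3/eval/sya',
#                      json={'expression': '3 + 4 * 2'})
# print(resp.json())  # expected shape: {'result': ..., 'algorithm': 'shunting yard algorithm'}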
|
StarcoderdataPython
|
3253909
|
# Copyright (c) 2008 - 2011, <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>
# This is Free Software. See LICENSE for license information.
import sys
import email
import re
import logging
import gettor.utils
import gettor.packages
class requestMail:
def __init__(self, config):
""" Read message from stdin, try to assign some values already.
"""
# Read email from stdin
self.rawMessage = sys.stdin.read()
self.parsedMessage = email.message_from_string(self.rawMessage)
self.config = config
self.request = {}
self.request['user'] = self.parsedMessage["Return-Path"]
self.request['ouraddr'] = self.getRealTo(self.parsedMessage["to"])
self.request['locale'] = self.getLocaleInTo(self.request['ouraddr'])
self.request['package'] = None
self.request['split'] = False
self.request['forward'] = None
self.request['valid'] = False # This will get set by gettor.filters
def getRealTo(self, toField):
"""If someone wrote to `<EMAIL>', the `From:' field
in our reply should reflect that. So, use the `To:' field from the
incoming mail, but filter out everything except the gettor@ address.
"""
regexGettor = '.*(<)?(<EMAIL>)+(?(1)>).*'
match = re.match(regexGettor, toField)
if match:
return match.group(2)
else:
# Fall back to default From: address
return self.config.MAIL_FROM
def getLocaleInTo(self, address):
"""See whether the user sent his email to a 'plus' address, for
instance to gettor+fa@tpo. Plus addresses are the current
mechanism to set the reply language
"""
        regexPlus = r'.*(<)?(\w+\+(\w+)@\w+(?:\.\w+)+)(?(1)>)'
match = re.match(regexPlus, address)
if match:
locale = match.group(3)
logging.debug("User requested language %s" % locale)
return self.checkAndGetLocale(locale)
else:
logging.debug("Not a 'plus' address")
return self.config.DEFAULT_LOCALE
def parseMail(self):
"""Main mail parsing routine. Returns a RequestVal value class
"""
if self.parsedMessage.is_multipart():
for part in self.parsedMessage.walk():
if part.get_content_maintype() == "text":
# We found a text part, parse it
self.parseTextPart(part.get_payload(decode=1))
else:
# Not a multipart message, just parse along what we've got
self.parseTextPart(self.parsedMessage.get_payload(decode=1))
if self.request['package'] is None:
logging.debug("User didn't select any packages")
return self.request
def parseTextPart(self, text):
"""If we've found a text part in a multipart email or if we just want
to parse a non-multipart message, this is the routine to call with
the text body as its argument
"""
lines = gettor.utils.stripHTMLTags(text).split('\n')
for line in lines:
if self.request['package'] is None:
self.request['package'] = self.matchPackage(line)
if self.request['split'] is False:
self.request['split'] = self.matchSplit(line)
if self.request['forward'] is None:
self.request['forward'] = self.matchForwardCommand(line)
def matchPackage(self, line):
"""Look up which package the user is requesting.
"""
for p in self.config.PACKAGES.keys():
matchme = ".*" + p + ".*"
match = re.match(matchme, line, re.DOTALL)
if match:
logging.debug("User requested package %s" % p)
return p
return None
def matchSplit(self, line):
"""If we find 'split' somewhere we assume that the user wants a split
delivery
"""
match = re.match(".*split.*", line, re.DOTALL)
if match:
logging.debug("User requested a split delivery")
return True
else:
return False
def matchForwardCommand(self, line):
"""Check if we have a command from the GetTor admin in this email.
Command lines always consists of the following syntax:
'Command: <password> <command part 1> <command part 2>'
        For the forwarding command, part 1 is the package name of the
        package that needs to be forwarded, part 2 is the email address
        of the recipient.
The password is checked against the password found in the file
configured as cmdPassFile in the GetTor configuration.
"""
match = re.match(".*[Cc]ommand:\s+(.*)$", line, re.DOTALL)
if match:
logging.debug("Command received from %s" % self.request['user'])
cmd = match.group(1).split()
length = len(cmd)
assert length == 3, "Wrong command syntax"
auth = cmd[0]
# Package is parsed by the usual package parsing mechanism
package = cmd[1]
address = cmd[2]
verified = gettor.utils.verifyPassword(self.config, auth)
            assert verified, \
"Unauthorized attempt to command from: %s" \
% self.request['user']
return address
else:
return None
def checkAndGetLocale(self, locale):
"""Look through our aliases list for languages and check if the user
requested an alias rather than an 'official' language name. If he
does, map back to that official name. Also, if the user didn't
request a language we support, fall back to default.
"""
for (lang, aliases) in self.config.SUPP_LANGS.items():
if lang == locale:
return locale
if aliases is not None:
if locale in aliases:
logging.debug("Request for %s via alias %s" % (lang, alias))
# Return the 'official' name
return lang
else:
logging.debug("Requested language %s not supported. Fallback: %s" \
% (self.replyLocale, self.config.DEFAULT_LOCALE))
self.replyLocale = self.config.DEFAULT_LOCALE
return
def getRawMessage(self):
return self.rawMessage
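# Worked example of the admin command syntax handled by matchForwardCommand
# (all values below are placeholders, not real credentials or addresses):
#
#   Command: s3cretpass tor-browser-bundle someone@example.com
#
# splits into auth='s3cretpass', package='tor-browser-bundle' and
# address='someone@example.com'; the address is returned as the forward target
# once the password has been verified against cmdPassFile.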
|
StarcoderdataPython
|
55157
|
from .utils import simple_hash
from .torrentbase import TorrentBase
from .torrentdetails import TorrentDetails
class Torrent(TorrentBase):
def fetch_details(self, timeout=30) -> TorrentDetails:
"""
Retrieve details about this torrent (e.g link, description, files...)
Parameters:
timeout: int - Timeout in seconds.
Returns:
TorrentDetails - Torrent details.
Raises:
ValueError - Missing some properties.
RequestError - Something went wrong requesting the search page.
Timeout - The search lasted longer than timeout.
"""
details_data = self.provider.fetch_details_data(self, timeout)
# the torrent details are a combination of the data
# we already have and the new data found in the info page
details = TorrentDetails(**{**self.data, **details_data})
return details
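# Illustrative usage sketch (kept commented out: a `provider` and its search
# results are supplied by the surrounding library and are not available here):
# torrent = some_provider_search_result       # an instance of Torrent
# details = torrent.fetch_details(timeout=30)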
|
StarcoderdataPython
|
137258
|
<reponame>kathatherine/anaconda-project
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
"""The ``activate`` command which prepares a project to run and prints commands to source in your shell."""
from __future__ import absolute_import, print_function
import os
try:
from shlex import quote # pragma: no cover (py3 only)
except ImportError: # pragma: no cover (py2 only)
from pipes import quote
from anaconda_project.internal.cli.prepare_with_mode import prepare_with_ui_mode_printing_errors
from anaconda_project.internal.cli.project_load import load_project
def activate(dirname, ui_mode, conda_environment, command_name):
"""Prepare project and return lines to be sourced.
Future direction: should also activate the proper conda env.
Returns:
None on failure or a list of lines to print.
"""
project = load_project(dirname)
result = prepare_with_ui_mode_printing_errors(project,
ui_mode=ui_mode,
env_spec_name=conda_environment,
command_name=command_name)
if result.failed:
return None
exports = []
# sort so we have deterministic output order for tests
sorted_keys = list(result.environ.keys())
sorted_keys.sort()
for key in sorted_keys:
value = result.environ[key]
if key not in os.environ or os.environ[key] != value:
exports.append("export {key}={value}".format(key=key, value=quote(value)))
return exports
def main(args):
"""Start the activate command and return exit status code."""
result = activate(args.directory, args.mode, args.env_spec, args.command)
if result is None:
return 1
else:
for line in result:
print(line)
return 0
|
StarcoderdataPython
|
185059
|
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import tensorflow as tf
from ccgnet import experiment as exp
from ccgnet.finetune import *
from ccgnet import layers
from ccgnet.layers import *
import numpy as np
import time
import random
from sklearn.metrics import balanced_accuracy_score
from ccgnet.Dataset import Dataset, DataLoader
from Featurize.Coformer import Coformer
from Featurize.Cocrystal import Cocrystal
def verify_dir_exists(dirname):
    if not os.path.isdir(os.path.dirname(dirname)):
os.makedirs(os.path.dirname(dirname))
def make_dataset(fp_size, radii):
data1 = Dataset(abs_path+'/CC_Table/CC_Table.tab', mol_blocks_dir=abs_path+'/Mol_Blocks.dir')
data1.make_embedding_dataset(fp_type='ecfp', nBits=fp_size, radii=radii, processes=15, make_dataframe=True)
return data1
def build_model(
layer_1_size,
layer_2_size,
layer_3_size,
act_func,
dropout,
merge,
forward_layer_1_size,
forward_layer_2_size,
forward_layer_3_size,
forward_act_func,
forward_dropout
):
class DNN_5(object):
def build_model(self, inputs, is_training, global_step=None):
fps = inputs[0]
labels = inputs[1]
tags = inputs[2]
fps = tf.reshape(fps, [-1, int(fps.get_shape()[-1].value/2)])
with tf.compat.v1.variable_scope('FC_1') as scope:
fps = tf.compat.v1.layers.dense(fps, layer_1_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
if layer_2_size != None:
with tf.compat.v1.variable_scope('FC_2') as scope:
fps = tf.compat.v1.layers.dense(fps, layer_2_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
if layer_3_size != None:
with tf.compat.v1.variable_scope('FC_3') as scope:
fps = tf.compat.v1.layers.dense(fps, layer_3_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
if merge == 'add':
with tf.compat.v1.variable_scope('merge_add') as scope:
fp_size = fps.get_shape()[-1].value
fps = tf.reshape(fps, [-1, 2, fp_size])
fps = tf.reduce_sum(fps, axis=1)
elif merge == 'concat':
with tf.compat.v1.variable_scope('merge_concat') as scope:
fp_size = fps.get_shape()[-1].value
fps = tf.reshape(fps, [-1, fp_size*2])
with tf.compat.v1.variable_scope('Forward_FC_1') as scope:
fps = tf.compat.v1.layers.dense(fps, forward_layer_1_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = forward_act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
if forward_layer_2_size != None:
with tf.compat.v1.variable_scope('Forward_FC_2') as scope:
fps = tf.compat.v1.layers.dense(fps, forward_layer_2_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = forward_act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
if forward_layer_3_size != None:
with tf.compat.v1.variable_scope('Forward_FC_3') as scope:
fps = tf.compat.v1.layers.dense(fps, forward_layer_3_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = forward_act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
fps = layers.make_fc_layer(fps, 2, is_training=is_training, with_bn=False, act_func=None)
return fps, labels
return DNN_5()
def black_box_function(args_dict):
tf.reset_default_graph()
fp_size = args_dict['fp_size']
radii = args_dict['fp_radii']
batch_size = args_dict['batch_size']
layer_1_size = args_dict['layer_1_size']
layer_2_size = args_dict['layer_2_size']
layer_3_size = args_dict['layer_3_size']
act_fun = args_dict['act_fun']
dropout = args_dict['dropout']
merge = args_dict['merge']
forward_layer_1_size = args_dict['forward_layer_1_size']
forward_layer_2_size = args_dict['forward_layer_2_size']
forward_layer_3_size = args_dict['forward_layer_3_size']
forward_act_fun = args_dict['forward_act_fun']
forward_dropout = args_dict['forward_dropout']
    # data splitting
data = make_dataset(fp_size, radii)
train_data, valid_data = data.split(train_samples=train_samples, valid_samples=valid_samples, with_fps=True)
# make save dir
snapshot_path = abs_path+'/bayes_snapshot/'
model_name = 'BayesOpt-FP/'
verify_dir_exists(snapshot_path+model_name)
if os.listdir(snapshot_path+model_name) == []:
dataset_name = 'Step_0/'
else:
l_ = [int(i.split('_')[1]) for i in os.listdir(snapshot_path+model_name) if 'Step_' in i]
dataset_name = 'Step_{}/'.format(max(l_)+1)
# training
tf.reset_default_graph()
model = build_model(layer_1_size, layer_2_size, layer_3_size, act_fun, dropout, merge, forward_layer_1_size,
forward_layer_2_size, forward_layer_3_size, forward_act_fun, forward_dropout)
model = exp.Model(model, train_data, valid_data, with_test=False, snapshot_path=snapshot_path, use_subgraph=False, use_desc=False, build_fc=True,
model_name=model_name, dataset_name=dataset_name+'/time_0')
history = model.fit(num_epoch=100, save_info=True, save_att=False, silence=0, train_batch_size=batch_size,
max_to_keep=1, metric='loss')
loss = min(history['valid_cross_entropy'])
tf.reset_default_graph()
print('\nLoss: {}'.format(loss))
print(str(args_dict))
return loss
from hyperopt import hp
import hyperopt.pyll.stochastic
args_dict = {
'fp_size': hp.choice('fp_size', [128,256,512,1024,2048,4096]),
'fp_radii': hp.choice('fp_radii', (1,2,3)),
'batch_size':hp.choice('batch_size', (64,128,256)),
'layer_1_size':hp.choice('layer_1_size', (128,256,512,1024,2048)),
'layer_2_size':hp.choice('layer_2_size', (128,256,512,1024,2048, None)),
'layer_3_size':hp.choice('layer_3_size', (128,256,512,1024,2048, None)),
'act_fun':hp.choice('act_fun', (tf.nn.relu, tf.nn.elu, tf.nn.tanh)),
'dropout':hp.uniform('dropout', 0.0, 0.75),
'merge':hp.choice('merge',('add', 'concat')),
'forward_layer_1_size':hp.choice('forward_layer_1_size', (128,256,512,1024,2048)),
'forward_layer_2_size':hp.choice('forward_layer_2_size', (128,256,512,1024,2048, None)),
'forward_layer_3_size':hp.choice('forward_layer_3_size', (128,256,512,1024,2048, None)),
'forward_act_fun':hp.choice('forward_act_fun', (tf.nn.relu, tf.nn.elu, tf.nn.tanh)),
'forward_dropout':hp.uniform('forward_dropout', 0.0, 0.75),
}
from hyperopt import fmin, tpe, hp, Trials
abs_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
fold_10 = eval(open(abs_path+'/Fold_10.dir').read())
Samples = fold_10['fold-0']['train']+fold_10['fold-0']['valid']
## sample splitting
random.shuffle(Samples)
num_sample = len(Samples)
train_num = int(0.9 * num_sample)
train_samples = Samples[:train_num]
valid_samples = Samples[train_num:]
trials = Trials()
best = fmin(
fn=black_box_function,
space=args_dict,
algo=tpe.suggest,
max_evals=100,
trials=trials,
trials_save_file='trials_save_file-FP')
print('best:')
print(best)
|
StarcoderdataPython
|
1725150
|
<filename>hwhandler_api/core/__init__.py
from .base_system import *
from .base_command import *
from .system_fsm import *
|
StarcoderdataPython
|
3262565
|
<reponame>warrenbailey/github
import requests
from requests.auth import HTTPBasicAuth
import sys
BASE_URL = "https://api.github.com"
def list_repos(username, api_token):
url = BASE_URL + f"/users/{username}/repos"
print(f"Sending request to {url}")
response = requests.get(url, auth=HTTPBasicAuth(username, api_token))
print(response.json())
return response.json()
def delete_repos(username, api_token, repos):
for repo in repos:
print(repo['name'])
name = repo['name']
if name.endswith('production') or name.endswith('staging') or name.endswith('dev'):
delete_repo(name, username, api_token)
def delete_repo(repo, username, api_token):
print(f"Deleting repo {repo}")
url = BASE_URL + f"/repos/{username}/{repo}"
print(url)
response = requests.delete(url, auth=HTTPBasicAuth(username, api_token))
print(response.status_code)
if __name__ == '__main__':
username = sys.argv[1]
api_token = sys.argv[2]
repos = list_repos(username, api_token)
delete_repos(username, api_token, repos)
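# Illustrative invocation (the script name and token are placeholders):
#   python cleanup_repos.py <github-username> <personal-access-token>
# This lists the user's repositories and deletes every one whose name ends
# with 'production', 'staging' or 'dev'.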
|
StarcoderdataPython
|
68499
|
from django.urls import path
from .views import AccessDeniedView
urlpatterns = [
path("access-denied/", AccessDeniedView.as_view(), name="access-denied"),
]
|
StarcoderdataPython
|
3332249
|
"""The main program"""
from .app import run
if __name__ == '__main__':
run()
|
StarcoderdataPython
|
1717179
|
import bge
scene = bge.logic.getCurrentScene()
controller = bge.logic.getCurrentController()
collection_parent = controller.owner.get('d3dt_collection')
collection = { obj.name: obj for obj in scene.objectsInactive if obj.parent and obj.parent.name == collection_parent }
def add(charObj):
return scene.addObject(charObj, controller.owner, 0)
def placeObject(obj, column):
scale = controller.owner.localScale.x
offset = 0.025*scale
obj.position.x = (scale+offset)*column
obj.scaling = controller.owner.scaling
obj.visible = True
obj.setParent(controller.owner)
def spawn(text):
column = 0
for char in text:
if char != ' ':
placeObject(add(collection[char]), column)
column += 1
spawn(controller.owner.get('d3dt_text'))
|
StarcoderdataPython
|
1733749
|
<gh_stars>10-100
from __future__ import unicode_literals
from django.apps import AppConfig
class AccessConfig(AppConfig):
name = 'access'
|
StarcoderdataPython
|
3345624
|
<filename>ufcrl/fighters.py<gh_stars>1-10
###############################################################################################
# mens
###############################################################################################
MENS_CATCH_WEIGHT = 'mens catch weight' # ??.?kg
MENS_STRAWWEIGHT = 'mens strawweight' # 52.5kg
MENS_FLYWEIGHT = 'mens flyweight' # 56.7kg
ALEXANDRE_PANTOJA = '<NAME>'
ASHKAN_MOKHTARIAN = '<NAME>'
BEN_NGUYEN = '<NAME>'
BRANDON_MORENO = '<NAME>'
DEIVESON_FIGUEIREDO = '<NAME>'
DEMETRIOUS_JOHNSON = '<NAME>'
DUSTIN_ORTIZ = '<NAME>'
ELIAS_GARCIA = '<NAME>'
ERIC_SHELTON = '<NAME>'
HECTOR_SANDOVAL = '<NAME>'
HENRY_CEJUDO = '<NAME>'
JARRED_BROOKS = '<NAME>'
JENEL_LAUSA = '<NAME>'
JOBY_SANCHEZ = '<NAME>'
JOHN_MORAGA = '<NAME>'
JORDAN_ESPINOSA = '<NAME>'
JOSE_TORRES = '<NAME>'
JOSEPH_BENAVIDEZ = '<NAME>'
JOSEPH_MORALES = '<NAME>'
JUSSIER_FORMIGA = '<NAME>'
JUSTIN_SCOGGINS = '<NAME>'
KAI_KARA_FRANCE = 'Kai Kara-France'
LOUIS_SMOLKA = 'Louis Smolka'
MAGOMED_BIBULATOV = '<NAME>'
MARCO_BELTRAN = '<NAME>'
MATHEUS_NICOLAU = '<NAME>'
MATT_SCHNELL = '<NAME>'
NAOKI_INOUE = '<NAME>'
NEIL_SEERY = '<NAME>'
RAULIAN_PAIVA = '<NAME>'
RAY_BORG = '<NAME>'
ROBERTO_SANCHEZ = '<NAME>'
ROGERIO_BONTORIN = '<NAME>'
RYAN_BENOIT = '<NAME>'
SAID_NURMAGOMEDOV = '<NAME>'
SERGIO_PETTIS = '<NAME>'
TIM_ELLIOTT = '<NAME>'
ULKA_SASAKI = '<NAME>'
WILSON_REIS = '<NAME>'
MENS_BANTAMWEIGHT = 'mens bantamweight' # 61.2kg
AIEMANN_ZAHABI = '<NAME>'
ALBERT_MORALES = '<NAME>'
ALEJANDRO_PEREZ = '<NAME>'
ALEX_PEREZ = '<NAME>'
ALJAMAIN_STERLING = '<NAME>'
ANDERSON_BERLINGERI_DOS_SANTOS = '<NAME>'
ANDRE_EWELL = '<NAME>'
ANDRE_SOUKHAMTHATH = '<NAME>'
AUGUSTO_MENDES = '<NAME>'
BEA_MALECKI = '<NAME>'
BENITO_LOPEZ = '<NAME>'
BOSTON_SALMON = '<NAME>'
BRAD_KATONA = '<NAME>'
BRAD_PICKETT = '<NAME>'
BRETT_JOHNS = '<NAME>'
BRIAN_KELLEHER = '<NAME>'
BRYAN_CARAWAY = '<NAME>'
CARLOS_HUACHIN_QUIROZ = '<NAME>'
CHRIS_GUTIERREZ = '<NAME>'
CODY_GARBRANDT = '<NAME>'
COLE_SMITH = '<NAME>'
DAMIAN_STASIAK = '<NAME>'
DAVEY_GRANT = '<NAME>'
DIEGO_RIVAS = '<NAME>'
DOM_PILARTE = '<NAME>'
DOMINICK_CRUZ = '<NAME>'
DOUGLAS_SILVA_DE_ANDRADE = '<NAME>'
EDDIE_WINELAND = '<NAME>'
EDUARDA_SANTANA = '<NAME>'
FRANKIE_SAENZ = '<NAME>'
GABRIEL_SILVA = '<NAME>'
GRIGORY_POPOV = '<NAME>'
GUIDO_CANNETTI = '<NAME>'
HENRY_BRIONES = '<NAME>'
IURI_ALCANTARA = '<NAME>'
JIMMIE_RIVERA = '<NAME>'
JIN_SOON_SON = '<NAME>'
JOE_SOTO = '<NAME>'
JOHN_DODSON = '<NAME>'
JOHN_LINEKER = '<NAME>'
JOHNNY_EDUARDO = '<NAME>'
JONATHAN_MARTINEZ = '<NAME>'
JOSE_QUINONEZ = '<NAME>'
JOURNEY_NEWSON = '<NAME>'
KWAN_HO_KWAK = 'Kwan Ho Kwak'
KYUNG_HO_KANG = 'Kyung ho Kang'
LIU_PINGYUAN = 'Liu Pingyuan'
LUKE_SANDERS = '<NAME>'
MANNY_BERMUDEZ = '<NAME>'
MARIO_BAUTISTA = '<NAME>'
MARK_DE_LA_ROSA = 'Mark de la Rosa'
MARLON_MORAES = '<NAME>'
MARLON_VERA = '<NAME>'
MARTIN_DAY = '<NAME>'
MATTHEW_LOPEZ = '<NAME>'
MERAB_DVALISHVILI = '<NAME>'
MICHINORI_TANAKA = '<NAME>'
MITCH_GAGNON = '<NAME>'
MONTEL_JACKSON = '<NAME>'
NATHANIEL_WOOD = '<NAME>'
NOHELN_HERNANDEZ = '<NAME>'
PATRICK_WILLIAMS = '<NAME>'
PEDRO_MUNHOZ = '<NAME>'
PETR_YAN = '<NAME>'
RANI_YAHYA = '<NAME>'
RAPHAEL_ASSUNCAO = '<NAME>'
REGINALDO_VIEIRA = '<NAME>'
RENAN_BARAO = '<NAME>'
RICARDO_RAMOS = '<NAME>'
RICKY_SIMON = '<NAME>'
ROB_FONT = '<NAME>'
RUSSEL_DOANE = '<NAME>'
RYAN_MACDONALD = '<NAME>'
SEAN_O_MALLEY = '<NAME>\'Malley'
SONG_YADONG = 'Song Yadong'
SU_MUDAERJI = 'Su Mudaerji'
THOMAS_ALMEIDA = '<NAME>'
TJ_DILLASHAW = 'TJ Dillashaw'
TOM_DUQUESNOY = '<NAME>'
URIJAH_FABER = '<NAME>'
VINCE_MORALES = '<NAME>'
MENS_FEATHERWEIGHT = 'mens featherweight' # 65.8kg
ALEX_CACERES = '<NAME>'
ALEXANDER_VOLKANOVSKI = '<NAME>'
ANDERSON_DOS_SANTOS = '<NAME>'
ANDRE_FILI = '<NAME>'
ARNOLD_ALLEN = '<NAME>'
ARTEM_LOBOV = '<NAME>'
AUSTIN_ARNETT = '<NAME>'
BHARAT_KANDARE = '<NAME>'
BJ_PENN = 'BJ Penn'
BOBBY_MOFFETT = '<NAME>'
BRANDON_DAVIS = '<NAME>'
BRIAN_ORTEGA = '<NAME>'
BRYCE_MITCHELL = '<NAME>'
CALVIN_KATTAR = '<NAME>'
CARLS_JOHN_DE_TOMAS = '<NAME>'
CHAD_MENDES = '<NAME>'
CHAN_SUNG_JUNG = '<NAME>'
CHARLES_ROSA = '<NAME>'
CHAS_SKELLY = 'Ch<NAME>'
CHRIS_FISHGOLD = '<NAME>'
CHRIS_GRUETZEMACHER = '<NAME>'
CODY_STAMANN = '<NAME>'
CORY_SANDHAGEN = '<NAME>'
CUB_SWANSON = 'Cub Swanson'
DAN_IGE = '<NAME>'
DARREN_ELKINS = '<NAME>'
DENNIS_BERMUDEZ = '<NAME>'
DENNIS_SIVER = '<NAME>'
DOOHO_CHOI = '<NAME>'
ENRIQUE_BARZOLA = '<NAME>'
FELIPE_ARANTES = '<NAME>'
FELIPE_COLARES = '<NAME>'
FRANKIE_EDGAR = '<NAME>'
GAVIN_TUCKER = '<NAME>'
GABRIEL_BENITEZ = '<NAME>'
GERALDO_DE_FREITAS = '<NAME>'
GILBERT_MELENDEZ = '<NAME>'
GODOFREDO_PEPEY = '<NAME>'
GRANT_DAWSON = '<NAME>'
GRAY_MAYNARD = '<NAME>'
HAKEEM_DAWODU = '<NAME>'
HUMBERTO_BANDENAY = '<NAME>'
JASON_KNIGHT = '<NAME>'
JEREMY_KENNEDY = '<NAME>'
JEREMY_STEPHENS = '<NAME>'
JORDAN_GRIFFIN = '<NAME>'
JOSE_ALDO = '<NAME>'
JULIO_ACRE = '<NAME>'
KEVIN_AGUILAR = '<NAME>'
KHALID_TAHA = '<NAME>'
KRON_GRACIE = '<NAME>'
KURT_HOLOBAUGH = '<NAME>'
KYLE_BOCHNIAK = '<NAME>'
LUIS_PENA = '<NAME>'
MAKWAN_AMIRKHANI = '<NAME>'
MARTIN_BRAVO = '<NAME>'
MATT_BESSETTE = '<NAME>'
MATT_SAYLES = '<NAME>'
MAX_HOLLOWAY = '<NAME>'
MICHAEL_TRIZANO = '<NAME>'
MICHEL_QUINONES = '<NAME>'
MIKE_DE_LA_TORRE = '<NAME>'
MIKE_GRUNDY = '<NAME>'
MIKE_SANTIAGO = '<NAME>'
MIRSAD_BEKTIC = '<NAME>'
MIZUTO_HIROTA = '<NAME>'
MOVSAR_EVLOEV = '<NAME>'
MYLES_JURY = '<NAME>'
NAD_NARIMANI = '<NAME>'
PHILLIPE_NOVER = '<NAME>'
RANDY_COSTA = '<NAME>'
RAONI_BARCELOS = '<NAME>'
RENATO_MOICANO = '<NAME>'
RICARDO_LAMAS = '<NAME>'
RICK_GLENN = '<NAME>'
ROLANDO_DY = '<NAME>'
RONY_JASON = '<NAME>'
RYAN_HALL = '<NAME>'
SAM_SICILIA = '<NAME>'
SEUNGWOO_CHOI = '<NAME>'
SHANE_BURGOS = 'Sh<NAME>'
SHANE_YOUNG = 'Shane Young'
SHEYMON_DA_SILVA_MORAES = '<NAME>'
SODIQ_YUSUFF = '<NAME>'
STEVEN_PETERSON = '<NAME>'
SUMAN_MOKHTARIAN = '<NAME>'
SUNG_BIN_JO = 'Sung Bin Jo'
TERRION_WARE = 'Terrion Ware'
TERUTO_ISHIHARA = '<NAME>'
WANG_GUAN = 'Wang Guan'
WULIJI_BUREN = '<NAME>'
YAIR_RODRIGUEZ = '<NAME>'
YOSHINORI_HORIE = '<NAME>'
ZABIT_MAGOMEDSHARIPOV = '<NAME>omedsharipov'
MENS_LIGHTWEIGHT = 'mens lightweight' # 70.3kg
ABEL_TRUJILLO = '<NAME>'
ADRIANO_MARTINS = '<NAME>'
AL_IAQUINTA = 'Al Iaquinta'
ALAN_PATRICK = '<NAME>'
ALESSANDRO_RICCI = '<NAME>'
ALEX_DA_SILVA = '<NAME>'
ALEX_GORGEES = '<NAME>'
ALEX_WHITE = '<NAME>'
ALEXANDER_HERNANDEZ = '<NAME>'
ALEXANDER_YAKOVLEV = '<NAME>'
ALVARO_HERRERA = '<NAME>'
ANDREW_HOLBROOK = '<NAME>'
ANTHONY_PETTIS = '<NAME>'
ANTHONY_ROCCO_MARTIN = '<NAME>'
ARMAN_TSARUKYAN = '<NAME>'
AUSTIN_HUBBARD = '<NAME>'
BENEIL_DARIUSH = '<NAME>'
BOBBY_GREEN = '<NAME>'
CALLAN_POTTER = '<NAME>'
CHARLES_JOURDAIN = '<NAME>'
CHARLES_OLIVEIRA = '<NAME>'
CHRIS_WADE = '<NAME>'
CHRISTOS_GIAGOS = '<NAME>'
CLAUDIO_PUELLES = '<NAME>'
CLAY_GUIDA = '<NAME>'
CONOR_MCGREGOR = '<NAME>'
DAMIEN_BROWN = '<NAME>'
DAMIR_HADZOVIC = '<NAME>'
DAMIR_ISMAGULOV = '<NAME>'
DAN_HOOKER = '<NAME>'
DAN_MORET = '<NAME>'
DANIEL_TEYMUR = '<NAME>'
DANILO_BELLUARDO = '<NAME>'
DANNY_HENRY = '<NAME>'
DARRELL_HORCHER = '<NAME>'
DAVID_TEYMUR = '<NAME>'
DESMOND_GREEN = '<NAME>'
DEVIN_POWELL = '<NAME>'
DEVONTE_SMITH = '<NAME>'
DIEGO_FERREIRA = '<NAME>'
DIEGO_SANCHEZ = '<NAME>'
DRAKKAR_KLOSE = '<NAME>'
DREW_DOBER = '<NAME>'
DUSTIN_POIRIER = '<NAME>'
EDDIE_ALVAREZ = '<NAME>'
EDSON_BARBOZA = '<NAME>'
ERIK_KOCH = '<NAME>'
EVAN_DUNHAM = '<NAME>'
FELIPE_SILVA = '<NAME>'
FRANCISCO_TRINALDO = '<NAME>'
FRANKIE_PEREZ = '<NAME>'
GILBERT_BURNS = '<NAME>'
GLEISON_TIBAU = '<NAME>'
GREGOR_GILLESPIE = '<NAME>'
HACRAN_DIAS = '<NAME>'
ISLAM_MAKHACHEV = '<NAME>'
JAMES_VICK = '<NAME>'
JARED_GORDON = '<NAME>'
JASON_GONZALEZ = '<NAME>'
JASON_SAGGO = '<NAME>'
JC_COTTRELL = '<NAME>'
JESUS_PINEDO = '<NAME>'
JOAQUIM_SILVA = '<NAME>'
JOE_DUFFY = '<NAME>'
JOE_LAUZON = '<NAME>'
JOEL_ALVAREZ = '<NAME>'
JOHN_GUNTHER = '<NAME>'
JOHN_MAKDESSI = '<NAME>'
JOHNNY_CASE = '<NAME>'
JON_TUCK = '<NAME>'
JORDAN_RINALDI = '<NAME>'
JOSHUA_BURKMAN = '<NAME>'
JOSH_EMMETT = '<NAME>'
JULIAN_EROSA = '<NAME>'
JUSTIN_GAETHJE = '<NAME>'
KAJAN_JOHNSON = '<NAME>'
KEVIN_LEE = '<NAME>'
KHABIB_NURMAGOMEDIV = '<NAME>'
KYLE_NELSON = '<NAME>'
LANDO_VANNATA = '<NAME>'
LEONARDO_SANTOS = '<NAME>'
MADS_BURNELL = '<NAME>'
MAGOMED_MUSTAFAEV = '<NAME>'
MAIRBEK_TAISUMOV = '<NAME>'
MARC_DIAKIESE = '<NAME>'
MARC_POLO_REYES = '<NAME>'
MARCIN_HELD = '<NAME>'
MARCOS_ROSA_MARIANO = '<NAME>'
MATT_FREVOLA = '<NAME>'
MATT_WIMAN = '<NAME>'
MICHAEL_JOHNSON = '<NAME>'
MICHAEL_MCBRIDE = '<NAME>'
MICHEL_PRAZERES = '<NAME>'
MITCH_CLARKE = '<NAME>'
NASRAT_HAQPARAST = '<NAME>'
NICK_HEIN = '<NAME>'
NIK_LENTZ = '<NAME>'
OLIVIER_AUBIN_MERCIER = '<NAME>'
PAUL_FELDER = '<NAME>'
RAFAEL_FIZIEV = '<NAME>'
RASHID_MAGOMEDOV = '<NAME>'
REZA_MADADI = '<NAME>'
ROOSEVELT_ROBERTS = '<NAME>'
ROSS_PEARSON = '<NAME>'
RUSTAM_KHABILOV = '<NAME>'
SAGE_NORTHCUTT = 'Sage Northcutt'
SCOTT_HOLTZMAN = '<NAME>'
STEVIE_RAY = '<NAME>'
TAKANORI_GOMI = '<NAME>'
TE_EDWARDS = 'Te Edwards'
TEEMU_PACKALEN = 'Teemu Packalen'
THIAGO_MOISES = '<NAME>'
THIBAULT_GOUTI = '<NAME>'
THOMAS_GIFFORD = '<NAME>'
TONY_FERGUSON = '<NAME>'
VINC_PICHEL = '<NAME>'
WILL_BROOKS = '<NAME>'
MENS_WELTERWEIGHT = 'mens welterweight' # 77.1kg
ABDUL_RAZAK_ALHASSAN = '<NAME>'
ALAN_JOUBAN = '<NAME>'
ALBERTO_MINA = '<NAME>'
ALEKSEI_KUNCHENKO = '<NAME>'
ALEX_GARCIA = '<NAME>'
ALEX_MORONO = '<NAME>'
ALEX_OLIVEIRA = '<NAME>'
ALEX_REYES = '<NAME>'
BARTOSZ_FABINSKI = '<NAME>'
BELAL_MUHAMMAD = '<NAME>'
BEN_ASKREN = '<NAME>'
BEN_SAUNDERS = '<NAME>'
BOBBY_NASH = '<NAME>'
BOJAN_VELICKOVIC = '<NAME>'
BRANDON_THATCH = '<NAME>'
BRIAN_CAMOZZI = '<NAME>'
BRYAN_BARBERENA = '<NAME>'
CARLO_PEDERSOLI_JR = '<NAME> Jr'
CARLOS_CONDIT = '<NAME>'
CHAD_LAPRISE = '<NAME>'
CHANCE_RENCOUNTRE = '<NAME>'
CHARLIE_WARD = '<NAME>'
CLAUDIO_DA_SILVA = '13-1-0'
CM_PUNK = 'CM Punk'
COLBY_COVINGTON = '<NAME>'
COURT_MCGEE = '<NAME>'
CRAIG_WHITE = '<NAME>'
CURTIS_MILLENDER = '<NAME>'
DAICHI_ABE = '<NAME>'
DANNY_ROBERTS = '<NAME>'
DARREN_TILL = '<NAME>'
DAVI_RAMOS = '<NAME>'
DAVID_ZAWADA = '<NAME>'
DEMIAN_MAIA = '<NAME>'
DERRICK_KRANTZ = '<NAME>'
DHIEGO_LIMA = '<NAME>'
DOMINIQUE_STEELE = '<NAME>'
DONALD_CERRONE = '<NAME>'
DONG_HYUN_KIM = '<NAME>'
DWIGHT_GRANT = 'Dwight Grant'
ELIZEU_ZALESKI_DOS_SANTOS = '<NAME> dos Santos'
EMIL_MEEK = '<NAME>'
ERICK_SILVA = '<NAME>'
FRANK_CAMACHO = '<NAME>'
GALORE_BOFANDO = '<NAME>'
GEOFF_NEAL = '<NAME>'
GEORGE_SULLIVAN = '<NAME>'
GUNNAR_NELSON = '<NAME>'
HECTOR_ALDANA = '<NAME>'
HYUN_GYU_KIM = '<NAME>'
ISMAIL_NAURDIEV = '<NAME>'
JAKE_ELLENBERGER = '<NAME>'
JAKE_MATTHEWS = '<NAME>'
JALIN_TURNER = '<NAME>'
JAMES_KRAUSE = '<NAME>'
JESSE_TAYLOR = '<NAME>'
JESSIN_AYARI = '<NAME>'
JIMMY_WALLHEAD = '<NAME>'
JOE_PROCTOR = '<NAME>'
JOHNY_HENDRICKS = '<NAME>'
JORDAN_MEIN = '<NAME>'
JORGE_MASVIDAL = '<NAME>'
KAMARU_USMAN = '<NAME>'
KEITA_NAKAMURA = '<NAME>'
KIICHI_KUNIMOTO = '<NAME>'
KYLE_PREPOLEC = '<NAME>'
KYLE_STEWART = '<NAME>'
LAUREANO_STAROPOLI = '<NAME>'
LEON_EDWARDS = '<NAME>'
LI_JINGLIANG = '<NAME>'
LUAN_CHAGAS = 'Luan Chagas'
LUIGI_VENDRAMINI = '<NAME>'
LUKE_JUMEAU = '<NAME>'
LYMAN_GOOD = '<NAME>'
MATT_BROWN = '<NAME>'
MAX_GRIFFIN = '<NAME>'
MICHAEL_CHIESA = '<NAME>'
MICHEL_PEREIRA = '<NAME>'
MICKEY_GALL = '<NAME>'
MIKE_JACKSON = '<NAME>'
MIKE_PERRY = '<NAME>'
MIKE_PYLE = '<NAME>'
MUSLIM_SALIKHOV = '<NAME>'
NATHAN_COY = '<NAME>'
NEIL_MAGNY = '<NAME>'
NICO_MUSOKE = '<NAME>'
NIKO_PRICE = '<NAME>'
NORDINE_TALEB = '<NAME>'
OLIVER_ENKAMP = '<NAME>'
OMARI_AKHMEDOV = '<NAME>'
PATRICK_COTE = '<NAME>'
PETER_SOBOTTA = '<NAME>'
RAFAEL_DOS_ANJOS = '<NAME>'
RANDY_BROWN = '<NAME>'
RICKEY_RAINEY = '<NAME>'
ROAN_CARNEIRO = '<NAME>'
ROBBIE_LAWLER = '<NAME>'
ROSTEM_AKMAN = '<NAME>'
RYAN_LAFLARE = '<NAME>'
SABAH_HOMASI = 'S<NAME>'
SALIM_TOUAHRI = '<NAME>'
SANTIAGO_PONZINIBBIO = '<NAME>'
SEAN_STRICKLAND = '<NAME>'
SERGEY_KHANDOZHKO = '<NAME>'
SERGIO_MORAES = '<NAME>'
SHINSHO_ANZAI = '<NAME>'
SONG_KENAN = '<NAME>'
STEPHEN_THOMPSON = '<NAME>'
SULTAN_ALIEV = '<NAME>'
TAKASHI_SATO = '<NAME>'
TAREC_SAFFIEDINE = '<NAME>'
THIAGO_ALVES = '<NAME>'
TIM_MEANS = '<NAME>'
TOM_GALLICCHIO = '<NAME>'
TYRON_WOODLEY = '<NAME>'
VICENTE_LUQUE = '<NAME>'
WARLLEY_ALVES = 'Warlley Alves'
YANCY_MEDEIROS = '<NAME>'
ZAK_CUMMINGS = '<NAME>'
ZAK_OTTOW = '<NAME>'
ZELIM_IMADAEV = '<NAME>'
MENS_MIDDLEWEIGHT = 'mens middleweight' # 83.9kg
ABU_AZAITAR = '<NAME>'
ALEN_AMEDOVSKI = '<NAME>'
ALESSIO_DI_CHRICO = '<NAME>'
ALEX_NICHOLSON = '<NAME>'
ANDERSON_SILVA = '<NAME>'
ANDREW_SANCHEZ = '<NAME>'
ANTHONY_HERNANDEZ = '<NAME>'
ANTHONY_SMITH = '<NAME>'
ANTONIO_BRAGA_NETO = '<NAME>'
ANTONIO_CARLOS_JUNIOR = '<NAME>'
BEVON_LEWIS = '<NAME>'
BRAD_TAVARES = '<NAME>'
BRADLEY_SCOTT = '<NAME>'
CEZAR_FERREIRA = '<NAME>'
CHARLES_BYRD = '<NAME>'
CHRIS_CAMOZZI = '<NAME>'
CHRIS_WEIDMAN = '<NAME>'
DANIEL_KELLY = '<NAME>'
DAVID_BRANCH = 'David Branch'
DEREK_BRUNSON = '<NAME>'
DERON_WINN = '<NAME>'
EDMEN_SHAHBAZYAN = '<NAME>'
ELIAS_THEODOROU = 'Elias Theodorou'
ERIC_SPICELY = 'Eric Spicely'
GARRETH_MCLELLAN = '<NAME>'
GEGARD_MOUSASI = '<NAME>'
GEORGES_ST_PIERRE = 'Georges St-Pierre'
GERALD_MEERSCHAERT = '<NAME>'
HECTOR_LOMBARD = '<NAME>'
IAN_HEINISCH = '<NAME>'
ISRAEL_ADESANYA = '<NAME>'
JACARE_SOUZA = '<NAME>'
JACK_HERMANSSON = '<NAME>'
JACK_MARSHMAN = '<NAME>'
JIM_MILLER = '<NAME>'
JOHN_PHILLIPS = '<NAME>'
JONATHAN_WILSON = '<NAME>'
JULIAN_MARQUEZ = '<NAME>'
KARL_ROBERSON = '<NAME>'
KELVIN_GASTELUM = '<NAME>'
KEVIN_HOLLAND = '<NAME>'
KRZYSZTOF_JOTKO = '<NAME>'
LYOTO_MACHIDA = '<NAME>'
MARC_ANDRE_BARRIAULT = '<NAME>'
MARKUS_PEREZ = '<NAME>'
MARVIN_VETTORI = '<NAME>'
MICHAEL_BISPING = '<NAME>'
NATE_MARQUARDT = '<NAME>'
OLUWALE_BAMGBOSE = '<NAME>'
OSKAR_PIECHOTA = 'O<NAME>'
PAULO_COSTA = '<NAME>'
RAFAEL_NATAL = '<NAME>'
RAMAZAN_EMEEV = '<NAME>'
RASHAD_EVANS = 'R<NAME>'
ROB_WILKINSON = '<NAME>'
ROBERT_WHITTAKER = '<NAME>'
RONALDO_SOUZA = '<NAME>'
RYAN_JANES = '<NAME>'
SAM_ALVEY = '<NAME>'
SCOTT_ASKHAM = '<NAME>'
SIYAR_BAHADURZADA = '<NAME>'
THALES_LEITES = 'Thales Leites'
THIAGO_SANTOS = '<NAME>'
TIM_BOETSCH = '<NAME>'
TIM_WILLIAMS = '<NAME>'
TOM_BREESE = '<NAME>'
TREVOR_SMITH = '<NAME>'
URIAH_HALL = '<NAME>'
VITOR_BELFORT = '<NAME>'
VITOR_MIRANDA = '<NAME>'
WELLINGTON_TURMAN = '<NAME>'
YOEL_ROMERO = '<NAME>'
MENS_LIGHT_HEAVYWEIGHT = 'mens light heavyweight' # 93kg
ABDUL_KERIM_EDILOV = '<NAME>'
ALEKSANDAR_RAKIC = '<NAME>'
ALEXANDER_GUSTAFSSON = '<NAME>'
ALONZO_MENIFIELD = '<NAME>'
ANTHONY_JOHNSON = '<NAME>'
BOJAN_MIHAJLOVIC = '<NAME>'
CB_DOLLAWAY = 'CB Dollaway'
COREY_ANDERSON = '<NAME>'
DALCHA_LUNGIAMBULA = '<NAME>'
DANIEL_CORMIER = '<NAME>'
DANIEL_JOLLY = '<NAME>'
DARKO_STOSIC = '<NAME>'
DARREN_STEWART = '<NAME>'
DEVIN_CLARK = '<NAME>'
DEQUAN_TOWNSEND = 'De<NAME>ownsend'
DOMINICK_REYES = '<NAME>'
ED_HERMAN = '<NAME>'
ERYK_ANDERS = '<NAME>'
FRANCIMAR_BARROSO = '<NAME>'
GADZHIMURAD_ANTIGULOV = 'Gadzhimurad Antigulov'
GIAN_VILLANTE = '<NAME>'
GLOVER_TEIXEIRA = '<NAME>'
GOKHAN_SAKI = '<NAME>'
HENRIQUE_DA_SILVA = '<NAME>'
ILIR_LATIFI = 'Ilir Latifi'
ION_CUTELABA = 'Ion Cutelaba'
JAKE_COLLIER = '<NAME>'
JAMES_BOCHNOVIC = '<NAME>'
JAN_BLACHOWICZ = '<NAME>'
JARED_CANNONIER = '<NAME>'
JEREMY_KIMBALL = '<NAME>'
JIMI_MANUWA = '<NAME>'
JIMMY_CRUTE = '<NAME>'
JOACHIM_CHRISTENSEN = '<NAME>'
JOHN_ALLAN = '<NAME>'
JOHNNY_WALKER = '<NAME>'
JON_JONES = '<NAME>'
JORDAN_JOHNSON = '<NAME>'
JOSH_STANSBURY = '<NAME>'
KENNEDY_NZECHUKWU = '<NAME>'
KHALIL_ROUNTREE = '<NAME>'
KLIDSON_DE_ABREU = '<NAME>'
LUKE_ROCKHOLD = '<NAME>'
MAGOMED_ANKALAEV = '<NAME>'
MARCIN_PRACHNIO = '<NAME>'
MARCOS_ROGERIO_DE_LIMA = '<NAME>'
MAURICIO_RUA = '<NAME>'
MICHAL_OLEKSIEJCZUK = '<NAME>'
MIKE_RODRIGUEZ = '<NAME>'
MISHA_CIRKUNOV = '<NAME>'
NICK_NEGUMEREANU = '<NAME>'
NICK_ROEHRICK = '<NAME>'
NIKITA_KRYLOV = '<NAME>'
OVINCE_SAINT_PREUX = 'Ovince Saint Preux'
PATRICK_CUMMINS = '<NAME>'
PAUL_CRAIG = '<NAME>'
ROGERIO_NOGUEIRA = '<NAME>'
RYAN_SPANN = '<NAME>'
SAPARBEG_SAFAROV = '<NAME>'
TREVIN_GILES = '<NAME>'
TYSON_PEDRO = '<NAME>'
VINICIUS_CASTRO = '<NAME>'
VOLKAN_OEZDEMIR = '<NAME>'
YUSHIN_OKAMI = '<NAME>'
MENS_HEAVYWEIGHT = 'mens heavyweight' # 120.2kg
ADAM_MILSTEAD = '<NAME>'
ADAM_WIECZOREK = '<NAME>'
ALEKSEI_OLEINIK = '<NAME>'
ALEXANDER_VOLKOV = '<NAME>'
ALISTAIR_OVEREEM = '<NAME>'
ALLEN_CROWDER = '<NAME>'
ANDREI_ARLOVSKI = '<NAME>'
ANTHONY_HAMILTON = '<NAME>'
ARJAN_BHULLAR = '<NAME>'
AUGUSTO_SAKAI = '<NAME>'
BEN_ROTHWELL = '<NAME>'
BLAGOY_IVANOV = '<NAME>'
CAIN_VELASQUEZ = '<NAME>'
CHASE_SHERMAN = '<NAME>'
CHRIS_DE_LA_ROCHA = '<NAME>'
CHRISTIAN_COLOMBO = '<NAME>'
CURTIS_BLAYDES = '<NAME>'
CYRIL_ASKER = '<NAME>'
DAMIAN_GRABOWSKI = '<NAME>'
DANIEL_SPITZ = '<NAME>'
DANIEL_OMIELANCZUK = '<NAME>'
DERRICK_LEWIS = '<NAME>'
DMITRII_SMOLIAKOV = '<NAME>'
DMITRY_SMOLYAKOV = '<NAME>'
DMITRY_SOSNOVSKIY = '<NAME>'
FABRICIO_WERDUM = '<NAME>'
FRANCIS_NGANNOU = '<NAME>'
GREG_HARDY = '<NAME>'
HU_YAOZONG = 'Hu Yaozong'
JAIRZINHO_ROZENSTRUIK = '<NAME>'
JAMES_MULHERON = '<NAME>'
JEFF_HUGHES = '<NAME>'
JUAN_ADAMS = '<NAME>'
JUAN_ESPINO = '<NAME>'
JUNIOR_ALBINI = 'Jun<NAME>'
JUNIOR_DOS_SANTOS = 'Junior dos Santos'
JUSTIN_FRAZIER = '<NAME>'
JUSTIN_LEDET = '<NAME>'
JUSTIN_WILLIS = '<NAME>'
LUIS_HENRIQUE = '<NAME>'
MARCEL_FORTUNA = '<NAME>'
MARCELO_GOLM = '<NAME>'
MARCIN_TYBURA = '<NAME>'
MARK_GODBEER = '<NAME>'
MARK_HUNT = '<NAME>'
MAURICE_GREEN = '<NAME>'
MICHEL_BATISTA = '<NAME>'
RASHAD_COULTER = '<NAME>'
ROY_NELSON = '<NAME>'
SERGEI_PAVLOVICH = '<NAME>'
SERGEY_SPIVAK = '<NAME>'
SHAMIL_ABDURAKHIMOV = '<NAME>'
STEFAN_STRUVE = '<NAME>'
STIPE_MIOCIC = '<NAME>'
TAI_TUIVASA = 'T<NAME>'
TIM_JOHNSON = '<NAME>'
TRAVIS_BROWNE = '<NAME>'
VIKTOR_PESTA = '<NAME>'
WALT_HARRIS = '<NAME>'
ZU_ANYANWU = 'Zu Anyanwu'
###############################################################################################
# womans
###############################################################################################
WOMANS_STRAWWEIGHT = 'womans strawweight'
ALEX_CHAMBERS = '<NAME>'
ALEXA_GRASSO = '<NAME>'
ALEXANDRA_ALBU = '<NAME>'
AMANDA_COOPER = '<NAME>'
AMANDA_RIBAS = '<NAME>'
ANGELA_HILL = '<NAME>'
ANGELA_MAGANA = '<NAME>'
ASHLEY_YODER = '<NAME>'
BEC_RAWLINGS = 'B<NAME>'
BRIANNA_VAN_BUREN = '<NAME>'
CARLA_ESPARZA = '<NAME>'
CHANMI_JEON = '<NAME>'
CLAUDIA_GADELHA = '<NAME>'
CORTNEY_CASEY_SANCHEZ = '<NAME>'
CYNTHIA_CALVILLO = '<NAME>'
DANIELLE_TAYLOR = '<NAME>'
EMILY_WHITMIRE = '<NAME>'
FELICE_HERRIG = '<NAME>'
HANNA_CIFERS = '<NAME>'
JAMIE_MOYLE = '<NAME>'
JESSICA_AGUILAR = '<NAME>'
JESSICA_ANDRADE = '<NAME>'
JESSICA_PENNE = '<NAME>'
JJ_ALDRICH = '<NAME>'
JOANNA_JEDRZEJCZYK = '<NAME>'
JOANNE_CALDERWOOD = '<NAME>'
JOCELYN_JONES_LYBARGER = '<NAME>'
JODIE_ESQUIBEL = '<NAME>'
JULIANA_LIMA = '<NAME>'
JUSTINE_KISH = '<NAME>'
KAILIN_CURRAN = '<NAME>'
KAROLINA_KOWALKIEWICZ = '<NAME>'
LIVINHA_SOUZA = '<NAME>'
MACKENZIE_DERN = '<NAME>'
MAIA_STEVENSON = '<NAME>'
MARINA_RODRIGUEZ = '<NAME>'
MARYNA_MOROZ = '<NAME>'
MAYCEE_BARBER = '<NAME>'
MICHELLE_WATERSON = '<NAME>'
NADIA_KASSEM = '<NAME>'
NINA_ANSAROFF = '<NAME>'
PEARL_GONZALEZ = '<NAME>'
POLIANA_BOTELHO = 'Pol<NAME>'
POLYANA_VIANA = 'Polyana Viana'
RANDA_MARKOS = 'Randa Markos'
ROSE_NAMAJUNAS = '<NAME>'
SYURI_KONDO = '<NAME>'
TATIANA_SUAREZ = '<NAME>'
TECIA_TORRES = '<NAME>'
VIVIANE_PEREIRA = '<NAME>'
WEILI_ZHANG = 'We<NAME>'
YAN_XIAONAN = '<NAME>'
ZHANG_WEILI = 'Z<NAME>'
WOMANS_FLYWEIGHT = 'womans flyweight'
ANDREA_LEE = '<NAME>'
ANTONINA_SHEVCHENKO = '<NAME>'
ARIANE_LIPSKI = '<NAME>'
ARIEL_BECK = '<NAME>'
BARB_HONCHAK = '<NAME>'
CHRISTINA_MARKS = '<NAME>'
DEANNA_BENNETT = '<NAME>'
GILLIAN_ROBERTSON = '<NAME>'
JENNIFER_MAIA = '<NAME>'
JESSICA_EYE = '<NAME>'
JESSICA_ROSE_CLARK = '<NAME>'
KALINDRA_FARIA = '<NAME>'
KARINE_GEVORGYAN = '<NAME>'
LAUREN_MUELLER = '<NAME>'
LAUREN_MURPHY = '<NAME>'
LIZ_CARMOUCHE = '<NAME>'
LUANA_CAROLINA = '<NAME>'
MARA_ROMERO_BORELLA = '<NAME>'
MAYRA_BUENO_SILVA = '<NAME>'
MELINDA_FABIAN = '<NAME>'
MOLLY_MCCANN = '<NAME>'
MONTANA_DE_LA_ROSA = 'Montana de la Rosa'
NICCO_MONTANO = '<NAME>'
PAIGE_VANZANT = '<NAME>'
PRISCILA_CACHOEIRA = '<NAME>'
RACHAEL_OSTOVICH = '<NAME>'
ROXANNE_MODAFFERI = '<NAME>'
SABINA_MAZO_ISAZA = '<NAME>'
SARAH_FROTA = '<NAME>'
SHANA_DOBSON = '<NAME>'
SIJARA_EUBANKS = '<NAME>'
TAILA_SANTOS = 'Taila Santos'
VERONICA_MACEDO = 'Veronica Macedo'
WOMANS_BANTAMWEIGHT = 'womans bantamweight'
ALEXIS_DAVIS = '<NAME>'
AMANDA_LEMOS = '<NAME>'
AMANDA_NUNES = '<NAME>'
ASHLEE_EVANS_SMITH = '<NAME>-Smith'
ASPEN_LADD = 'Aspen Ladd'
BETHE_CORREIA = '<NAME>'
CAT_ZINGANO = 'Cat Zingano'
CINDY_DANDOIS = '<NAME>'
GINA_MAZANY = '<NAME>'
IRENE_ALDANA = '<NAME>'
JI_YEON_KIM = '<NAME>'
JULIA_AVILA = '<NAME>'
JULIANNA_PENA = '<NAME>'
KATLYN_CHOOKAGIAN = '<NAME>'
KETLEN_VIEIRA = '<NAME>'
LESLIE_SMITH = '<NAME>'
LINA_LANSBERG = '<NAME>'
LUCIE_PUDILOVA = '<NAME>'
MARION_RENEAU = '<NAME>'
PANNIE_KIANZAD = '<NAME>'
RAQUEL_PENNINGTON = '<NAME>'
RONDA_ROUSEY = '<NAME>'
SARA_MCMANN = '<NAME>'
SARAH_MORAS = '<NAME>'
TALITA_BERNARDO = '<NAME>'
VALENTINA_SHEVCHENKO = '<NAME>'
VIVIANE_ARAUJO = '<NAME>'
WU_YANAN = 'Wu Yanan'
WOMANS_FEATHERWEIGHT = 'womans featherweight'
CRIS_CYBORG = 'Cris Cyborg'
FELICIA_SPENCER = '<NAME>'
GERMAINE_DE_RANDAMIE = '<NAME>'
HOLLY_HOLM = '<NAME>'
JULIJA_STOLIARENKO = '<NAME>'
LEAH_LETSON = '<NAME>'
MACY_CHIASSON = '<NAME>'
MEGAN_ANDERSON = '<NAME>'
TONYA_EVINGER = '<NAME>'
YANA_KUNITSKAYA = 'Yana Kunitskaya'
|
StarcoderdataPython
|
1623736
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('bpp', '0002_auto_20141020_1738'),
]
operations = [
migrations.AlterField(
model_name='patent',
name='utworzono',
field=models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), auto_now_add=True, verbose_name=b'Utworzono', db_index=True),
),
migrations.AlterField(
model_name='praca_doktorska',
name='utworzono',
field=models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), auto_now_add=True, verbose_name=b'Utworzono', db_index=True),
),
migrations.AlterField(
model_name='praca_habilitacyjna',
name='utworzono',
field=models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), auto_now_add=True, verbose_name=b'Utworzono', db_index=True),
),
migrations.AlterField(
model_name='wydawnictwo_ciagle',
name='utworzono',
field=models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), auto_now_add=True, verbose_name=b'Utworzono', db_index=True),
),
migrations.AlterField(
model_name='wydawnictwo_zwarte',
name='utworzono',
field=models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0), auto_now_add=True, verbose_name=b'Utworzono', db_index=True),
),
]
|
StarcoderdataPython
|
89500
|
<gh_stars>0
"""
Bring full functionality to light and media player controllers.
From turning devices on/off to changing the color of lights.
https://github.com/xaviml/controllerx
"""
from cx_core import (
CallServiceController,
Controller,
CoverController,
CustomCoverController,
CustomLightController,
CustomMediaPlayerController,
CustomSwitchController,
LightController,
MediaPlayerController,
SwitchController,
)
from cx_devices.aqara import *
from cx_devices.ikea import *
from cx_devices.legrand import *
from cx_devices.lutron import *
from cx_devices.philips import *
from cx_devices.smartthings import *
from cx_devices.trust import *
|
StarcoderdataPython
|
3384843
|
colour_list=["red","blue","green","black"]
print('first and last in array')
print('%s %s'%(colour_list[0],colour_list[3]))
#array starts from 0
print('%s %s'%(colour_list[0],colour_list[-1]))
#goes back to the end in a loop
print('%s %s'%(colour_list[0],colour_list[-5]))
#above is not a valid statement
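# Hedged addition (not in the original snippet): shows the IndexError from the
# out-of-range index above without stopping the script.
try:
    print(colour_list[-5])
except IndexError as err:
    print('IndexError:', err)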
|
StarcoderdataPython
|
1692685
|
<filename>src/larksuiteoapi/service/ehr/v1/api.py
# -*- coding: UTF-8 -*-
# Code generated by lark suite oapi sdk gen
from typing import *
from ....api import Request, Response, set_timeout, set_tenant_key, set_user_access_token, set_path_params, \
set_query_params, set_response_stream, set_is_response_stream, FormData, FormDataFile
from ....config import Config
from ....consts import ACCESS_TOKEN_TYPE_TENANT, ACCESS_TOKEN_TYPE_USER, ACCESS_TOKEN_TYPE_APP
from .model import *
class Service(object):
def __init__(self, conf):
# type: (Config) -> None
self.conf = conf
self.attachments = AttachmentService(self)
self.employees = EmployeeService(self)
class AttachmentService(object):
def __init__(self, service):
# type: (Service) -> None
self.service = service
def get(self, tenant_key=None, response_stream=None, timeout=None):
# type: (str, Union[None, IO], int) -> AttachmentGetReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
if response_stream is not None:
request_opts += [set_response_stream(response_stream)]
return AttachmentGetReqCall(self, request_opts=request_opts)
class EmployeeService(object):
def __init__(self, service):
# type: (Service) -> None
self.service = service
def list(self, tenant_key=None, timeout=None):
# type: (str, int) -> EmployeeListReqCall
request_opts = [] # type: List[Callable[[Any], Any]]
if timeout is not None:
request_opts += [set_timeout(timeout)]
if tenant_key is not None:
request_opts += [set_tenant_key(tenant_key)]
return EmployeeListReqCall(self, request_opts=request_opts)
class AttachmentGetReqCall(object):
def __init__(self, service, request_opts=None):
# type: (AttachmentService, List[Any]) -> None
self.service = service
self.path_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_token(self, token):
# type: (str) -> AttachmentGetReqCall
self.path_params['token'] = token
return self
def do(self):
# type: () -> Response[None]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_path_params(self.path_params)]
self.request_opts += [set_is_response_stream()]
req = Request('ehr/v1/attachments/:token', 'GET', [ACCESS_TOKEN_TYPE_TENANT],
None, request_opts=self.request_opts)
resp = req.do(conf)
return resp
class EmployeeListReqCall(object):
def __init__(self, service, request_opts=None):
# type: (EmployeeService, List[Any]) -> None
self.service = service
self.query_params = {} # type: Dict[str, Any]
if request_opts:
self.request_opts = request_opts
else:
self.request_opts = [] # type: List[Any]
def set_view(self, view):
# type: (str) -> EmployeeListReqCall
self.query_params['view'] = view
return self
def set_status(self, status):
# type: (List[int]) -> EmployeeListReqCall
self.query_params['status'] = status
return self
def set_type(self, type):
# type: (List[int]) -> EmployeeListReqCall
self.query_params['type'] = type
return self
def set_start_time(self, start_time):
# type: (int) -> EmployeeListReqCall
self.query_params['start_time'] = start_time
return self
def set_end_time(self, end_time):
# type: (int) -> EmployeeListReqCall
self.query_params['end_time'] = end_time
return self
def set_user_id_type(self, user_id_type):
# type: (str) -> EmployeeListReqCall
self.query_params['user_id_type'] = user_id_type
return self
def set_user_ids(self, user_ids):
# type: (str) -> EmployeeListReqCall
self.query_params['user_ids'] = user_ids
return self
def set_page_token(self, page_token):
# type: (str) -> EmployeeListReqCall
self.query_params['page_token'] = page_token
return self
def set_page_size(self, page_size):
# type: (int) -> EmployeeListReqCall
self.query_params['page_size'] = page_size
return self
def do(self):
# type: () -> Response[EmployeeListResult]
root_service = self.service.service
conf = root_service.conf
self.request_opts += [set_query_params(self.query_params)]
req = Request('ehr/v1/employees', 'GET', [ACCESS_TOKEN_TYPE_TENANT],
None, output_class=EmployeeListResult, request_opts=self.request_opts)
resp = req.do(conf)
return resp
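# Hedged usage sketch (not part of the generated SDK file): the intended call
# chain, assuming a configured `conf` object from the surrounding package; the
# tenant key and file token below are placeholders.
#
#   service = Service(conf)
#   resp = service.employees.list(tenant_key="<tenant>").set_page_size(50).do()
#   attachment = service.attachments.get().set_token("<file token>").do()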
|
StarcoderdataPython
|
25034
|
<reponame>PKUfudawei/cmssw
import FWCore.ParameterSet.Config as cms
from ..modules.hltBTagPFPuppiDeepCSV0p865DoubleEta2p4_cfi import *
from ..modules.hltDoublePFPuppiJets128Eta2p4MaxDeta1p6_cfi import *
from ..modules.hltDoublePFPuppiJets128MaxEta2p4_cfi import *
from ..modules.l1tDoublePFPuppiJet112offMaxEta2p4_cfi import *
from ..modules.l1tDoublePFPuppiJets112offMaxDeta1p6_cfi import *
from ..sequences.HLTAK4PFPuppiJetsReconstruction_cfi import *
from ..sequences.HLTBeginSequence_cfi import *
from ..sequences.HLTBtagDeepCSVSequencePFPuppiModEta2p4_cfi import *
from ..sequences.HLTEndSequence_cfi import *
from ..sequences.HLTParticleFlowSequence_cfi import *
HLT_DoublePFPuppiJets128_DoublePFPuppiBTagDeepCSV_2p4 = cms.Path(
HLTBeginSequence +
l1tDoublePFPuppiJet112offMaxEta2p4 +
l1tDoublePFPuppiJets112offMaxDeta1p6 +
HLTParticleFlowSequence +
HLTAK4PFPuppiJetsReconstruction +
hltDoublePFPuppiJets128MaxEta2p4 +
hltDoublePFPuppiJets128Eta2p4MaxDeta1p6 +
HLTBtagDeepCSVSequencePFPuppiModEta2p4 +
hltBTagPFPuppiDeepCSV0p865DoubleEta2p4 +
HLTEndSequence
)
|
StarcoderdataPython
|
66470
|
# Author: <NAME>
# Contributors: <NAME>
import numpy as np
import scipy
import torch
class Geometry():
"""Helper class to calculate distances, angles, and dihedrals
with a unified, vectorized framework depending on whether pytorch
or numpy is used.
Parameters
----------
method : 'torch' or 'numpy' (default='torch')
        Library used for computations
device : torch.device (default=torch.device('cpu'))
Device upon which geometrical calculations will take place. When
embedded as an attribute for a feature class, the device will inherit
from the feature device attribute
"""
def __init__(self, method='torch', device=torch.device('cpu')):
self.device = device
if method not in ['torch', 'numpy']:
raise RuntimeError("Allowed methods are 'torch' and 'numpy'")
self.method = method
# # # # # # # # # # # # #
# Define any types here #
# # # # # # # # # # # # #
if method == 'torch':
self.bool = torch.bool
self.float32 = torch.float32
elif self.method == 'numpy':
self.bool = np.bool
self.float32 = np.float32
def check_for_nans(self, object, name=None):
"""This method checks an object for the presence of nans and
returns an error if any nans are found.
"""
if name is None:
name = ''
if self.isnan(object).any():
raise ValueError(
"Nan found in {}. Check your coordinates!)".format(
name)
)
def check_array_vs_tensor(self, object, name=None):
"""This method checks whether the object (i.e., numpy array or torch
tensor) is consistent with the method chosen for the Geometry
instance (i.e., 'numpy' or 'torch', respectively).
"""
if name is None:
name = ''
if self.method == 'numpy' and type(object) is not np.ndarray:
raise ValueError(
"Input argument {} must be type np.ndarray for Geometry(method='numpy')".format(
name)
)
if self.method == 'torch' and type(object) is not torch.Tensor:
raise ValueError(
"Input argument {} must be type torch.Tensor for Geometry(method='torch')".format(
name)
)
def get_distance_indices(self, n_beads, backbone_inds=[], backbone_map=None):
"""Determines indices of pairwise distance features.
"""
pair_order = []
adj_backbone_pairs = []
for increment in range(1, n_beads):
for i in range(n_beads - increment):
pair_order.append((i, i+increment))
if len(backbone_inds) > 0:
if (backbone_map[i+increment]
- backbone_map[i] == 1):
adj_backbone_pairs.append((i, i+increment))
return pair_order, adj_backbone_pairs
def get_redundant_distance_mapping(self, pair_order):
"""Reformulates pairwise distances from shape [n_frames, n_dist]
to shape [n_frames, n_beads, n_neighbors]
This is done by finding the index mapping between non-redundant and
redundant representations of the pairwise distances. This mapping can
then be supplied to Schnet-related features, such as a
RadialBasisFunction() layer, which use redundant pairwise distance
representations.
"""
        pairwise_dist_inds = [
            zipped_pair[1]
            for zipped_pair in sorted(zip(pair_order, np.arange(len(pair_order))))
        ]
map_matrix = scipy.spatial.distance.squareform(pairwise_dist_inds)
map_matrix = map_matrix[~np.eye(map_matrix.shape[0],
dtype=bool)].reshape(
map_matrix.shape[0], -1)
return map_matrix
def get_vectorize_inputs(self, inds, data):
"""Helper function to obtain indices for vectorized calculations.
"""
if len(np.unique([len(feat) for feat in inds])) > 1:
raise ValueError(
"All features must be the same length."
)
feat_length = len(inds[0])
ind_list = [[feat[i] for feat in inds]
for i in range(feat_length)]
dist_list = [data[:, ind_list[i+1], :]
- data[:, ind_list[i], :]
for i in range(feat_length - 1)]
if len(dist_list) == 1:
dist_list = dist_list[0]
return dist_list
def get_distances(self, distance_inds, data, norm=True):
"""Calculates distances in a vectorized fashion.
"""
self.check_array_vs_tensor(data, 'data')
distances = self.get_vectorize_inputs(distance_inds, data)
if norm:
distances = self.norm(distances, axis=2)
self.check_for_nans(distances, 'distances')
return distances
def get_angles(self, angle_inds, data, clip=True):
"""Calculates angles in a vectorized fashion.
If clip is True (default), then the angle cosines are clipped
to be between -1 and 1 to account for numerical error.
"""
self.check_array_vs_tensor(data, 'data')
base, offset = self.get_vectorize_inputs(angle_inds, data)
# This convention assumes that the middle index of the angle triplet
# is the angle vertex. Scalar multiplication of the first vector
# of the angle triplet by -1 means that the vertex point is
# subtracted from the non-vertex point for the first vector.
# This ensures that the arccos operation returns the acute angle
# at the vertex. See test_geometry_features for a non-parallel
# formulation.
base *= -1
angles = self.sum(base * offset, axis=2) / self.norm(base,
axis=2) / self.norm(
offset, axis=2)
if clip:
# Clipping to prevent the arccos to be NaN
angles = self.arccos(self.clip(angles,
lower_bound=-1.,
upper_bound=1.))
self.check_for_nans(angles, 'angles')
return angles
def get_dihedrals(self, dihed_inds, data):
"""Calculates dihedrals in a vectorized fashion.
Note
----
This is implemented in a hacky/bad way. It calculates twice as many
dihedrals as needed and removes every other one. There is a better
way to do this, I think using two lists of angles, but for now
this has the correct functionality.
"""
self.check_array_vs_tensor(data, 'data')
angle_inds = np.concatenate([[(f[i], f[i+1], f[i+2])
for i in range(2)] for f in dihed_inds])
base, offset = self.get_vectorize_inputs(angle_inds, data)
offset_2 = base[:, 1:]
cross_product_adj = self.cross(base, offset, axis=2)
cp_base = cross_product_adj[:, :-1, :]
cp_offset = cross_product_adj[:, 1:, :]
plane_vector = self.cross(cp_offset, offset_2, axis=2)
dihedral_cosines = self.sum(cp_base[:, ::2]*cp_offset[:, ::2],
axis=2)/self.norm(
cp_base[:, ::2], axis=2)/self.norm(cp_offset[:, ::2], axis=2)
dihedral_sines = self.sum(cp_base[:, ::2]*plane_vector[:, ::2],
axis=2)/self.norm(
cp_base[:, ::2], axis=2)/self.norm(plane_vector[:, ::2], axis=2)
dihedral_rad = self.arctan(dihedral_sines / dihedral_cosines)
#dihedral_rad = self.arccos(dihedral_cosines)
#dihedral_rad = self.arccos(self.clip(dihedral_cosines,
# lower_bound=-1.,
# upper_bound=1.))
self.check_for_nans(dihedral_rad, 'dihedral')
return dihedral_rad
def get_neighbors(self, distances, cutoff=None):
"""Calculates a simple neighbor list in which every bead sees
each other. If a cutoff is specified, only beads inside that distance
cutoff are considered as neighbors.
Parameters
----------
distances: torch.Tensor or np.array
Redundant distance matrix of shape (n_frames, n_beads, n_neighbors).
cutoff: float (default=None)
Distance cutoff in Angstrom in which beads are considered neighbors.
Returns
-------
neighbors: torch.Tensor or np.array
Indices of all neighbors of each bead. This is not affected by the
mask.
Shape [n_frames, n_beads, n_neighbors]
neighbor_mask: torch.Tensor or np.array
Index mask to filter out non-existing neighbors that were
            introduced due to distance cutoffs.
Shape [n_frames, n_beads, n_neighbors]
"""
self.check_array_vs_tensor(distances, 'distances')
n_frames, n_beads, n_neighbors = distances.shape
# Create a simple neighbor list of shape [n_frames, n_beads, n_neighbors]
# in which every bead sees each other but themselves.
# First, create a matrix that contains all indices.
neighbors = self.tile(self.arange(n_beads), (n_frames, n_beads, 1))
# To remove the self interaction of beads, an inverted identity matrix
# is used to exclude the respective indices in the neighbor list.
neighbors = neighbors[:, ~self.eye(n_beads, dtype=self.bool)].reshape(
n_frames,
n_beads,
n_neighbors)
if cutoff is not None:
# Create an index mask for neighbors that are inside the cutoff
neighbor_mask = distances < cutoff
neighbor_mask = self.to_type(neighbor_mask, self.float32)
else:
neighbor_mask = self.ones((n_frames, n_beads, n_neighbors),
dtype=self.float32)
return neighbors, neighbor_mask
def _torch_eye(self, n, dtype):
if dtype == torch.bool:
# Only in pytorch>=1.2!
return torch.BoolTensor(np.eye(n, dtype=np.bool))
else:
return torch.eye(n, dtype=dtype)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # Versatile Methods # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The methods implemented below should modify the originals as little as
# possible, such that the documentation for the respective method on the
# numpy and pytorch websites should be sufficient.
# Methods defined: arccos, cross, norm, sum, arange, tile, eye, ones,
# to_type, clip, isnan
def arccos(self, x):
if self.method == 'torch':
return torch.acos(x)
elif self.method == 'numpy':
return np.arccos(x)
def arctan(self, x):
if self.method == 'torch':
return torch.atan(x)
elif self.method == 'numpy':
return np.arctan(x)
def cross(self, x, y, axis):
if self.method == 'torch':
return torch.cross(x, y, dim=axis)
elif self.method == 'numpy':
return np.cross(x, y, axis=axis)
def norm(self, x, axis):
if self.method == 'torch':
return torch.norm(x, dim=axis)
elif self.method == 'numpy':
return np.linalg.norm(x, axis=axis)
def sum(self, x, axis):
if self.method == 'torch':
return torch.sum(x, dim=axis)
elif self.method == 'numpy':
return np.sum(x, axis=axis)
def arange(self, n):
if self.method == 'torch':
return torch.arange(n)
elif self.method == 'numpy':
return np.arange(n)
def tile(self, x, shape):
if self.method == 'torch':
return x.repeat(*shape)
elif self.method == 'numpy':
return np.tile(x, shape)
def eye(self, n, dtype):
# As of pytorch 1.2.0, BoolTensors are implemented. However,
# torch.eye does not take dtype=torch.bool on CPU devices yet.
# Watch pytorch PR #24148 for the implementation, which would
# allow us to return torch.eye(n, dtype=dtype)
# For now, we do this:
if self.method == 'torch':
return self._torch_eye(n, dtype).to(self.device)
elif self.method == 'numpy':
return np.eye(n, dtype=dtype)
def ones(self, shape, dtype):
if self.method == 'torch':
return torch.ones(*shape, dtype=dtype).to(self.device)
elif self.method == 'numpy':
return np.ones(shape, dtype=dtype)
def to_type(self, x, dtype):
if self.method == 'torch':
return x.type(dtype)
elif self.method == 'numpy':
return x.astype(dtype)
def clip(self, x, lower_bound, upper_bound, out=None):
if self.method == 'torch':
return torch.clamp(x, min=lower_bound, max=upper_bound, out=out)
elif self.method == 'numpy':
return np.clip(x, a_min=lower_bound, a_max=upper_bound, out=out)
def isnan(self, x):
if self.method == 'torch':
return torch.isnan(x)
elif self.method == 'numpy':
return np.isnan(x)
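# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the numpy backend; the frame/bead counts and the
# random coordinates below are made up for demonstration.
if __name__ == '__main__':
    geom = Geometry(method='numpy')
    coords = np.random.randn(5, 4, 3).astype(np.float32)  # 5 frames, 4 beads, xyz
    pair_order, _ = geom.get_distance_indices(n_beads=4)
    dists = geom.get_distances(pair_order, coords, norm=True)
    print(dists.shape)  # (5, 6): one distance per frame for each of the 6 bead pairs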
|
StarcoderdataPython
|
198567
|
<filename>midca/examples/nbeacons_aaai17_agent3.py
#!/usr/bin/env python
import MIDCA
from MIDCA import base, goals
from MIDCA.modules import simulator, guide, evaluate, perceive, intend, planning, act, note, assess
from MIDCA.metamodules import monitor, control, interpret, metaintend, plan
from MIDCA.worldsim import domainread, stateread
import inspect, os
import random
# Domain Specific Imports
from MIDCA.domains.nbeacons import nbeacons_util
from MIDCA.domains.nbeacons.plan import methods_nbeacons, operators_nbeacons
'''
Simulation of the NBEACONS domain (adapted from marsworld in [Dannenhauer and Munoz-Avila 2015]).
THIS IS THE START SCRIPT FOR THE META COGNITIVE AGENT (Agent 3)
'''
wind_schedule = [[10,1],[50,2],[120,3],[200,4]]
# generate goals randomly, such that no goal immediately repeats the previous one
num_goals = 100
goal_list = []
i = 0
possible_goals = range(10)
last_chosen_goal = -1
while i < num_goals:
if last_chosen_goal == -1:
curr_goal = random.choice(possible_goals)
goal_list.append(curr_goal)
last_chosen_goal = curr_goal
else:
tmp_possible_goals = set(possible_goals) - set([last_chosen_goal])
curr_goal = random.sample(tmp_possible_goals,1)[0]
goal_list.append(curr_goal)
last_chosen_goal = curr_goal
i+=1
goal_list = map(lambda x: goals.Goal('B'+str(x), predicate = "activated"), goal_list)
# Setup
thisDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
MIDCA_ROOT = thisDir + "/../"
### Domain Specific Variables
DOMAIN_ROOT = MIDCA_ROOT + "domains/nbeacons/"
DOMAIN_FILE = DOMAIN_ROOT + "domains/nbeacons_avoid_mud.sim"
#STATE_FILE = DOMAIN_ROOT + "states/.sim" # state file is generated dynamically
DISPLAY_FUNC = nbeacons_util.drawNBeaconsScene
DECLARE_METHODS_FUNC = methods_nbeacons.declare_methods
DECLARE_OPERATORS_FUNC = operators_nbeacons.declare_operators
GOAL_GRAPH_CMP_FUNC = nbeacons_util.preferFree
DIMENSION = 16 # width and height of grid
BEACON_FAIL_RATE = 20 # percent chance each beacon will fail each tick
WIND_ENABLED = True
WIND_DIR = 'east' # direction to push the agent if it moves in this direction
WIND_STRENGTH = 0 # number of extra tiles for the agent to move
NUM_QUICKSAND = 10
# Load domain
world = domainread.load_domain(DOMAIN_FILE)
# Create Starting state
state1 = nbeacons_util.NBeaconGrid()
#state1.generate_good_test()
state1.generate(width=DIMENSION,height=DIMENSION,num_beacons=10,num_quicksand_spots=NUM_QUICKSAND)
state1_str = state1.get_STRIPS_str()
# Load state
stateread.apply_state_str(world, state1_str)
# Creates a PhaseManager object, which wraps a MIDCA object
myMidca = base.PhaseManager(world, display=DISPLAY_FUNC, verbose=2, metaEnabled=True)
# Add phases by name
for phase in ["Simulate", "Perceive", "Interpret1", "Interpret2", "Interpret3", "Eval", "Cleanup", "Intend", "Plan", "Act"]:
myMidca.append_phase(phase)
# Add the modules which instantiate basic operation
#myMidca.append_module("Simulate", simulator.MidcaActionSimulator())
myMidca.append_module("Simulate", simulator.NBeaconsActionSimulator(wind=WIND_ENABLED,wind_dir=WIND_DIR,wind_strength=WIND_STRENGTH,dim=DIMENSION,wind_schedule=wind_schedule))
myMidca.append_module("Simulate", simulator.ASCIIWorldViewer(DISPLAY_FUNC))
myMidca.append_module("Perceive", perceive.PerfectObserver())
myMidca.append_module("Interpret1", note.StateDiscrepancyDetector())
myMidca.append_module("Interpret2", assess.SimpleNBeaconsExplain())
myMidca.append_module("Interpret3", guide.SimpleNBeaconsGoalManager())
#myMidca.append_module("Interpret", assess.SimpleNBeaconsExplain())
#myMidca.append_module("Interpret", assess.SimpleNBeaconsExplain())
#myMidca.append_module("Interpret", guide.UserGoalInput())
myMidca.append_module("Interpret3", guide.NBeaconsGoalGenerator(numbeacons=2,goalList=goal_list))
myMidca.append_module("Eval", evaluate.NBeaconsDataRecorder())
myMidca.append_module("Cleanup", simulator.NBeaconsSimulator(beacon_fail_rate=BEACON_FAIL_RATE))
myMidca.append_module("Intend", intend.SimpleIntend())
myMidca.append_module("Plan", planning.HeuristicSearchPlanner())
#myMidca.append_module("Plan", planning.PyHopPlanner(nbeacons_util.pyhop_state_from_world,
# nbeacons_util.pyhop_tasks_from_goals,
# DECLARE_METHODS_FUNC,
# DECLARE_OPERATORS_FUNC)) # set up planner for sample domain
myMidca.append_module("Act", act.NBeaconsSimpleAct())
for phase in ["Monitor", "Interpret", "Intend", "Plan", "Control"]:
myMidca.append_meta_phase(phase)
# add meta layer modules
myMidca.append_meta_module("Monitor", monitor.MRSimpleMonitor())
myMidca.append_meta_module("Interpret", interpret.MRSimpleDetect())
#myMidca.append_meta_module("Interpret", interpret.MRSimpleGoalGenForGoalTrans())
myMidca.append_meta_module("Intend", metaintend.MRSimpleIntend())
myMidca.append_meta_module("Plan", plan.MRSimplePlanner())
myMidca.append_meta_module("Control", control.MRSimpleControl())
# Set world viewer to output text
myMidca.set_display_function(nbeacons_util.drawNBeaconsScene)
# Tells the PhaseManager to copy and store MIDCA states so they can be accessed later.
# Note: Turning this on drastically increases MIDCA's running time.
myMidca.storeHistory = False
myMidca.mem.logEachAccess = False
# Initialize and start running!
myMidca.init()
myMidca.initGoalGraph(cmpFunc = GOAL_GRAPH_CMP_FUNC)
myMidca.run()
|
StarcoderdataPython
|
115525
|
<filename>venv/Lib/site-packages/baron/future.py
import re
def has_print_function(tokens):
for pos in range(len(tokens)):
if tokens_define_print_function(tokens[pos:]):
return True
return False
def tokens_define_print_function(tokens):
token = iter(tokens)
try:
if next(token)[0] != 'FROM':
return False
if next(token)[0:2] != ('NAME', '__future__'):
return False
if next(token)[0] != 'IMPORT':
return False
current_token = next(token)
# ignore LEFT_PARENTHESIS token
if current_token[0] == 'LEFT_PARENTHESIS':
current_token = next(token)
while (current_token[0] == 'NAME'):
if current_token[1] == 'print_function':
return True
# ignore AS and NAME tokens if present
# anyway, ignore COMMA token
if next(token)[0] == 'AS':
next(token)
next(token)
current_token = next(token)
except StopIteration:
pass
return False
def replace_print_by_name(tokens):
def is_print(token):
return token[0] == 'PRINT'
return [('NAME', 'print') if is_print(x) else x for x in tokens]
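# Hedged illustration (not in the original file): a hand-built token stream in the
# (TYPE, value) shape these helpers inspect; real baron tokens may carry more fields.
if __name__ == '__main__':
    fake_tokens = [('FROM', 'from'), ('NAME', '__future__'),
                   ('IMPORT', 'import'), ('NAME', 'print_function')]
    print(has_print_function(fake_tokens))               # True
    print(replace_print_by_name([('PRINT', 'print')]))   # [('NAME', 'print')]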
|
StarcoderdataPython
|
185048
|
<reponame>openearth/hydro-osm
__all__ = ["check", "tasks", "filter", "io", "config", "log", "utm"]
|
StarcoderdataPython
|
14770
|
<gh_stars>0
"Used to reference the nested workspaces for examples in /WORKSPACE"
ALL_EXAMPLES = [
"angular",
"app",
"kotlin",
"nestjs",
"parcel",
"protocol_buffers",
"user_managed_deps",
"vendored_node",
"vendored_node_and_yarn",
"web_testing",
"webapp",
"worker",
]
|
StarcoderdataPython
|
3369833
|
<reponame>trakken/gtm_manager
# pylint: disable=missing-docstring
from gtm_manager.folder import GTMFolder
def test_init(mock_service):
service, responses = mock_service("folders_get.json")
folder_get = responses[0]
folder = GTMFolder(
path="accounts/1234/containers/1234/workspace/1/folders/1", service=service
)
assert folder.containerId == folder_get.get("containerId")
assert folder.notes == folder_get.get("notes", "")
assert folder.workspaceId == folder_get.get("workspaceId")
assert folder.tagManagerUrl == folder_get.get("tagManagerUrl")
assert folder.fingerprint == folder_get.get("fingerprint")
assert folder.folderId == folder_get.get("folderId")
assert folder.accountId == folder_get.get("accountId")
assert folder.name == folder_get.get("name")
folder = GTMFolder(folder=folder_get, service=service)
assert folder.containerId == folder_get.get("containerId")
assert folder.notes == folder_get.get("notes", "")
assert folder.workspaceId == folder_get.get("workspaceId")
assert folder.tagManagerUrl == folder_get.get("tagManagerUrl")
assert folder.fingerprint == folder_get.get("fingerprint")
assert folder.folderId == folder_get.get("folderId")
assert folder.accountId == folder_get.get("accountId")
assert folder.name == folder_get.get("name")
def test_update():
pass
def test_delete(mock_service):
service, _ = mock_service("folders_get.json", "echo_request_body")
folder = GTMFolder(
path="accounts/1234/containers/1234/workspace/1/folders/1", service=service
)
folder.delete()
|
StarcoderdataPython
|
3242606
|
"""Generate the Go code to parse and serialize a mapry object graph."""
import collections
import re
from typing import List, Set
import icontract
from icontract import ensure
import mapry
import mapry.naming
WARNING = "// File automatically generated by mapry. DO NOT EDIT OR APPEND!"
def comment(text: str) -> str:
"""
Comment out the given the text.
:param text: of the comment
:return: non-indented comment
"""
comment_lines = [] # type: List[str]
for line in text.splitlines():
if line.strip():
comment_lines.append('// {}'.format(line))
else:
comment_lines.append('//')
return '\n'.join(comment_lines)
def type_repr(a_type: mapry.Type, go: mapry.Go) -> str:
"""
Generate the Go type representation of the given mapry type.
:param a_type: in mapry
:param go: Go settings
:return: Go type as a string
"""
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-branches
if isinstance(a_type, mapry.Boolean):
return "bool"
elif isinstance(a_type, mapry.Integer):
return "int64"
elif isinstance(a_type, mapry.Float):
return "float64"
elif isinstance(a_type, mapry.String):
return "string"
elif isinstance(a_type, mapry.Path):
return "string"
elif isinstance(a_type, mapry.Date):
return "time.Time"
elif isinstance(a_type, mapry.Time):
return "time.Time"
elif isinstance(a_type, mapry.Datetime):
return "time.Time"
elif isinstance(a_type, mapry.TimeZone):
return "*time.Location"
elif isinstance(a_type, mapry.Duration):
return "time.Duration"
elif isinstance(a_type, mapry.Array):
return "[]{}".format(type_repr(a_type=a_type.values, go=go))
elif isinstance(a_type, mapry.Map):
return "map[string]{}".format(type_repr(a_type=a_type.values, go=go))
elif isinstance(a_type, mapry.Class):
return "*{}".format(mapry.naming.ucamel_case(identifier=a_type.name))
elif isinstance(a_type, mapry.Embed):
return mapry.naming.ucamel_case(identifier=a_type.name)
else:
raise NotImplementedError(
"Unhandled Go represenation of a type: {}".format(type(a_type)))
@ensure(lambda result: not result.endswith('\n'))
def escaped_str(text: str) -> str:
r"""
Convert the ``text`` into a Go escaped string.
The result will include the prefix and suffix quotes.
:param text: to be converted
:return: Go string literal
>>> escaped_str(text='testme')
'"testme"'
>>> escaped_str(text='test"me')
'"test\\"me"'
"""
# yapf: disable
mapping = collections.OrderedDict([
('\\', '\\\\'),
('"', '\\"'),
('\a', '\\a'),
('\b', '\\b'),
('\t', '\\t'),
('\n', '\\n'),
('\v', '\\v'),
('\f', '\\f'),
('\r', '\\r'),
('\x1B', '\\e')])
# yapf: enable
result = text
for src, tgt in mapping.items():
result = result.replace(src, tgt)
return '"{}"'.format(result)
@ensure(lambda result: not result.endswith('\n'))
def ticked_str(text: str) -> str:
"""
Convert the ``text`` into a backticked Go string literal.
If there are backticks in the text, the generated string literal
is split into multiple lines.
:param text: to be converted
:return: Go string literal
>>> ticked_str(text='testme')
'`testme`'
>>> print(ticked_str(text='test`me'))
`test`+"`"+
`me`
"""
tick_parts = text.split('`')
parts = [] # type: List[str]
for i, part in enumerate(tick_parts):
if i == 0:
parts.append('`{}`'.format(part))
else:
parts.append('+"`"+\n `{}`'.format(part))
return ''.join(parts)
@ensure(lambda result: not result.endswith('\n'))
def import_declarations(import_set: Set[str]) -> str:
"""
Generate the import declaration(s) given the import specs.
:param import_set: import specs
:return: Go import declaration
>>> import_declarations(set())
''
>>> import_declarations({'time'})
'import "time"'
>>> print(import_declarations({'time', 'regexp'}))
import (
"regexp"
"time"
)
"""
if len(import_set) == 0:
return ''
elif len(import_set) == 1:
return 'import "{}"'.format(list(import_set)[0])
else:
parts = ['import (\n']
for package in sorted(import_set):
parts.append(' "{}"\n'.format(package))
parts.append(')')
return ''.join(parts)
class AutoID:
"""Keep track of parsing identifiers."""
def __init__(self) -> None:
"""Initialize with a zero identifier."""
self._next_id = 0
@ensure(
lambda result: re.match(r'^0|[1-9][0-9]*$', result),
enabled=icontract.SLOW)
def next_identifier(self) -> str:
"""
Generate the next identifier.
:return: the generated identifier
"""
result = self._next_id
self._next_id += 1
return str(result)
_NONPOINTER_TYPES = (
mapry.Boolean, mapry.Integer, mapry.Float, mapry.String, mapry.Path,
mapry.Date, mapry.Time, mapry.Datetime, mapry.Duration, mapry.Embed)
_POINTER_TYPES = (mapry.TimeZone, mapry.Array, mapry.Map, mapry.Class)
def is_pointer_type(a_type: mapry.Type) -> bool:
"""
Check whether the type is represented as a pointer in Go.
:param a_type: mapry type definition
:return: True if the type is represented with a pointer
"""
if isinstance(a_type, _NONPOINTER_TYPES):
return False
elif isinstance(a_type, _POINTER_TYPES):
return True
else:
raise NotImplementedError(
"Unhandled the mapry type definition: {}".format(a_type))
|
StarcoderdataPython
|
156211
|
<filename>pyFM/optimize/base_functions.py
import numpy as np
def descr_preservation(C, descr1_red, descr2_red):
"""
Compute the descriptor preservation constraint
Parameters
---------------------
C : (K2,K1) Functional map
descr1 : (K1,p) descriptors on first basis
    descr2 : (K2,p) descriptors on second basis
Output
---------------------
energy : descriptor preservation squared norm
"""
return 0.5 * np.square(C @ descr1_red - descr2_red).sum()
def descr_preservation_grad(C, descr1_red, descr2_red):
"""
Compute the gradient of the descriptor preservation constraint
Parameters
---------------------
C : (K2,K1) Functional map
descr1 : (K1,p) descriptors on first basis
    descr2 : (K2,p) descriptors on second basis
Output
---------------------
gradient : gradient of the descriptor preservation squared norm
"""
return (C @ descr1_red - descr2_red) @ descr1_red.T
def LB_commutation(C, ev_sqdiff):
"""
Compute the LB commutativity constraint
Parameters
---------------------
C : (K2,K1) Functional map
ev_sqdiff : (K2,K1) [normalized] matrix of squared eigenvalue differences
Output
---------------------
energy : (float) LB commutativity squared norm
"""
return 0.5 * (np.square(C) * ev_sqdiff).sum()
def LB_commutation_grad(C, ev_sqdiff):
"""
Compute the gradient of the LB commutativity constraint
Parameters
---------------------
C : (K2,K1) Functional map
ev_sqdiff : (K2,K1) [normalized] matrix of squared eigenvalue differences
Output
---------------------
gradient : (K2,K1) gradient of the LB commutativity squared norm
"""
return C * ev_sqdiff
def op_commutation(C, op1, op2):
"""
Compute the operator commutativity constraint.
Can be used with descriptor multiplication operator
Parameters
---------------------
C : (K2,K1) Functional map
op1 : (K1,K1) operator on first basis
    op2 : (K2,K2) operator on second basis
Output
---------------------
energy : (float) operator commutativity squared norm
"""
return 0.5 * np.square(C @ op1 - op2 @ C).sum()
def op_commutation_grad(C, op1, op2):
"""
Compute the gradient of the operator commutativity constraint.
Can be used with descriptor multiplication operator
Parameters
---------------------
C : (K2,K1) Functional map
op1 : (K1,K1) operator on first basis
    op2 : (K2,K2) operator on second basis
Output
---------------------
    gradient : (K2,K1) gradient of the operator commutativity squared norm
"""
return op2.T @ (op2 @ C - C @ op1) - (op2 @ C - C @ op1) @ op1.T
def oplist_commutation(C, op_list):
"""
Compute the operator commutativity constraint for a list of pairs of operators
Can be used with a list of descriptor multiplication operator
Parameters
---------------------
C : (K2,K1) Functional map
op_list : list of tuple( (K1,K1), (K2,K2) ) operators on first and second basis
Output
---------------------
energy : (float) sum of operators commutativity squared norm
"""
energy = 0
for (op1, op2) in op_list:
energy += op_commutation(C, op1, op2)
return energy
def oplist_commutation_grad(C, op_list):
"""
Compute the gradient of the operator commutativity constraint for a list of pairs of operators
Can be used with a list of descriptor multiplication operator
Parameters
---------------------
C : (K2,K1) Functional map
op_list : list of tuple( (K1,K1), (K2,K2) ) operators on first and second basis
Output
---------------------
gradient : (K2,K1) gradient of the sum of operators commutativity squared norm
"""
gradient = 0
for (op1, op2) in op_list:
gradient += op_commutation_grad(C, op1, op2)
return gradient
def energy_func_std(C, descr_mu, lap_mu, descr_comm_mu, orient_mu, descr1_red, descr2_red, list_descr, orient_op, ev_sqdiff):
"""
Evaluation of the energy for standard FM computation
Parameters:
----------------------
C : (K2*K1) or (K2,K1) Functional map
descr_mu : scaling of the descriptor preservation term
lap_mu : scaling of the laplacian commutativity term
descr_comm_mu : scaling of the descriptor commutativity term
orient_mu : scaling of the orientation preservation term
descr1 : (K1,p) descriptors on first basis
    descr2 : (K2,p) descriptors on second basis
list_descr : p-uple( (K1,K1), (K2,K2) ) operators on first and second basis
related to descriptors.
orient_op : p-uple( (K1,K1), (K2,K2) ) operators on first and second basis
related to orientation preservation operators.
ev_sqdiff : (K2,K1) [normalized] matrix of squared eigenvalue differences
Output
------------------------
energy : float - value of the energy
"""
k1 = descr1_red.shape[0]
k2 = descr2_red.shape[0]
C = C.reshape((k2,k1))
energy = 0
if descr_mu > 0:
energy += descr_mu * descr_preservation(C, descr1_red, descr2_red)
if lap_mu > 0:
energy += lap_mu * LB_commutation(C, ev_sqdiff)
if descr_comm_mu > 0:
energy += descr_comm_mu * oplist_commutation(C, list_descr)
if orient_mu > 0:
energy += orient_mu * oplist_commutation(C, orient_op)
return energy
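# Hedged aside (not in the original file): energy_func_std takes a flattened
# functional map and returns a scalar, and grad_energy_std returns a flattened
# gradient, so the pair can be plugged into a generic optimizer, e.g.:
#   from scipy.optimize import minimize
#   res = minimize(energy_func_std, C0.ravel(), jac=grad_energy_std,
#                  args=(descr_mu, lap_mu, descr_comm_mu, orient_mu,
#                        descr1_red, descr2_red, list_descr, orient_op, ev_sqdiff))
# where C0 and the remaining arguments are assumed to be prepared by the caller.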
def grad_energy_std(C, descr_mu, lap_mu, descr_comm_mu, orient_mu, descr1_red, descr2_red, list_descr, orient_op, ev_sqdiff):
"""
Evaluation of the gradient of the energy for standard FM computation
Parameters:
----------------------
C : (K2*K1) or (K2,K1) Functional map
descr_mu : scaling of the descriptor preservation term
lap_mu : scaling of the laplacian commutativity term
descr_comm_mu : scaling of the descriptor commutativity term
orient_mu : scaling of the orientation preservation term
descr1 : (K1,p) descriptors on first basis
    descr2 : (K2,p) descriptors on second basis
list_descr : p-uple( (K1,K1), (K2,K2) ) operators on first and second basis
related to descriptors.
orient_op : p-uple( (K1,K1), (K2,K2) ) operators on first and second basis
related to orientation preservation operators.
ev_sqdiff : (K2,K1) [normalized] matrix of squared eigenvalue differences
Output
------------------------
    gradient : (K2*K1) - value of the gradient of the energy
"""
k1 = descr1_red.shape[0]
k2 = descr2_red.shape[0]
C = C.reshape((k2,k1))
gradient = np.zeros_like(C)
if descr_mu > 0:
gradient += descr_mu * descr_preservation_grad(C, descr1_red, descr2_red)
if lap_mu > 0:
gradient += lap_mu * LB_commutation_grad(C, ev_sqdiff)
if descr_comm_mu > 0:
gradient += descr_comm_mu * oplist_commutation_grad(C, list_descr)
if orient_mu > 0:
gradient += orient_mu * oplist_commutation_grad(C, orient_op)
gradient[:,0] = 0
return gradient.reshape(-1)
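# Hedged sanity-check sketch (not part of the original module): evaluates the
# descriptor-preservation term and its gradient on random data; the sizes
# K1, K2, p below are arbitrary.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    K1, K2, p = 4, 5, 7
    C = rng.standard_normal((K2, K1))
    descr1_red = rng.standard_normal((K1, p))
    descr2_red = rng.standard_normal((K2, p))
    print(descr_preservation(C, descr1_red, descr2_red))             # scalar energy
    print(descr_preservation_grad(C, descr1_red, descr2_red).shape)  # (5, 4)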
|
StarcoderdataPython
|
1607518
|
<reponame>PatBall1/DeepForestcast<filename>src/models/2DCNN_test.py<gh_stars>0
"""
SCRIPT FOR TESTING 2DCNN MODELS
"""
import time
import torch
import numpy as np
from datetime import datetime
from CNN import CNNmodel
from Training import ImbalancedDatasetUnderSampler
from Training import test_model
from Testing import *
from Data_maker_loader import *
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
torch.cuda.empty_cache()
hyperparameter_defaults = dict(
region="Junin",
kernel_size=[(5, 5), (5, 5), (3, 3), (3, 3)],
stride=[(2, 2), (1, 1), (1, 1), (1, 1)],
padding=[0, 0, 0, 0],
size=36,
dropout=0.1590181608038966,
levels=8,
batch_size=128,
hidden_dim1=16,
hidden_dim2=256,
hidden_dim3=256,
hidden_dim4=128,
lr=0.004411168121921798,
weight_decay=0,
n_splits=5,
# Set criteria for Early stopping
AUC=True,
BCE_Wloss=False,
FNcond=False,
n_epochs=40,
patience=7,
training_time=11.75,
# Weight on BCE loss
pos_weight=2,
# set ratios of 0:1 labels in Train and Validation data sets
train_times=4,
test_times=4,
# set parameters for the cost of the confusion matrix
# weights on the False Negative Rate
w=10,
# Batch params
stop_batch=1000000,
print_batch=500,
# Set training time period
start_year=19,
end_year=19,
modeltype="2D",
)
region = hyperparameter_defaults["region"]
server = "/rds/user/jgcb3/hpc-work/forecasting/junin"
# server = '/rds/general/project/aandedemand/live/satellite/junin'
# server = "/rds/general/user/jgb116/home/satellite/satellite/junin"
# from where to load tensors for data
wherepath = server + "/data/" + region
# Where to get and save tif map
sourcepath = server + "/deforestation_forecasting/download_data/outputs/" + region
# sourcepath = server + "/data/rasters_junin"
# Where to get model checkpoint
modelpath = (
server + "/models/Junin_models/2D/torch.nn.parallel.data_parallel.DataParallel"
)
bestmodel = "/torch.nn.parallel.data_parallel.DataParallel_8.12.21_11.33_51110015.pt"
checkpoint = modelpath + bestmodel
modelname = checkpoint.split("/", -1)[-1]
# Where to save Test_Roc
picspath = modelpath + "/pics"
checkpoint
# Set model, measurments and scenario parameters
# Set test time period
# predict 2017 images - 2018 labels
start_year = hyperparameter_defaults["start_year"]
end_year = hyperparameter_defaults["end_year"]
# set CNN model parameters
size = hyperparameter_defaults["size"]
DSM = False
# CHOOSE THE INPUT DIMENSIONS - No DSM is (2,8). With DSM is (3,8)
if DSM:
input_dim = 11
else:
input_dim = 10
hidden_dim = [
hyperparameter_defaults["hidden_dim1"],
hyperparameter_defaults["hidden_dim2"],
hyperparameter_defaults["hidden_dim3"],
hyperparameter_defaults["hidden_dim4"],
]
kernel_size = hyperparameter_defaults["kernel_size"]
stride = hyperparameter_defaults["stride"]
padding = hyperparameter_defaults["padding"]
dropout = hyperparameter_defaults["dropout"]
levels = hyperparameter_defaults["levels"]
# set ratios of 0:1 labels in Test data sets
train_times = hyperparameter_defaults["train_times"]
test_times = hyperparameter_defaults["test_times"]
# set parameters for the cost of the confusion matrix
w = hyperparameter_defaults["w"] # weights on the False Negative Rate
perc = (100 * test_times) / (
    test_times + 1
)  # the percentile used for threshold selection; advisable to be 100*times/(times+1), e.g. 80 when test_times=4
# Weight parameter for the weighted BCE loss
pos_weight = hyperparameter_defaults["pos_weight"]
# parameters for testing
stop_batch = hyperparameter_defaults["stop_batch"]
print_batch = hyperparameter_defaults["print_batch"]
batch_size = hyperparameter_defaults["batch_size"]
model = CNNmodel(
input_dim=input_dim,
hidden_dim=hidden_dim,
kernel_size=hyperparameter_defaults["kernel_size"],
levels=[hyperparameter_defaults["levels"]],
dropout=hyperparameter_defaults["dropout"],
# start_year=config["start_year"],
# end_year=config["end_year"],
stride=hyperparameter_defaults["stride"],
padding=hyperparameter_defaults["padding"],
)
model = torch.nn.DataParallel(model)
print("Checkpoint: " + checkpoint)
checkpoint = torch.load(checkpoint)
model.load_state_dict(checkpoint["model_state_dict"])
criterion = torch.nn.BCEWithLogitsLoss(
reduction="mean", pos_weight=torch.tensor(pos_weight)
)
Data = with_DSM(
size=int(size / 2),
start_year=start_year,
end_year=end_year,
wherepath=wherepath,
DSM=DSM,
type=hyperparameter_defaults["modeltype"],
)
indeces = np.array(range(0, len(Data)))
test_sampler = ImbalancedDatasetUnderSampler(
labels=Data.labels, indices=indeces, times=test_times
)
print(datetime.now())
print("Region: " + region)
print(modelname)
print(
"percentage valid pixels in year 20%d with label 1: " % (end_year + 1),
test_sampler.count[1] / sum(test_sampler.count),
)
print("Which correspond to %d number of 1 labeled pixels" % (test_sampler.count[1]))
# ## testing(model, Data, criterion, test_sampler, w, perc, batch_size, stop_batch, print_batch, name = None, path = None)
start = time.time()
outputs, targets, coordinates = testing(
model=model,
Data=Data,
criterion=criterion,
test_sampler=test_sampler,
w=w,
perc=perc,
batch_size=batch_size,
stop_batch=stop_batch,
print_batch=print_batch,
name=modelname,
path=modelpath + "/CNN.CNNmodel",
save=True,
)
# outputs, coordinates = forecasting(model = model,
# Data = Data,
# year = end_year,
# batch_size = batch_size,
# stop_batch = stop_batch,
# print_batch = print_batch)
print("outputs")
print(outputs.shape)
print("coordinates")
print(coordinates.shape)
# for illustrative purposes create mock outputs and coordinates
from torch.distributions import Uniform
valid_pixels = torch.load(wherepath + "/pixels_cord_%d.pt" % (end_year))
m = Uniform(torch.tensor([0.0]), torch.tensor([1.0]))
scores = m.sample((len(valid_pixels),))
scores = scores.squeeze(dim=1)
print("scores")
print(scores.shape)
print("valid_pixels")
print(valid_pixels.shape)
# # Heatmap
# sourcepath = '/rds/general/user/jgb116/home/repos/deforestation_forecasting/data/Hansen'
# heatmap(end_year = end_year,
# outputs = outputs, # was scores, but this is just noise, right?
# coordinates = valid_pixels,
# sourcepath = sourcepath,
# wherepath = wherepath,
# name = modelname+"mock")
print("\n\nEND!Total time (in h):", (time.time() - start) / 3600)
|
StarcoderdataPython
|
3213937
|
from _recast import *
|
StarcoderdataPython
|
1661761
|
<gh_stars>0
import os
import base64
from datetime import date
from mailmerge import MailMerge
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from docx2pdf import convert
from utils import *
from mail import googleAPIcreds
class Automate:
def __init__(self, client, company):
self.userInfo = loadUserInfo()
self.companyInfo = loadCompanyInfo()
self.clientname = client
self.client = self.userInfo[client]
self.company = self.companyInfo[company]
self.InvoiceNum = ''
def genInvoiceNo(self):
self.start = self.client['start']
self.mid = int(self.company['lastInvoiceNo']) + 1
self.mid = str(self.mid)
while len(self.mid) < 4:
self.mid = '0' + self.mid
self.end = self.company['end']
self.InvoiceNum = self.start + self.mid + self.end
self.company.update({'lastInvoiceNo': self.mid})
saveCompanyList(self.companyInfo)
def createDoc(self, jobDesc, startDate, endDate, PO, other=None):
self.PO = PO
self.jobDesc = jobDesc
self.startDate = startDate
self.endDate = endDate
self.dayDiff = endDate - startDate
self.numDays = str(self.dayDiff.days)
self.genInvoiceNo()
self.template = self.company['template']
self.document = MailMerge(self.template)
self.other = other
self.total = str(int(self.company['rate'])*int(self.numDays))
if not self.startDate.month == self.endDate.month:
self.daysWorked = '{:%d/%b} - {:%d/%b}'.format(self.startDate, self.endDate)
else:
self.daysWorked = '{:%d}-{:%d %b}'.format(self.startDate, self.endDate)
self.document.merge(
name = self.client['name'],
userAddress = self.client['address'],
phone = self.client['phone'],
comAddress = self.company['address'],
rate = self.company['rate'],
daysWorked = self.numDays,
Amount = self.total,
total = self.total,
date = '{:%d-%b-%Y}'.format(date.today()),
InvoiceNo = self.InvoiceNum,
workDays = self.daysWorked,
po = self.PO,
jobdesc = self.jobDesc,
sortcode = self.client['sort'],
accnum = self.client['accNum']
)
if self.other != None:
self.document.merge(other=self.other)
self.document.write('Invoices\\'+self.InvoiceNum+'.docx')
def createPDFfromFile(self):
convert('Invoices\\'+self.InvoiceNum+'.docx','Invoices\\'+self.InvoiceNum+'.pdf')
os.remove('Invoices\\'+self.InvoiceNum+'.docx')
def createEmail(self, body=None):
self.body = self.company['body']
if body:
self.body = body
self.msg = MIMEMultipart()
self.msg['From'] = self.client['username']
self.msg['To'] = os.environ.get('my_email') # To be removed for final version
# self.msg['To'] = companyInfo[company]['email']
self.msg['Subject'] = 'Invoice - ' + self.InvoiceNum
self.msg.attach(MIMEText(self.body, 'plain'))
self.attachment = open(os.path.join('Invoices',self.InvoiceNum+'.pdf'), "rb")
self.instance = MIMEBase('application', 'octet-stream')
self.instance.set_payload((self.attachment).read())
encoders.encode_base64(self.instance)
self.instance.add_header('Content-Disposition', f'attachment; filename= {self.InvoiceNum+".pdf"}')
self.msg.attach(self.instance)
self.raw_message = base64.urlsafe_b64encode(self.msg.as_string().encode("utf-8"))
return {'raw': self.raw_message.decode("utf-8")}
def sendEmail(self, msg=None):
self.service = googleAPIcreds()
if not msg:
msg = {'raw': self.raw_message.decode("utf-8")}
try:
self.message = self.service.users().messages().send(userId='me', body=msg).execute()
return self.message
except Exception as e:
print(f'An error occurred: {e}')
return None
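# Hedged aside (not part of the original class): the zero-padding loop in
# Automate.genInvoiceNo is equivalent to str.zfill for numbers below 10000,
# e.g. with lastInvoiceNo '12': str(int('12') + 1).zfill(4) -> '0013'.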
|
StarcoderdataPython
|
1735957
|
<gh_stars>1000+
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from copy import copy, deepcopy
import numpy as np
from addict import Dict
from .fake_quantize_configuration import read_all_fake_quantize_configurations, get_configurations_by_preset, \
get_configurations_by_qscheme, find_fqs_to_unify, add_range_estimator_configs
from .utils import load_hardware_config, merge_nested_dicts, get_ignored_operations
from ...graph.model_utils import get_nodes_by_type, get_node_by_name
from ...graph.node_utils import get_node_input, set_node_value, \
get_node_value, get_node_output, get_node_inputs, get_input_shape, \
get_quantized_input_key, get_input_data_value, get_first_convolutions
from ...graph.special_operations import OPERATIONS_WITH_WEIGHTS, TRANSPOSED_OPERATIONS
from ...graph.transformer import GraphTransformer
from ...utils.logger import get_logger
logger = get_logger(__name__)
def tune_range(a, b, num_bits):
""" Tunes asymmetric quantization range to set zero quant precisely to zero value.
    The function moves the left or right border to do this and never makes the left
    border higher or the right border lower than its original value
:param a: range left border
:param b: range right border
:param num_bits: number of bits to perform quantization
:return tuple with recomputed ranges
"""
level_high = 2 ** num_bits - 1
s = level_high / (b - a)
fval = -a * s
qval = np.round(fval)
with np.errstate(invalid='ignore', divide='ignore'):
ra = np.where(qval < level_high, qval / (qval - level_high) * b, a)
rb = np.where(qval > 0.0, (qval - level_high) / qval * a, b)
range_a = b - ra
range_b = rb - a
mask = np.where(range_a > range_b, 1.0, 0.0)
inv_mask = np.abs(1.0 - mask)
ra = mask * ra + inv_mask * a
rb = inv_mask * rb + mask * b
return ra, rb
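# Hedged aside (not in the original source): for a single channel with range
# (-0.3, 1.0) at 8 bits, tune_range returns approximately (-0.301, 1.0); the left
# border is widened slightly so that the float value 0.0 lands exactly on the
# integer quantization level 59.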
def tune_range_unify_zp(a, b, num_bits):
""" Tunes asymmetric quantization range to unify the zero point of all channels.
    The function is used to satisfy the per-tensor zero-point constraint on KMB (vpu2p0).
    It calculates the average zero point of all channels and tunes the max/min range,
    moving the left or right border without making the left border higher or the
    right border lower than its original value
:param a: range left border
:param b: range right border
:param num_bits: number of bits to perform quantization
:return tuple with recomputed ranges
"""
level_high = 2 ** num_bits - 1
scale = (b - a) / level_high
zero_point = -a / scale
avg_zpts = np.round(np.mean(zero_point))
qval = np.ones_like(a) * avg_zpts
with np.errstate(invalid='ignore', divide='ignore'):
ra = np.where(qval < level_high, qval / (qval - level_high) * b, a)
rb = np.where(qval > 0.0, (qval - level_high) / qval * a, b)
range_a = b - ra
range_b = rb - a
mask = np.where(range_a > range_b, 1.0, 0.0)
inv_mask = np.abs(1.0 - mask)
ra = mask * ra + inv_mask * a
rb = inv_mask * rb + mask * b
return ra, rb
def fill_fake_quantize_node(fq, min_level, max_level, output_low=None, output_high=None):
""" Fills fake quantize input nodes with min/max values
:param fq: fake quantize node to fill
:param min_level: low border of quantization range
:param max_level: high border of quantization range
"""
if output_low is None:
output_low = min_level
if output_high is None:
output_high = max_level
def _update_node_val(port_idx, value):
_node = get_node_input(fq, port_idx)
set_node_value(_node, value)
_update_node_val(1, min_level)
_update_node_val(2, max_level)
_update_node_val(3, output_low)
_update_node_val(4, output_high)
def compute_stats_layouts(config, model, qscheme=None):
"""
Compute stats layouts and hardware configuration
:param config: dictionary with params algo section from toolkit config
:param model: NXModel instance
:return: configuration dictionary
"""
hardware_config = load_hardware_config(config)
fq_configuration = \
read_all_fake_quantize_configurations(config, hardware_config, model)
if not config.preset:
config.preset = 'performance'
if not qscheme:
fq_configuration = get_configurations_by_preset(config, model, fq_configuration)
fq_configuration = add_range_estimator_configs(fq_configuration, config)
else:
fq_configuration = get_configurations_by_qscheme(fq_configuration, qscheme)
# get all fake quantize nodes
fq_nodes = get_nodes_by_type(model, ['FakeQuantize'])
fake_quantize_config = {}
for fq in fq_nodes:
node_input = get_node_input(fq, 0)
is_weights = node_input.type == 'Const'
if is_weights:
fq_config = copy(fq_configuration[fq.name]['weights'])
else:
fq_config = copy(fq_configuration[fq.name]['activations'])
fake_quantize_config[fq.name] = fq_config
if fq.name in config.layerwise_configs[0]:
fq_config = Dict(merge_nested_dicts(fq_config, config.layerwise_configs[0][fq.name]))
fq_config['signed'] = False
if 'level_low' in fq_config and 'level_high' in fq_config and fq_config['level_low'] < 0:
fq_config['signed'] = True
fake_quantize_config[fq.name] = fq_config
fq.levels = compute_levels(fq_config, is_weights)
return fake_quantize_config
def get_value(key, fq_config, default=None):
return fq_config[key] if key in fq_config else default
def compute_levels(fq_config, is_weights):
def_levels = 2 ** get_value('bits', fq_config, 8)
if is_weights and fq_config['mode'] == 'symmetric':
level_low = get_value('level_low', fq_config, -def_levels / 2 + 1)
else:
level_low = get_value('level_low', fq_config, -def_levels / 2)
level_high = get_value('level_high', fq_config, def_levels / 2 - 1)
return int(abs(level_high) + abs(level_low) + 1)
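# Hedged aside (not in the original source): with the default 8-bit configuration,
# symmetric weight quantization uses level_low = -127 and level_high = 127, so
# compute_levels returns 255; the asymmetric/activation case uses -128..127 and
# returns 256.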
def insert_fake_quantize_nodes(config, model, qscheme=None):
""" Inserts fake quantize nodes, fill them according config
:param config: dictionary with params algo section from toolkit config
:param model: NXModel instance
:param qscheme: The quantization scheme generated from the space
:return None
"""
hardware_config = load_hardware_config(config)
ignored_params = {
'skip_model': False,
'scope': [],
'operations': []
}
if config['ignored']:
ignored_params.update(deepcopy(config['ignored']))
if config['model_type']:
ignored_params['operations'] += get_ignored_operations(config['model_type'])
if qscheme:
for key in qscheme:
if qscheme[key]['quantize'] == 0 and key not in ignored_params['scope']:
ignored_params['scope'].append(key)
GraphTransformer(hardware_config).insert_fake_quantize(model, ignored_params)
def get_fake_quantize_input(fake_quantize):
""" Returns input into fake quantize node
:param fake_quantize: fake quantize node
:return Input node of fake quantize node
"""
parent = get_node_input(fake_quantize, 0)
if parent.attrs()['op'] == 'Cast':
parent = get_node_input(parent, 0)
return parent
def get_fake_quantize_input_value(fake_quantize):
""" Returns input into fake quantize node
:param fake_quantize: fake quantize node
:return Input node of fake quantize node
"""
input_node = fake_quantize
if input_node.attrs()['op'] == 'Cast':
input_node = get_node_input(input_node, 0)
return get_input_data_value(input_node, 0)
def get_fake_quantize_first_output(fake_quantize):
""" Returns first output of the fake quantize node (usually used for weights)
:param fake_quantize: fake quantize node
:return metadata of the node which is first output of the fake quantize node
"""
return get_node_output(fake_quantize, 0)[0]
def fix_zero_filters_symmetric(max_level, eps=0.01):
max_range = np.max(max_level)
lower_threshold = np.maximum(8e-5, eps * max_range)
return np.maximum(lower_threshold, max_level)
def fix_zero_filters_asymmetric(max_level, min_level, eps=1e-8):
ranges = max_level - min_level
ranges = ranges if isinstance(ranges, np.ndarray) else np.array([ranges])
min_correction = 8 * 10e-5
corrections = [(np.maximum(eps * rng, rng) - rng) * 0.5 if rng > min_correction
else min_correction for rng in ranges]
max_level = max_level + corrections
min_level = min_level - corrections
return max_level, min_level
def symmetric_range(node, fq, weights_stats,
batch_inputs_stats, fake_quantize_config):
name = get_quantized_input_key(fq)
if node.type == 'Const' or get_input_data_value(fq, 0) is not None:
node_output = get_fake_quantize_first_output(fq)
max_level = weights_stats[node_output.name]['max']
max_level = fix_zero_filters_symmetric(max_level)
min_level = -max_level
elif name in batch_inputs_stats:
max_level = batch_inputs_stats[name]['max']
min_level = batch_inputs_stats[name]['min']
max_level = fix_zero_filters_symmetric(max_level)
signed = fake_quantize_config[fq.name]['signed']
min_level = np.zeros(max_level.shape) if np.all(min_level >= 0) and not signed else \
-max_level * fq.levels / (fq.levels - 2)
else:
raise Exception(
'WARNING: Fake quantize node {} is missed'.format(fq.name))
min_level, max_level = broadcast_fq_values(fq, node, min_level, max_level, fake_quantize_config)
return min_level, max_level
def asymmetric_range(node, fq, weights_stats,
batch_inputs_stats, fake_quantize_config, unify_zp=True):
name = get_quantized_input_key(fq)
if node.type == 'Const' or get_input_data_value(fq, 0) is not None:
node_output = get_fake_quantize_first_output(fq)
max_level = weights_stats[node_output.name]['max']
min_level = weights_stats[node_output.name]['min']
elif name in batch_inputs_stats:
max_level = batch_inputs_stats[name]['max']
min_level = batch_inputs_stats[name]['min']
else:
raise Exception(
'WARNING: Fake quantize node {} is missed'.format(fq.name))
max_level, min_level = fix_zero_filters_asymmetric(max_level, min_level)
min_level = np.where(min_level < 0.0, min_level, 0.0)
max_level = np.where(max_level > 0.0, max_level, 0.0)
if unify_zp:
if name in batch_inputs_stats:
raise Exception(
                'WARNING: unifying the zero point of fake quantize node {} is not supported'.format(fq.name)
)
min_level, max_level = tune_range_unify_zp(
min_level, max_level, fake_quantize_config[fq.name]['bits'])
else:
min_level, max_level = tune_range(
min_level, max_level, fake_quantize_config[fq.name]['bits'])
min_level, max_level = broadcast_fq_values(fq, node, min_level, max_level, fake_quantize_config)
return min_level, max_level
def get_quantized_model(model, create_stats_collector, activations_statistics,
fill_fq_range, config, qscheme=None):
"""
Returns a calibrated low precision model via four steps:
1. Quantize the model
2. Calculate quantization config for FQ nodes
3. Collect the weight stats based on config
4. Calibrate [min, max] for inserted fq nodes
:param model: original model (NXModel instance)
:param create_stats_collector: functor to create function for stats collector callback
:param activations_statistics: precomputed statistics for activations layers
:param fill_fq_range: functor to generate min and max range for fake quantize node
:param config: dictionary with params algo section from toolkit config
"""
# FakeQuantize nodes insertion
insert_fake_quantize_nodes(config, model, qscheme=qscheme)
fake_quantize_config = compute_stats_layouts(config, model, qscheme=qscheme)
# generate a list of fq nodes that require rescaling (first convolutions weight FQs)
fake_quantize_config.update(set_rescaling_factors(config['target_device'], model))
weights_stats_layout = create_stats_collector(fake_quantize_config, model, for_weights=True)
# compute weights statistics
weights_stats = compute_weights_stats(model, weights_stats_layout)
# calculate and fill min and max range for fq nodes
fill_fq_range(model, weights_stats, activations_statistics, fake_quantize_config, config)
return model
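# Minimal usage sketch (illustrative only, not part of the original module):
# 'stats_collector_factory', 'activation_stats' and 'fill_fq_range_fn' are
# placeholder names for the functors and precomputed statistics described in
# the docstring above.
#
# quantized = get_quantized_model(model,
#                                 create_stats_collector=stats_collector_factory,
#                                 activations_statistics=activation_stats,
#                                 fill_fq_range=fill_fq_range_fn,
#                                 config=algo_config)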
def compute_weights_stats(model, stats_layout):
""" Computes weights statistic from provided statistics layout
:param model: NXModel instance
:param stats_layout: dictionary with layer names as keys and
functions list with rules how to compute statistics as values
:return dictionary with layers names as keys and list of evaluated statistics as values"""
# compute weights statistics
weights_stats = {}
for fq_name, stats in stats_layout.items():
fq_node = get_node_by_name(model, fq_name)
if fq_node.type != 'FakeQuantize':
raise Exception('FakeQuantize node for weights is missed')
node = get_fake_quantize_first_output(fq_node)
weights_node = get_node_input(fq_node, 0)
weights_value = get_input_data_value(fq_node, 0)
if weights_node.type != 'Const' and weights_value is None:
raise Exception('Incorrect stats layout for weights:'
' {} is activation'.format(weights_node.name))
if node.name not in weights_stats:
weights_stats[node.name] = {}
for stat_name, stat_fn in stats.items():
weights = weights_value.astype(np.float32)
weights_stats[node.name][stat_name] = stat_fn(weights)
return weights_stats
def broadcast_fq_values(fq, node, min_level, max_level, fq_config):
""" Reshapes weights and activations in perchannel mode for next fusing
:param fq: current Fake Quantize node
:param node: input node for Fake Quantize
:param min_level:
:param max_level:
:param fq_config: for checking special Fake Quantize
:return tuple of reshaped min and max values"""
min_level = np.array(min_level)
max_level = np.array(max_level)
if not min_level.shape and not max_level.shape:
return min_level, max_level
# get input shape from data node
input_shape = get_input_shape(fq, 0)
bounds_shape = np.ones(len(input_shape), dtype=np.int32)
if node.type == 'Const':
output_node = get_fake_quantize_first_output(fq)
if output_node.type in [op['type'] for op in TRANSPOSED_OPERATIONS]:
bounds_shape[1] = input_shape[1]
else:
bounds_shape[0] = input_shape[0]
else:
if fq_config[fq.name]['granularity'] == 'perchannel':
bounds_shape[1] = input_shape[1]
min_level = min_level.reshape(bounds_shape)
max_level = max_level.reshape(bounds_shape)
return min_level, max_level
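# Illustrative note (not in the original): for a Const weight input of shape
# (64, 3, 7, 7) feeding a non-transposed operation, bounds_shape becomes
# [64, 1, 1, 1], so length-64 per-channel min/max vectors are reshaped to
# broadcast against the weight tensor in the fused FakeQuantize.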
def set_rescaling_factors(target_device, model, scaling_factor=2.0):
"""
Generate a list of weight FQ nodes for input convolutions
for further rescaling of weights/FQs.
Skip if target device is not CPU.
:param target_device: target device name
:param model: NXModel instance
:param scaling_factor: rescaling factor for first convolution nodes
"""
fqs_to_rescale = []
if target_device not in ['CPU', 'ANY'] or not get_nodes_by_type(model, ['Convolution', ]):
return {'scaling_factor': 1.0,
'fqs_to_rescale': fqs_to_rescale}
input_nodes = get_nodes_by_type(model, ['Parameter'])
input_convolutions = get_first_convolutions(input_nodes)
for node in input_convolutions:
fqs_to_rescale.append(get_node_input(node, 1).name)
conv_nodes_to_rescale = get_nodes_by_type(model, [op['type'] for op in OPERATIONS_WITH_WEIGHTS])
conv_fqs_to_rescale = [get_node_input(node, 1).name for node in conv_nodes_to_rescale if
'need_rescale' in node and node['need_rescale']]
fqs_to_rescale.extend(conv_fqs_to_rescale)
return {'scaling_factor': scaling_factor,
'fqs_to_rescale': fqs_to_rescale}
def unify_fq_scales(model, config):
def _custom_broadcast(arrays_list):
arrays_list = np.broadcast_arrays(*list(arr.T for arr in arrays_list))
return [arr.T for arr in arrays_list]
for _, fqs in find_fqs_to_unify(model, config):
min_levels = []
max_levels = []
for fq in fqs:
fq = get_node_by_name(model, fq)
fq_inputs = get_node_inputs(fq)[1:]
min_levels.append(get_node_value(fq_inputs[0]))
max_levels.append(get_node_value(fq_inputs[1]))
orig_shapes = [s.shape for s in min_levels]
min_levels = _custom_broadcast(min_levels)
max_levels = _custom_broadcast(max_levels)
for i, fq in enumerate(fqs):
fq = get_node_by_name(model, fq)
min_level = np.min(min_levels, axis=0).reshape(orig_shapes[i])
max_level = np.max(max_levels, axis=0).reshape(orig_shapes[i])
fill_fake_quantize_node(fq, min_level, max_level)
def create_renamed_layers_mapping(model, stats_layout):
changed_names_map = {}
for layer_name in stats_layout:
node = get_node_by_name(model, layer_name)
if node is not None and 'orig_node_name' in node:
changed_names_map[node.name] = node['orig_node_name']
return changed_names_map
|
StarcoderdataPython
|
1771069
|
"""
================================================================
Demo of the histogram function's different ``histtype`` settings
================================================================
* Histogram with step curve that has a color fill.
* Histogram with step curve with no fill.
* Histogram with custom and unequal bin widths.
* Two histograms with stacked bars.
Selecting different bin counts and sizes can significantly affect the
shape of a histogram. The Astropy docs have a great section on how to
select these parameters:
http://docs.astropy.org/en/stable/visualization/histogram.html
"""
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(19680801)
mu_x = 200
sigma_x = 25
x = np.random.normal(mu_x, sigma_x, size=100)
mu_w = 200
sigma_w = 10
w = np.random.normal(mu_w, sigma_w, size=100)
fig, axs = plt.subplots(nrows=2, ncols=2)
axs[0, 0].hist(x, 20, density=True, histtype='stepfilled', facecolor='g',
alpha=0.75)
axs[0, 0].set_title('stepfilled')
axs[0, 1].hist(x, 20, density=True, histtype='step', facecolor='g',
alpha=0.75)
axs[0, 1].set_title('step')
axs[1, 0].hist(x, density=True, histtype='barstacked', rwidth=0.8)
axs[1, 0].hist(w, density=True, histtype='barstacked', rwidth=0.8)
axs[1, 0].set_title('barstacked')
# Create a histogram by providing the bin edges (unequally spaced).
bins = [100, 150, 180, 195, 205, 220, 250, 300]
axs[1, 1].hist(x, bins, density=True, histtype='bar', rwidth=0.8)
axs[1, 1].set_title('bar, unequal bins')
fig.tight_layout()
plt.show()
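# A small optional sketch (not part of the original demo): NumPy can suggest
# bin edges automatically, which is one way to explore the bin-count
# sensitivity mentioned in the docstring above.
# auto_edges = np.histogram_bin_edges(x, bins='auto')
# axs[1, 1].hist(x, bins=auto_edges, density=True, histtype='bar')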
|
StarcoderdataPython
|
130228
|
<reponame>stanford-futuredata/sketchstore<filename>python/testdata/msftmunge.py
import numpy as np
import pandas as pd
import random
from tqdm import tqdm
import math
import os
import json
fname = "/Users/edwardgan/Documents/Projects/datasets/msft/mb200k.tsv"
outfname = "/Users/edwardgan/Documents/Projects/datasets/msft/mb200k.feather"
column_names = [
'PipelineInfo_IngestionTime',
'SDKVersion',
'APIVersion',
'DeviceHash_Id',
'AppInfo_Language',
'AppInfo_Version',
'DeviceInfo_Make',
'DeviceInfo_OsBuild',
'DeviceInfo_OsVersion',
'DeviceInfo_Model',
'DeviceInfo_NetworkType',
'DeviceInfo_NetworkProvider',
'UserInfo_Language',
'UserInfo_TimeZone',
'eventpriority',
'records_received_count',
'records_tried_to_send_count',
'records_sent_count',
'olsize',
'olsize_start',
'olc_start',
'ol_w',
'olc',
'records_dropped_count',
# 'UserHash_Id',
'inq',
'infl',
'r_count',
'PipelineInfo_ClientCountry',
'EventInfo_InitId',
'EventInfo_Sequence',
'e_meth',
'TenantId',
'DataPackageId',
'EventInfo_Time',
'r_no_name',
'r_size',
'r_ban',
'r_kl',
'r_ps',
'r_403',
'r_inv',
'd_assert',
'd_bad_tenant',
'd_disk_full',
'd_io_fail',
'd_bond_fail',
'd_disk_off',
'd_unk']
df = pd.read_csv(
fname,
sep="\t",
names=column_names
)
f_metrics = [
"DeviceInfo_OsBuild",
"DeviceInfo_NetworkProvider"
]
q_metric = "records_received_count"
dims = [
"TenantId",
"AppInfo_Version",
"UserInfo_TimeZone",
"DeviceInfo_NetworkType",
]
df[q_metric] = df[q_metric].fillna(0)
for cur_f in tqdm(f_metrics + dims):
df[cur_f] = df[cur_f].fillna("na")
vc = df[cur_f].value_counts()
vc_rep = dict(zip(
vc.index,
range(len(vc))
))
df.replace({cur_f: vc_rep}, inplace=True)
df[dims + f_metrics + [q_metric]].to_feather(
outfname
)
|
StarcoderdataPython
|
97829
|
<filename>hand_writing_detection.py
import cv2
# loads the handwriting
img = cv2.imread("phrase_handwritten.png")
# img_rot = img[::-1]
img_rot = cv2.rotate(img, cv2.ROTATE_180)
cv2.imshow("Rotated Image", img_rot)
# cv2.imshow("inverted image", img_rot)
# create a copy of the image
img_copy = img_rot.copy()
# convert image to grayscale
gray_img = cv2.cvtColor(img_rot, cv2.COLOR_BGR2GRAY)
# read image in binary and display
thresh, binary_img = cv2.threshold(gray_img, 0, 250, cv2.THRESH_OTSU)
cv2.imshow("Binary images", binary_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Find a contour on the handwriting image
contours_list, hierarchy = cv2.findContours(
binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
# print(hierarchy)
for cnt in contours_list:
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(img_rot, (x, y), (x + w, y + h), (0, 255, 255), 2)
cv2.imshow("Contours marked on RGB image", img_rot)
cv2.imwrite("contours.jpg", img_rot)
cv2.waitKey(0)
cv2.destroyAllWindows()
# reading and creating reference image
ref_gray = cv2.imread("typed_B.png", cv2.IMREAD_GRAYSCALE)
ret, ref_binary_image = cv2.threshold(ref_gray, 0, 255, cv2.THRESH_OTSU)
cv2.imshow("Reference image", ref_binary_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
print("Here i am")
# ensure the reference image contains exactly one contour
ref_contour_list, ref_hierarchy = cv2.findContours(
ref_binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
if len(ref_contour_list) == 1:
    ref_contour = ref_contour_list[0]
else:
    import sys
    print("Reference image must contain exactly one contour. Please check!")
    sys.exit()
# find contours in the main image that match the reference contour
counter = 0
# save location of the matched contours in the tuple
distance_list = []
for cnt in contours_list:
retval = cv2.matchShapes(cnt, ref_contour, cv2.CONTOURS_MATCH_I1, 0)
# print(dist_list)
distance_list.append(retval)
counter = counter + 1
min_distance = min(distance_list)
print(
"The minimum distance of the reference contour with a contour in the main image is "
+ str(min_distance)
)
ind_min_dist = distance_list.index(min_distance)
required_cnt = contours_list[ind_min_dist]
x, y, w, h = cv2.boundingRect(required_cnt)
cv2.rectangle(img_copy, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.imshow("Detected B", img_copy)
cv2.imwrite("dectected_b.jpg", img_copy)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
StarcoderdataPython
|
1701421
|
<reponame>cohortfsllc/cohort-cocl2-sandbox<filename>pnacl/scripts/parse_llvm_test_report.py
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parse the report output of the llvm test suite or regression tests,
filter out known failures, and check for new failures
pnacl/scripts/parse_llvm_test_report.py [options]+ reportfile
"""
import csv
import logging
import optparse
import os
import sys
import StringIO
# exclude these tests
EXCLUDES = {}
def ParseCommandLine(argv):
parser = optparse.OptionParser(prog=argv[0])
parser.add_option('-x', '--exclude', action='append', dest='excludes',
default=[],
help='Add list of excluded tests (expected fails)')
parser.add_option('-c', '--check-excludes', action='store_true',
default=False, dest='check_excludes',
help='Report tests which unexpectedly pass')
parser.add_option('-v', '--verbose', action='store_true',
default=False, dest='verbose',
help='Print compilation/run logs of failing tests')
parser.add_option('-p', '--build-path', dest='buildpath',
help='Path to test-suite build directory')
parser.add_option('-a', '--attribute', dest='attributes', action='append',
default=[],
help='Add attribute of test configuration (e.g. arch)')
parser.add_option('-t', '--testsuite', action='store_true', dest='testsuite',
default=False)
parser.add_option('-l', '--lit', action='store_true', dest='lit',
default=False)
options, args = parser.parse_args(argv[1:])
return options, args
def Fatal(text):
print >> sys.stderr, text
sys.exit(1)
def IsFullname(name):
return name.find('/') != -1
def GetShortname(fullname):
return fullname.split('/')[-1]
def ParseTestsuiteCSV(filecontents):
''' Parse a CSV file output by llvm testsuite with a record for each test.
returns 2 dictionaries:
1) a mapping from the short name of the test (without the path) to
a list of full pathnames that match it. It contains all the tests.
2) a mapping of all test failures, mapping full test path to the type
of failure (compile or exec)
'''
alltests = {}
failures = {}
reader = csv.DictReader(StringIO.StringIO(filecontents))
testcount = 0
for row in reader:
testcount += 1
fullname = row['Program']
shortname = GetShortname(fullname)
fullnames = alltests.get(shortname, [])
fullnames.append(fullname)
alltests[shortname] = fullnames
if row['CC'] == '*':
failures[fullname] = 'compile'
elif row['Exec'] == '*':
failures[fullname] = 'exec'
logging.info('%d tests, %d failures', testcount, len(failures))
return alltests, failures
def ParseLit(filecontents):
''' Parse the output of the LLVM regression test runner (lit/make check).
returns a dictionary mapping test name to the type of failure
(Clang, LLVM, LLVMUnit, etc)
'''
alltests = {}
failures = {}
testcount = 0
for line in filecontents.splitlines():
l = line.split()
if len(l) < 4:
continue
if l[0] in ('PASS:', 'FAIL:', 'XFAIL:', 'XPASS:', 'UNSUPPORTED:'):
testcount += 1
fullname = ''.join(l[1:4])
shortname = GetShortname(fullname)
fullnames = alltests.get(shortname, [])
fullnames.append(fullname)
alltests[shortname] = fullnames
if l[0] in ('FAIL:', 'XPASS:'):
failures[fullname] = l[1]
logging.info('%d tests, %d failures', testcount, len(failures))
return alltests, failures
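# Illustrative example of the lit lines this parser expects (format assumed
# from the token handling above, not taken from a real log):
#   PASS: LLVM :: CodeGen/X86/add.ll (1 of 2)
#   FAIL: LLVM :: CodeGen/X86/sub.ll (2 of 2)
# For the FAIL line, fullname becomes 'LLVM::CodeGen/X86/sub.ll' and the
# recorded failure type is 'LLVM'.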
def ParseExcludeFile(filename, config_attributes,
check_test_names=False, alltests=None):
''' Parse a list of excludes (known test failures). Excludes can be specified
by shortname (e.g. fbench) or by full path
(e.g. SingleSource/Benchmarks/Misc/fbench) but if there is more than
one test with the same shortname, the full name must be given.
Errors are reported if an exclude does not match exactly one test
in alltests, or if there are duplicate excludes.
Returns:
Number of failures in the exclusion file.
'''
errors = 0
f = open(filename)
for line in f:
line = line.strip()
if not line: continue
if line.startswith('#'): continue
tokens = line.split()
if len(tokens) > 1:
testname = tokens[0]
attributes = set(tokens[1].split(','))
if not attributes.issubset(config_attributes):
continue
else:
testname = line
if testname in EXCLUDES:
logging.error('Duplicate exclude: %s', line)
errors += 1
if IsFullname(testname):
shortname = GetShortname(testname)
if shortname not in alltests or testname not in alltests[shortname]:
logging.error('Exclude %s not found in list of tests', line)
errors += 1
fullname = testname
else:
# short name is specified
shortname = testname
if shortname not in alltests:
logging.error('Exclude %s not found in list of tests', shortname)
errors += 1
if len(alltests[shortname]) > 1:
logging.error('Exclude %s matches more than one test: %s. ' +
'Specify full name in exclude file.',
shortname, str(alltests[shortname]))
errors += 1
fullname = alltests[shortname][0]
if fullname in EXCLUDES:
logging.error('Duplicate exclude %s', fullname)
errors += 1
EXCLUDES[fullname] = filename
f.close()
logging.info('Parsed %s: now %d total excludes', filename, len(EXCLUDES))
return errors
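# Illustrative exclude-file content (made up for this sketch; the format is
# inferred from the parsing logic above):
#   # lines starting with '#' are comments
#   fbench
#   SingleSource/Benchmarks/Misc/oourafft x86-32,pnacl
# The optional second token is a comma-separated attribute set; such a line is
# ignored unless those attributes are a subset of the configured attributes.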
def DumpFileContents(name):
error = not os.path.exists(name)
logging.debug(name)
try:
logging.debug(open(name, 'rb').read())
except IOError:
error = True
if error:
logging.error("Couldn't open file: %s", name)
# Make the bots go red
logging.error('@@@STEP_FAILURE@@@')
def PrintTestsuiteCompilationResult(path, test):
''' Print the compilation and run results for the specified test in the
LLVM testsuite.
These results are left in several different log files by the testsuite
driver, and are different for MultiSource/SingleSource tests
'''
logging.debug('RESULTS for %s', test)
testpath = os.path.join(path, test)
testdir, testname = os.path.split(testpath)
outputdir = os.path.join(testdir, 'Output')
logging.debug('COMPILE phase')
logging.debug('OBJECT file phase')
if test.startswith('MultiSource'):
for f in os.listdir(outputdir):
if f.endswith('llvm.o.compile'):
DumpFileContents(os.path.join(outputdir, f))
elif test.startswith('SingleSource'):
DumpFileContents(os.path.join(outputdir, testname + '.llvm.o.compile'))
else:
Fatal('ERROR: unrecognized test type ' + test)
logging.debug('PEXE generation phase')
DumpFileContents(os.path.join(outputdir,
testname + '.nonfinal.pexe.compile'))
logging.debug('PEXE finalization phase')
DumpFileContents(os.path.join(outputdir, testname + '.final.pexe.finalize'))
logging.debug('TRANSLATION phase')
DumpFileContents(os.path.join(outputdir, testname + '.nexe.translate'))
logging.debug('EXECUTION phase')
logging.debug('native output:')
DumpFileContents(os.path.join(outputdir, testname + '.out-nat'))
logging.debug('pnacl output:')
DumpFileContents(os.path.join(outputdir, testname + '.out-pnacl'))
def main(argv):
options, args = ParseCommandLine(argv)
if len(args) != 1:
Fatal('Must specify filename to parse')
filename = args[0]
return Report(vars(options), filename=filename)
def Report(options, filename=None, filecontents=None):
loglevel = logging.INFO
if options['verbose']:
loglevel = logging.DEBUG
logging.basicConfig(level=loglevel, format='%(message)s')
if not (filename or filecontents):
Fatal('ERROR: must specify filename or filecontents')
failures = {}
logging.debug('Full test results:')
if not filecontents:
with open(filename, 'rb') as f:
      filecontents = f.read()
# get the set of tests and failures
if options['testsuite']:
if options['verbose'] and options['buildpath'] is None:
Fatal('ERROR: must specify build path if verbose output is desired')
alltests, failures = ParseTestsuiteCSV(filecontents)
check_test_names = True
elif options['lit']:
alltests, failures = ParseLit(filecontents)
check_test_names = True
else:
Fatal('Must specify either testsuite (-t) or lit (-l) output format')
# get the set of excludes
exclusion_failures = 0
for f in options['excludes']:
exclusion_failures += ParseExcludeFile(f, set(options['attributes']),
check_test_names=check_test_names,
alltests=alltests)
# Regardless of the verbose option, do a dry run of
# PrintTestsuiteCompilationResult so we can catch errors when intermediate
# filenames in the compilation pipeline change.
# E.g. https://code.google.com/p/nativeclient/issues/detail?id=3659
if len(alltests) and options['testsuite']:
logging.disable(logging.INFO)
PrintTestsuiteCompilationResult(options['buildpath'],
alltests.values()[0][0])
logging.disable(logging.NOTSET)
# intersect them and check for unexpected fails/passes
unexpected_failures = 0
unexpected_passes = 0
for tests in alltests.itervalues():
for test in tests:
if test in failures:
if test not in EXCLUDES:
unexpected_failures += 1
logging.info('[ FAILED ] %s: %s failure', test, failures[test])
if options['testsuite']:
PrintTestsuiteCompilationResult(options['buildpath'], test)
elif test in EXCLUDES:
unexpected_passes += 1
logging.info('%s: unexpected success', test)
logging.info('%d unexpected failures %d unexpected passes',
unexpected_failures, unexpected_passes)
if exclusion_failures:
logging.info('%d problems in known_failures exclusion files',
exclusion_failures)
if options['check_excludes']:
return unexpected_failures + unexpected_passes + exclusion_failures > 0
return unexpected_failures + exclusion_failures > 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
StarcoderdataPython
|
3370761
|
from . import VEXObject
import logging
l = logging.getLogger("pyvex.expr")
class IRExpr(VEXObject):
"""
IR expressions in VEX represent operations without side effects.
"""
tag = None
def __init__(self):
VEXObject.__init__(self)
def pp(self):
        print(self.__str__())
@property
def child_expressions(self):
"""
A list of all of the expressions that this expression ends up evaluating.
"""
expressions = [ ]
for k in self.__slots__:
v = getattr(self, k)
if isinstance(v, IRExpr):
expressions.append(v)
expressions.extend(v.child_expressions)
return expressions
@property
def constants(self):
"""
A list of all of the constants that this expression ends up using.
"""
constants = [ ]
for k in self.__slots__:
v = getattr(self, k)
if isinstance(v, IRExpr):
constants.extend(v.constants)
elif isinstance(v, IRConst):
constants.append(v)
return constants
def result_size(self, tyenv):
return type_sizes[self.result_type(tyenv)]
def result_type(self, tyenv):
raise NotImplementedError
@staticmethod
def _from_c(c_expr):
if c_expr == ffi.NULL or c_expr[0] == ffi.NULL:
return None
tag_int = c_expr.tag
try:
return tag_to_class[tag_int]._from_c(c_expr)
except KeyError:
raise PyVEXError('Unknown/unsupported IRExprTag %s\n' % ints_to_enums[tag_int])
_translate = _from_c
@staticmethod
def _to_c(expr):
try:
tag_int = enums_to_ints[expr.tag]
return tag_to_class[tag_int]._to_c(expr)
except KeyError:
raise PyVEXError('Unknown/unsupported IRExprTag %s\n' % expr.tag)
def typecheck(self, tyenv):
return self.result_type(tyenv)
class Binder(IRExpr):
"""
Used only in pattern matching within Vex. Should not be seen outside of Vex.
"""
__slots__ = ['binder']
tag = 'Iex_Binder'
def __init__(self, binder):
IRExpr.__init__(self)
self.binder = binder
def __str__(self):
return "Binder"
@staticmethod
def _from_c(c_expr):
        return Binder(c_expr.Iex.Binder.binder)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Binder(expr.binder)
def result_type(self, tyenv):
return 'Ity_INVALID'
class VECRET(IRExpr):
tag = 'Iex_VECRET'
def __init__(self):
IRExpr.__init__(self)
def __str__(self):
return "VECRET"
@staticmethod
def _from_c(c_expr):
return VECRET()
@staticmethod
def _to_c(expr):
return pvc.IRExpr_VECRET()
def result_type(self, tyenv):
return 'Ity_INVALID'
class GSPTR(IRExpr):
tag = 'Iex_GSPTR'
def __init__(self):
IRExpr.__init__(self)
def __str__(self):
return "GSPTR"
@staticmethod
def _from_c(c_expr):
return GSPTR()
@staticmethod
def _to_c(expr):
return pvc.IRExpr_GSPTR()
def result_type(self, tyenv):
return 'Ity_INVALID'
class GetI(IRExpr):
"""
Read a guest register at a non-fixed offset in the guest state.
"""
__slots__ = ['descr', 'ix', 'bias']
tag = 'Iex_GetI'
def __init__(self, descr, ix, bias):
IRExpr.__init__(self)
self.descr = descr
self.ix = ix
self.bias = bias
@property
def description(self):
return self.descr
@property
def index(self):
return self.ix
def __str__(self):
return "GetI(%s)[%s,%s]" % (self.descr, self.ix, self.bias)
@staticmethod
def _from_c(c_expr):
descr = IRRegArray._from_c(c_expr.Iex.GetI.descr)
ix = IRExpr._from_c(c_expr.Iex.GetI.ix)
bias = c_expr.Iex.GetI.bias
return GetI(descr, ix, bias)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_GetI(IRRegArray._to_c(expr.descr),
IRExpr._to_c(expr.ix),
expr.bias)
def result_type(self, tyenv):
return self.descr.elemTy
class RdTmp(IRExpr):
"""
Read the value held by a temporary.
"""
__slots__ = ['tmp']
tag = 'Iex_RdTmp'
def __init__(self, tmp):
IRExpr.__init__(self)
self.tmp = tmp
def __str__(self):
return "t%d" % self.tmp
@staticmethod
def _from_c(c_expr):
return RdTmp(c_expr.Iex.RdTmp.tmp)
@staticmethod
def _to_c(expr):
return pvc.IRExpr_RdTmp(expr.tmp)
def result_type(self, tyenv):
return tyenv.lookup(self.tmp)
class Get(IRExpr):
"""
Read a guest register, at a fixed offset in the guest state.
"""
__slots__ = ['offset', 'ty']
tag = 'Iex_Get'
def __init__(self, offset, ty):
IRExpr.__init__(self)
self.offset = offset
self.ty = ty
@property
def type(self):
return self.ty
def __str__(self, reg_name=None):
if reg_name:
return "GET:%s(%s)" % (self.ty[4:], reg_name)
else:
return "GET:%s(offset=%s)" % (self.ty[4:], self.offset)
@staticmethod
def _from_c(c_expr):
return Get(c_expr.Iex.Get.offset,
ints_to_enums[c_expr.Iex.Get.ty])
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Get(expr.offset,
enums_to_ints[expr.ty])
def result_type(self, tyenv):
return self.ty
class Qop(IRExpr):
"""
A quaternary operation (4 arguments).
"""
__slots__ = ['op', 'args']
tag = 'Iex_Qop'
def __init__(self, op, args):
IRExpr.__init__(self)
self.op = op
self.args = args
def __str__(self):
return "%s(%s)" % (self.op[4:], ','.join(str(a) for a in self.args))
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [ ])
expressions.extend(self.args)
return expressions
@staticmethod
def _from_c(c_expr):
return Qop(ints_to_enums[c_expr.Iex.Qop.details.op],
[IRExpr._from_c(arg)
for arg in [c_expr.Iex.Qop.details.arg1,
c_expr.Iex.Qop.details.arg2,
c_expr.Iex.Qop.details.arg3,
c_expr.Iex.Qop.details.arg4]])
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Qop(enums_to_ints[expr.op],
*[IRExpr._to_c(arg)
for arg in expr.args])
def result_type(self, tyenv):
return op_type(self.op)
def typecheck(self, tyenv):
resty, (arg1ty, arg2ty, arg3ty, arg4ty) = op_arg_types(self.op)
arg1ty_real = self.args[0].typecheck(tyenv)
arg2ty_real = self.args[1].typecheck(tyenv)
arg3ty_real = self.args[2].typecheck(tyenv)
arg4ty_real = self.args[3].typecheck(tyenv)
if arg1ty_real is None or arg2ty_real is None or arg3ty_real is None or arg4ty_real is None:
return None
if arg1ty_real != arg1ty:
l.debug("First arg of %s must be %s", self.op, arg1ty)
return None
if arg2ty_real != arg2ty:
l.debug("Second arg of %s must be %s", self.op, arg2ty)
return None
if arg3ty_real != arg3ty:
l.debug("Third arg of %s must be %s", self.op, arg3ty)
return None
if arg4ty_real != arg4ty:
l.debug("Fourth arg of %s must be %s", self.op, arg4ty)
return None
return resty
class Triop(IRExpr):
"""
A ternary operation (3 arguments)
"""
__slots__ = ['op', 'args']
tag = 'Iex_Triop'
def __init__(self, op, args):
IRExpr.__init__(self)
self.op = op
self.args = args
def __str__(self):
return "%s(%s)" % (self.op[4:], ','.join(str(a) for a in self.args))
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [ ])
expressions.extend(self.args)
return expressions
@staticmethod
def _from_c(c_expr):
return Triop(ints_to_enums[c_expr.Iex.Triop.details.op],
[IRExpr._from_c(arg)
for arg in [c_expr.Iex.Triop.details.arg1,
c_expr.Iex.Triop.details.arg2,
c_expr.Iex.Triop.details.arg3]])
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Triop(enums_to_ints[expr.op],
*[IRExpr._to_c(arg)
for arg in expr.args])
def result_type(self, tyenv):
return op_type(self.op)
def typecheck(self, tyenv):
resty, (arg1ty, arg2ty, arg3ty) = op_arg_types(self.op)
arg1ty_real = self.args[0].typecheck(tyenv)
arg2ty_real = self.args[1].typecheck(tyenv)
arg3ty_real = self.args[2].typecheck(tyenv)
if arg1ty_real is None or arg2ty_real is None or arg3ty_real is None:
return None
if arg1ty_real != arg1ty:
l.debug("First arg of %s must be %s", self.op, arg1ty)
return None
if arg2ty_real != arg2ty:
l.debug("Second arg of %s must be %s", self.op, arg2ty)
return None
if arg3ty_real != arg3ty:
l.debug("Third arg of %s must be %s", self.op, arg3ty)
return None
return resty
class Binop(IRExpr):
"""
A binary operation (2 arguments).
"""
__slots__ = ['op', 'args']
tag = 'Iex_Binop'
def __init__(self, op, args):
IRExpr.__init__(self)
self.op = op
self.args = args
def __str__(self):
return "%s(%s)" % (self.op[4:], ','.join(str(a) for a in self.args))
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [ ])
expressions.extend(self.args)
return expressions
@staticmethod
def _from_c(c_expr):
return Binop(ints_to_enums[c_expr.Iex.Binop.op],
[IRExpr._from_c(arg)
for arg in [c_expr.Iex.Binop.arg1,
c_expr.Iex.Binop.arg2]])
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Binop(enums_to_ints[expr.op],
*[IRExpr._to_c(arg)
for arg in expr.args])
def result_type(self, tyenv):
return op_type(self.op)
def typecheck(self, tyenv):
resty, (arg1ty, arg2ty) = op_arg_types(self.op)
arg1ty_real = self.args[0].typecheck(tyenv)
arg2ty_real = self.args[1].typecheck(tyenv)
if arg1ty_real is None or arg2ty_real is None:
return None
if arg1ty_real != arg1ty:
l.debug("First arg of %s must be %s", self.op, arg1ty)
return None
if arg2ty_real != arg2ty:
l.debug("Second arg of %s must be %s", self.op, arg2ty)
return None
return resty
class Unop(IRExpr):
"""
A unary operation (1 argument).
"""
__slots__ = ['op', 'args']
tag = 'Iex_Unop'
def __init__(self, op, args):
IRExpr.__init__(self)
self.op = op
self.args = args
def __str__(self):
return "%s(%s)" % (self.op[4:], ','.join(str(a) for a in self.args))
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [ ])
expressions.extend(self.args)
return expressions
@staticmethod
def _from_c(c_expr):
return Unop(ints_to_enums[c_expr.Iex.Unop.op],
[IRExpr._from_c(c_expr.Iex.Unop.arg)])
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Unop(enums_to_ints[expr.op],
IRExpr._to_c(expr.args[0]))
def result_type(self, tyenv):
return op_type(self.op)
def typecheck(self, tyenv):
resty, (arg1ty,) = op_arg_types(self.op)
arg1ty_real = self.args[0].typecheck(tyenv)
if arg1ty_real is None:
return None
if arg1ty_real != arg1ty:
l.debug("First arg of %s must be %s", self.op, arg1ty)
return None
return resty
class Load(IRExpr):
"""
A load from memory.
"""
__slots__ = ['end', 'ty', 'addr']
tag = 'Iex_Load'
def __init__(self, end, ty, addr):
IRExpr.__init__(self)
self.end = end
self.ty = ty
self.addr = addr
@property
def endness(self):
return self.end
@property
def type(self):
return self.ty
def __str__(self):
return "LD%s:%s(%s)" % (self.end[-2:].lower(), self.ty[4:], self.addr)
@staticmethod
def _from_c(c_expr):
return Load(ints_to_enums[c_expr.Iex.Load.end],
ints_to_enums[c_expr.Iex.Load.ty],
IRExpr._from_c(c_expr.Iex.Load.addr))
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Load(enums_to_ints[expr.end],
enums_to_ints[expr.ty],
IRExpr._to_c(expr.addr))
def result_type(self, tyenv):
return self.ty
def typecheck(self, tyenv):
addrty = self.addr.typecheck(tyenv)
if addrty is None:
return None
if addrty != tyenv.wordty:
l.debug("Address must be word-sized")
return None
return self.ty
class Const(IRExpr):
"""
A constant expression.
"""
__slots__ = ['con']
tag = 'Iex_Const'
def __init__(self, con):
IRExpr.__init__(self)
self.con = con
def __str__(self):
return str(self.con)
@staticmethod
def _from_c(c_expr):
return Const(IRConst._from_c(c_expr.Iex.Const.con))
@staticmethod
def _to_c(expr):
return pvc.IRExpr_Const(IRConst._to_c(expr.con))
def result_type(self, tyenv):
return self.con.type
class ITE(IRExpr):
"""
An if-then-else expression.
"""
__slots__ = ['cond', 'iffalse', 'iftrue']
tag = 'Iex_ITE'
def __init__(self, cond, iffalse, iftrue):
IRExpr.__init__(self)
self.cond = cond
self.iffalse = iffalse
self.iftrue = iftrue
def __str__(self):
return "ITE(%s,%s,%s)" % (self.cond, self.iftrue, self.iffalse)
@staticmethod
def _from_c(c_expr):
return ITE(IRExpr._from_c(c_expr.Iex.ITE.cond),
IRExpr._from_c(c_expr.Iex.ITE.iffalse),
IRExpr._from_c(c_expr.Iex.ITE.iftrue))
@staticmethod
def _to_c(expr):
return pvc.IRExpr_ITE(IRExpr._to_c(expr.cond),
IRExpr._to_c(expr.iftrue),
IRExpr._to_c(expr.iffalse))
def result_type(self, tyenv):
return self.iftrue.result_type(tyenv)
def typecheck(self, tyenv):
condty = self.cond.typecheck(tyenv)
falsety = self.iffalse.typecheck(tyenv)
truety = self.iftrue.typecheck(tyenv)
if condty is None or falsety is None or truety is None:
return None
if condty != 'Ity_I1':
l.debug("guard must be Ity_I1")
return None
if falsety != truety:
l.debug("false condition must be same type as true condition")
return None
return falsety
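# Minimal construction sketch (illustrative, not part of the original module);
# temporaries t0..t2 are placeholders:
#   ite = ITE(RdTmp(0), iffalse=RdTmp(1), iftrue=RdTmp(2))
#   str(ite)  # -> 'ITE(t0,t2,t1)'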
class CCall(IRExpr):
"""
A call to a pure (no side-effects) helper C function.
"""
__slots__ = ['retty', 'cee', 'args']
tag = 'Iex_CCall'
def __init__(self, retty, cee, args):
IRExpr.__init__(self)
self.retty = retty
self.cee = cee
self.args = tuple(args)
@property
def ret_type(self):
return self.retty
@property
def callee(self):
return self.cee
def __str__(self):
return "%s(%s):%s" % (self.cee, ','.join(str(a) for a in self.args), self.retty)
@property
def child_expressions(self):
expressions = sum((a.child_expressions for a in self.args), [ ])
expressions.extend(self.args)
return expressions
@staticmethod
def _from_c(c_expr):
i = 0
args = []
while True:
arg = c_expr.Iex.CCall.args[i]
if arg == ffi.NULL:
break
args.append(IRExpr._from_c(arg))
i += 1
return CCall(ints_to_enums[c_expr.Iex.CCall.retty],
IRCallee._from_c(c_expr.Iex.CCall.cee),
tuple(args))
@staticmethod
def _to_c(expr):
args = [IRExpr._to_c(arg) for arg in expr.args]
return pvc.IRExpr_CCall(IRCallee._to_c(expr.cee),
enums_to_ints[expr.retty],
mkIRExprVec[len(args)](*args))
def result_type(self, tyenv):
return self.retty
_op_type_cache = {}
def op_type(op):
try:
return _op_type_cache[op]
except KeyError:
out_int = ffi.new('IRType *')
unused = ffi.new('IRType *')
pvc.typeOfPrimop(enums_to_ints[op], out_int, unused, unused, unused, unused)
out = ints_to_enums[out_int[0]]
_op_type_cache[op] = out
return out
def op_arg_types(op):
res_ty = ffi.new('IRType *')
arg1_ty = ffi.new('IRType *')
arg2_ty = ffi.new('IRType *')
arg3_ty = ffi.new('IRType *')
arg4_ty = ffi.new('IRType *')
arg2_ty[0] = 0x1100
arg3_ty[0] = 0x1100
arg4_ty[0] = 0x1100
pvc.typeOfPrimop(enums_to_ints[op], res_ty, arg1_ty, arg2_ty, arg3_ty, arg4_ty)
if arg2_ty[0] == 0x1100:
return (ints_to_enums[res_ty[0]],
(ints_to_enums[arg1_ty[0]],))
elif arg3_ty[0] == 0x1100:
return (ints_to_enums[res_ty[0]],
(ints_to_enums[arg1_ty[0]],
ints_to_enums[arg2_ty[0]],))
elif arg4_ty[0] == 0x1100:
return (ints_to_enums[res_ty[0]],
(ints_to_enums[arg1_ty[0]],
ints_to_enums[arg2_ty[0]],
ints_to_enums[arg3_ty[0]],))
else:
return (ints_to_enums[res_ty[0]],
(ints_to_enums[arg1_ty[0]],
ints_to_enums[arg2_ty[0]],
ints_to_enums[arg3_ty[0]],
ints_to_enums[arg4_ty[0]],))
from .const import IRConst
from .enums import IRCallee, IRRegArray, enums_to_ints, ints_to_enums, type_sizes
from .errors import PyVEXError
from . import ffi, pvc
tag_to_class = {
enums_to_ints['Iex_Binder']: Binder,
enums_to_ints['Iex_Get']: Get,
enums_to_ints['Iex_GetI']: GetI,
enums_to_ints['Iex_RdTmp']: RdTmp,
enums_to_ints['Iex_Qop']: Qop,
enums_to_ints['Iex_Triop']: Triop,
enums_to_ints['Iex_Binop']: Binop,
enums_to_ints['Iex_Unop']: Unop,
enums_to_ints['Iex_Load']: Load,
enums_to_ints['Iex_Const']: Const,
enums_to_ints['Iex_ITE']: ITE,
enums_to_ints['Iex_CCall']: CCall,
enums_to_ints['Iex_GSPTR']: GSPTR,
enums_to_ints['Iex_VECRET']: VECRET,
}
mkIRExprVec = [
pvc.mkIRExprVec_0,
pvc.mkIRExprVec_1,
pvc.mkIRExprVec_2,
pvc.mkIRExprVec_3,
pvc.mkIRExprVec_4,
pvc.mkIRExprVec_5,
pvc.mkIRExprVec_6,
pvc.mkIRExprVec_7,
pvc.mkIRExprVec_8
]
|
StarcoderdataPython
|
1616036
|
import yaml
y = yaml.safe_load(open("kustomize/katib-db/base/katib-db-deployment.yaml"))
y["spec"]["template"]["spec"]["containers"][0]["securityContext"]={}
y["spec"]["template"]["spec"]["containers"][0]["securityContext"]["privileged"]=True
y["spec"]["template"]["spec"]["securityContext"]={}
y["spec"]["template"]["spec"]["securityContext"]["seLinuxOptions"]={}
y["spec"]["template"]["spec"]["securityContext"]["seLinuxOptions"]["level"]="s0:c13,c12"
yaml.dump(y, open("kustomize/katib-db/base/katib-db-deployment.yaml",'w'), default_flow_style=False)
|
StarcoderdataPython
|
4829853
|
from datetime import datetime
from typing import Optional
from constance import config
from django.db import transaction
from posthog.async_migrations.definition import AsyncMigrationOperation
from posthog.async_migrations.setup import DEPENDENCY_TO_ASYNC_MIGRATION
from posthog.celery import app
from posthog.constants import AnalyticsDBMS
from posthog.models.async_migration import AsyncMigration, MigrationStatus
def execute_op(op: AsyncMigrationOperation, query_id: str, rollback: bool = False):
"""
sync execute the migration against the analytics db (ClickHouse) and then
run the side effect if it is defined
"""
sql = op.rollback if rollback else op.sql
if op.database == AnalyticsDBMS.CLICKHOUSE:
execute_op_clickhouse(sql, query_id, op.timeout_seconds)
else:
execute_op_postgres(sql, query_id)
def execute_op_clickhouse(sql: str, query_id: str, timeout_seconds: int):
from ee.clickhouse.client import sync_execute
sync_execute(f"/* {query_id} */ " + sql, settings={"max_execution_time": timeout_seconds})
def execute_op_postgres(sql: str, query_id: str):
from django.db import connection
with connection.cursor() as cursor:
cursor.execute(f"/* {query_id} */ " + sql)
def process_error(migration_instance: AsyncMigration, error: Optional[str]):
update_async_migration(
migration_instance=migration_instance,
status=MigrationStatus.Errored,
last_error=error or "",
finished_at=datetime.now(),
)
if getattr(config, "ASYNC_MIGRATIONS_DISABLE_AUTO_ROLLBACK"):
return
from posthog.async_migrations.runner import attempt_migration_rollback
attempt_migration_rollback(migration_instance)
def trigger_migration(migration_instance: AsyncMigration, fresh_start: bool = True):
from posthog.tasks.async_migrations import run_async_migration
task = run_async_migration.delay(migration_instance.name, fresh_start)
update_async_migration(
migration_instance=migration_instance, celery_task_id=str(task.id),
)
def force_stop_migration(migration_instance: AsyncMigration, error: str = "Force stopped by user"):
"""
In theory this is dangerous, as it can cause another task to be lost
`revoke` with `terminate=True` kills the process that's working on the task
and there's no guarantee the task will not already be done by the time this happens.
See: https://docs.celeryproject.org/en/stable/reference/celery.app.control.html#celery.app.control.Control.revoke
However, this is generally ok for us because:
1. Given these are long-running migrations, it is statistically unlikely it will complete during in between
this call and the time the process is killed
2. Our Celery tasks are not essential for the functioning of PostHog, meaning losing a task is not the end of the world
"""
app.control.revoke(migration_instance.celery_task_id, terminate=True)
process_error(migration_instance, error)
def rollback_migration(migration_instance: AsyncMigration, force: bool = False):
from posthog.async_migrations.runner import attempt_migration_rollback
attempt_migration_rollback(migration_instance, force=force)
def complete_migration(migration_instance: AsyncMigration):
update_async_migration(
migration_instance=migration_instance,
status=MigrationStatus.CompletedSuccessfully,
finished_at=datetime.now(),
progress=100,
)
from posthog.async_migrations.runner import run_next_migration
next_migration = DEPENDENCY_TO_ASYNC_MIGRATION.get(migration_instance.name)
if next_migration:
run_next_migration(next_migration)
def mark_async_migration_as_running(migration_instance: AsyncMigration):
update_async_migration(
migration_instance=migration_instance,
last_error="",
current_query_id="",
progress=0,
current_operation_index=0,
status=MigrationStatus.Running,
started_at=datetime.now(),
finished_at=None,
)
def update_async_migration(
migration_instance: AsyncMigration,
last_error: Optional[str] = None,
current_query_id: Optional[str] = None,
celery_task_id: Optional[str] = None,
progress: Optional[int] = None,
current_operation_index: Optional[int] = None,
status: Optional[int] = None,
started_at: Optional[datetime] = None,
finished_at: Optional[datetime] = None,
lock_row: bool = True,
):
def execute_update():
instance = migration_instance
if lock_row:
instance = AsyncMigration.objects.select_for_update().get(pk=migration_instance.pk)
else:
instance.refresh_from_db()
if last_error is not None:
instance.last_error = last_error
if current_query_id is not None:
instance.current_query_id = current_query_id
if celery_task_id is not None:
instance.celery_task_id = celery_task_id
if progress is not None:
instance.progress = progress
if current_operation_index is not None:
instance.current_operation_index = current_operation_index
if status is not None:
instance.status = status
if started_at is not None:
instance.started_at = started_at
if finished_at is not None:
instance.finished_at = finished_at
instance.save()
if lock_row:
with transaction.atomic():
execute_update()
else:
execute_update()
|
StarcoderdataPython
|
1666798
|
<filename>hwt/serializer/hwt/context.py
class ValueWidthRequirementScope():
"""
Context manager which temporarily swaps the _valueWidthRequired on specified context
.. code-block:: python
with ValueWidthRequirementScope(ctx, True):
#...
"""
def __init__(self, ctx, val):
self.ctx = ctx
self.val = val
def __enter__(self):
self.orig = self.ctx._valueWidthRequired
self.ctx._valueWidthRequired = self.val
def __exit__(self, exc_type, exc_val, exc_tb):
self.ctx._valueWidthRequired = self.orig
|
StarcoderdataPython
|
3258439
|
<reponame>rgalhama/wordrep_cmcl2020
import sys, os
import pandas as pd
import numpy as np
from scipy import stats
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.outliers_influence import variance_inflation_factor
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
from configs.config_loader import load_config, opts2record
data_fname="merged_indegree_train_th%.2faoa_wordbank_eng_understands_prop0.5_nouns_clean_means.csv"
header_svd="win;thr;"
header_sgns="dyn;thr"
def write_output(model_type, config, datafile, similarity_threshold, pearsonr, pearson_pval, outputpath):
params_model=load_config(config)
header, record=opts2record(params_model["counts"],params_model[model_type], params_model["post"])
header="model;"+header
record=model_type+";"+record
header+="similarity_threshold;"
record+=similarity_threshold+";"
header+="pearsonr;pearson_pval"
record+="{0:.4f};".format(pearsonr)
record+="{0:.4f}".format(pearson_pval)
outfile=os.path.join(outputpath, "params_corr_"+os.path.basename(datafile))
with open(outfile, "w") as fh:
fh.write(header+"\n")
fh.write(record+"\n")
print("Correlation saved at:\n %s\n"%outfile)
def main(model_type, config, similarity_th, datafile, outputpath):
#Read in data with AoA and computed index
df = pd.read_csv(datafile, sep=";")
X=df["index"]
Y=df["aoa"]
    # Compute log frequency
df["logfreq"] = np.log(df.freq)
#Compute stats
pearsonr, pearson_pval = stats.pearsonr(X, Y)
#Write output
write_output(model_type, config, datafile, similarity_th, pearsonr, pearson_pval, outputpath)
if __name__ == "__main__":
args=sys.argv[1:]
if len(args) != 5:
print("Usage: corr_index.py <model_type> <config> <similarity_th> <datafile> <output_path>")
exit(-1)
main(*args)
#svd
#/home/rgalhama/Data_Research/results_wordrep/eng_0_60/svd_thr_100_win_5_dim_500_neg_1/post_eig_0_neg_1/
#0.7
#/home/rgalhama/Data_Research/results_wordrep/eng_0_60/svd_thr_100_win_5_dim_500_neg_1/post_eig_0_neg_1/evaluation_aoa_cdi_0.5_understands_nouns
|
StarcoderdataPython
|
3292146
|
<reponame>Food-X-Technologies/foodx_devops_tools<filename>tests/manual/test_auth.py
# Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
"""Run authentication tests against an actual Azure Cloud instance"""
import json
import logging
import pytest
from foodx_devops_tools.azure.cloud import (
AzureCredentials,
login_service_principal,
)
from foodx_devops_tools.azure.cloud.exceptions import AzureAuthenticationError
log = logging.getLogger(__name__)
@pytest.mark.asyncio
async def test_clean(service_principal_credentials):
await login_service_principal(service_principal_credentials)
@pytest.mark.asyncio
async def test_failed():
MOCK_CREDENTIALS = AzureCredentials(
name="mock_user",
userid="123abc",
secret="verysecret",
subscription="this_subscription",
tenant="this_tenant",
)
with pytest.raises(AzureAuthenticationError):
await login_service_principal(MOCK_CREDENTIALS)
|
StarcoderdataPython
|
1700066
|
<filename>venv/lib/python3.8/site-packages/pip/_vendor/requests/certs.py
/home/runner/.cache/pip/pool/9d/74/55/abd0ed1a6bffd4061bc234eef54ae001c749bf4e59be435e6a82ce6716
|
StarcoderdataPython
|
163024
|
alpha = "abcdefghijklmnopqrstuvwxyz"
key = "<KEY>"
message = input("enter the message : ")
cipher = ""
for i in message:
cipher+=key[alpha.index(i)]
print(cipher)
|
StarcoderdataPython
|
95041
|
<filename>comparison/sciclone/convert_outputs.py
import argparse
import json
import csv
from collections import defaultdict
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
import inputparser
def convert_clusters(scresultsfn, varid_map):
clusters = defaultdict(list)
garbage = []
with open(scresultsfn) as F:
R = csv.DictReader(F, delimiter='\t')
for row in R:
chrom, pos, cluster = row['chr'], int(row['st']), row['cluster']
varid = varid_map['%s_%s' % (chrom, pos)]
if cluster == 'NA':
garbage.append(varid)
else:
cluster = int(cluster)
clusters[cluster].append(varid)
cids = sorted(clusters.keys())
assert set(cids) == set(range(1, len(cids) + 1))
clusters = [clusters[cid] for cid in cids]
return (clusters, garbage)
def build_variant_to_varid_map(variants):
varid_map = {'%s_%s' % (V['chrom'], V['pos']): int(V['id'][1:]) for V in variants.values()}
# Ensure no duplicate entries exist.
assert len(varid_map) == len(variants)
return varid_map
def add_missing_sex_variants_to_garbage(variants, clusters, garbage):
# I run SciClone without sex variants, since I don't know how to specify
# total numbers of locus according to their inputs -- maybe I need to make a
# quasi-CNA covering all of X and Y in males, but I looked into this and
# couldn't figure it out. As such, just mark all sex variants as garbage.
existing = set([V for C in clusters for V in C] + list(garbage))
vids = sorted([int(V[1:]) for V in variants.keys()])
for vid, var in variants.items():
vid = int(vid[1:])
if vid in existing:
continue
assert var['chrom'] in ('X', 'Y')
garbage.append(vid)
def write_results(clusters, garbage, params_fn_orig, params_fn_modified):
params = inputparser.load_params(params_fn_orig)
for K in ('clusters', 'garbage'):
if K in params:
del params[K]
params['clusters'] = clusters
params['garbage'] = garbage
with open(params_fn_modified, 'w') as F:
json.dump(params, F)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('ssm_fn')
parser.add_argument('scresults_fn')
parser.add_argument('params_fn_orig')
parser.add_argument('params_fn_modified')
args = parser.parse_args()
variants = inputparser.load_ssms(args.ssm_fn)
varid_map = build_variant_to_varid_map(variants)
clusters, garbage = convert_clusters(args.scresults_fn, varid_map)
add_missing_sex_variants_to_garbage(variants, clusters, garbage)
write_results(clusters, garbage, args.params_fn_orig, args.params_fn_modified)
main()
|
StarcoderdataPython
|
3322586
|
<gh_stars>100-1000
import gc
import unittest
from comtypes.client import PumpEvents
class PumpEventsTest(unittest.TestCase):
def test_pump_events_doesnt_leak_cycles(self):
gc.collect()
for i in range(3):
PumpEvents(0.05)
ncycles = gc.collect()
self.assertEqual(ncycles, 0)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
3379508
|
<reponame>DevTotti/pizzafeed<gh_stars>0
"""from apscheduler.schedulers.blocking import BlockingScheduler
from optimized import *
sched = BlockingScheduler()
@sched.scheduled_job('interval', minutes=30)
def index():
#sc.run_pending()
main()
sched.start()
"""
#clock: python clock.py
|
StarcoderdataPython
|
1648781
|
<reponame>meawoppl/babyfood
# From https://www.digikey.com/Web%20Export/Supplier%20Content/Vishay_8026/PDF/VishayBeyschlag_SolderPad.pdf?redirected=1
from babyfood.pcb.PCBUnits import mil
from babyfood.features.basic import CenteredRectangle, FilledCenteredRectangle
from babyfood.components.ABC import AbstractSMAComponent
resistorsParams = {
"0102": (0.65, 1.10, 1.40, 2.85),
"0204": (1.50, 1.25, 1.75, 4.00),
"0207": (2.80, 2.20, 2.20, 7.20),
"0402": (0.25, 0.60, 0.55, 1.45),
"0603": (0.50, 0.95, 0.95, 2.40),
"0805": (0.65, 1.10, 1.40, 2.85),
"1206": (1.50, 1.25, 1.75, 4.00)}
class SMAResistor(AbstractSMAComponent):
def __init__(self, codeString):
# Names on datasheet ref'ed above
_g, _y, _x, _z = resistorsParams[codeString]
# Names useful to us
self._w = _y
self._h = _x
shift = (_g / 2) + (_y / 2)
self._outline = int(codeString[0:2]) * 0.254, int(codeString[2:4]) * 0.254
print(self._outline)
self._centers = ((-shift, 0),
(+shift, 0))
def draw(self, ctx):
pad = FilledCenteredRectangle(self._w, self._h)
mask = FilledCenteredRectangle(self._w - 0.1, self._h - 0.1)
outline = CenteredRectangle(*self._outline)
ctx.setActiveLayer("overlay")
outline.draw(ctx)
for cp in self._centers:
with ctx.transform.translation(*cp):
ctx.setActiveLayer("copper")
pad.draw(ctx)
ctx.setActiveLayer("mask")
mask.draw(ctx)
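# Minimal usage sketch (illustrative; 'ctx' stands for a babyfood drawing
# context providing transform/setActiveLayer, which is not shown here):
#   r0805 = SMAResistor("0805")
#   r0805.draw(ctx)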
|
StarcoderdataPython
|
1680704
|
import os
import torch
import random
import numpy as np
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from PIL import Image
class Dataset():
def __init__(self, train_dir, basic_types = None, shuffle = True, single_channel = False):
self.train_dir = train_dir
self.basic_types = basic_types
self.shuffle = shuffle
self.single_channel = single_channel
def get_loader(self, sz, bs, num_workers = 1):
if(self.single_channel):
dt = {
'input' : transforms.Compose([
transforms.Resize((sz, sz)),
transforms.Grayscale(1),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
]),
'target' : transforms.Compose([
transforms.Resize((sz, sz)),
transforms.Grayscale(1),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
])
}
else:
dt = {
'input' : transforms.Compose([
transforms.Resize((sz, sz)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
]),
'target' : transforms.Compose([
transforms.Resize((sz, sz)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
}
if(self.basic_types == 'Pix2Pix'):
input_transform = dt['input']
target_transform = dt['target']
train_dataset = Pix2Pix_Dataset(self.train_dir[0], self.train_dir[1], input_transform, target_transform)
train_loader = DataLoader(train_dataset, batch_size = bs, shuffle = self.shuffle, num_workers = num_workers)
returns = (train_loader)
elif(self.basic_types == 'CycleGan'):
input_transform = dt['input']
target_transform = dt['target']
train_dataset = CycleGan_Dataset(self.train_dir[0], self.train_dir[1], input_transform, target_transform)
train_loader = DataLoader(train_dataset, batch_size = bs, shuffle = self.shuffle, num_workers = num_workers)
returns = (train_loader)
return returns
class Pix2Pix_Dataset():
def __init__(self, input_dir, target_dir, input_transform, target_transform):
self.input_dir = input_dir
self.target_dir = target_dir
self.input_transform = input_transform
self.target_transform = target_transform
self.image_name_list = []
for file in os.listdir(input_dir):
if(file.endswith('.png') or file.endswith('.jpeg') or file.endswith('.jpg') or file.endswith('.bmp')):
self.image_name_list.append(file)
def __len__(self):
return len(self.image_name_list)
def __getitem__(self, idx):
if(self.target_dir == None):
input_img = Image.open(os.path.join(self.input_dir, self.image_name_list[idx]))
target_img = input_img.copy()
else:
input_img = Image.open(os.path.join(self.input_dir, self.image_name_list[idx]))
target_img = Image.open(os.path.join(self.target_dir, self.image_name_list[idx]))
input_img = self.input_transform(input_img)
target_img = self.target_transform(target_img)
sample = (input_img, target_img)
return sample
class CycleGan_Dataset():
def __init__(self, input_dir, target_dir, input_transform, target_transform):
self.input_dir = input_dir
self.target_dir = target_dir
self.input_transform = input_transform
self.target_transform = target_transform
self.A_image_name_list = []
for file in os.listdir(input_dir):
if(file.endswith('.png') or file.endswith('.jpeg') or file.endswith('.jpg') or file.endswith('.bmp')):
self.A_image_name_list.append(file)
self.B_image_name_list = []
for file in os.listdir(target_dir):
if(file.endswith('.png') or file.endswith('.jpeg') or file.endswith('.jpg') or file.endswith('.bmp')):
self.B_image_name_list.append(file)
def __len__(self):
return len(self.A_image_name_list)
def __getitem__(self, idx):
input_img = Image.open(os.path.join(self.input_dir, self.A_image_name_list[idx]))
target_img = Image.open(os.path.join(self.target_dir, self.B_image_name_list[random.randint(0, len(self.B_image_name_list) - 1)]))
input_img = self.input_transform(input_img)
target_img = self.target_transform(target_img)
sample = (input_img, target_img)
return sample
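# Minimal usage sketch (illustrative only; directory paths are placeholders):
#   ds = Dataset(('data/input', 'data/target'), basic_types='Pix2Pix')
#   train_loader = ds.get_loader(sz=256, bs=4)
#   input_batch, target_batch = next(iter(train_loader))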
|
StarcoderdataPython
|
1780603
|
<reponame>alphamatic/amp
import io
import logging
import os
import pandas as pd
import dataflow.core.nodes.sinks as dtfconosin
import helpers.hunit_test as hunitest
_LOG = logging.getLogger(__name__)
class TestWriteDf(hunitest.TestCase):
def test_write(self) -> None:
"""
Round-trip test on df serializing/deserializing.
"""
dir_name = self.get_scratch_space()
df_writer = dtfconosin.WriteDf("df_writer", dir_name)
df = self._get_data()
df_writer.predict(df)["df_out"]
file_name = os.path.join(dir_name, "24199772.pq")
reconstructed_df = pd.read_parquet(file_name)
self.assert_dfs_close(reconstructed_df, df)
def test_pass_through_no_writing(self) -> None:
"""
Ensure that `df` is passed through when no `dir_name` is provided.
"""
df_writer = dtfconosin.WriteDf("df_writer", "")
df = self._get_data()
df_out = df_writer.predict(df)["df_out"]
self.assert_dfs_close(df_out, df)
def test_pass_through(self) -> None:
"""
Ensure that `df` is passed through when `dir_name` is provided.
"""
dir_name = self.get_scratch_space()
df_writer = dtfconosin.WriteDf("df_writer", dir_name)
df = self._get_data()
df_out = df_writer.predict(df)["df_out"]
self.assert_dfs_close(df_out, df)
@staticmethod
def _get_data() -> pd.DataFrame:
txt = """
,close,close,mid,mid
datetime,MN0,MN1,MN0,MN1
2016-01-04 16:00:00,95.00,96.00,100,98.00
2016-01-04 16:01:00,NaN,NaN,NaN,NaN
2016-01-05 09:29:00,NaN,NaN,NaN,NaN
2016-01-05 09:30:00,100.00,NaN,100,NaN
2016-01-05 09:31:00,105.00,98.00,106.05,97.02
2016-01-05 09:32:00,52.50,49.00,53.025,48.51
"""
df = pd.read_csv(
io.StringIO(txt), index_col=0, parse_dates=True, header=[0, 1]
)
return df
class TestWriteCols(hunitest.TestCase):
def test_write(self) -> None:
dir_name = self.get_scratch_space()
mapping = {"close": "price"}
df_writer = dtfconosin.WriteCols("df_writer", dir_name, mapping)
df = self._get_data()
df_writer.predict(df)["df_out"]
file_name = os.path.join(dir_name, "24199772_price.csv")
reconstructed_col = pd.read_csv(file_name, index_col=0)
col = df.iloc[-1]["close"].rename("24199772_price").to_frame()
self.assert_dfs_close(reconstructed_col, col)
def test_pass_through_no_writing(self) -> None:
df_writer = dtfconosin.WriteCols("df_writer", "", {})
df = self._get_data()
df_out = df_writer.predict(df)["df_out"]
self.assert_dfs_close(df_out, df)
def test_pass_through(self) -> None:
dir_name = self.get_scratch_space()
col_mapping = {"mid": "mid_price"}
df_writer = dtfconosin.WriteCols("df_writer", dir_name, col_mapping)
df = self._get_data()
df_out = df_writer.predict(df)["df_out"]
self.assert_dfs_close(df_out, df)
@staticmethod
def _get_data() -> pd.DataFrame:
txt = """
,close,close,mid,mid
datetime,MN0,MN1,MN0,MN1
2016-01-04 16:00:00,95.00,96.00,100,98.00
2016-01-04 16:01:00,NaN,NaN,NaN,NaN
2016-01-05 09:29:00,NaN,NaN,NaN,NaN
2016-01-05 09:30:00,100.00,NaN,100,NaN
2016-01-05 09:31:00,105.00,98.00,106.05,97.02
2016-01-05 09:32:00,52.50,49.00,53.025,48.51
"""
df = pd.read_csv(
io.StringIO(txt), index_col=0, parse_dates=True, header=[0, 1]
)
return df
|
StarcoderdataPython
|
1674523
|
<filename>python/test.py
import time
import numpy
import orjson
def list_vs_array():
resol = 128
lst = [[[3.14 for _ in range(resol)] for _ in range(resol)] for _ in range(resol)]
arr = numpy.full((resol, resol, resol), 3.14, dtype=numpy.float32)
t0 = time.time()
l = orjson.dumps(lst)
t1 = time.time()
dt = (t1 - t0) * 1000
print(f"Dumping list took: {dt}ms")
t0 = time.time()
a = orjson.dumps(arr, option=orjson.OPT_SERIALIZE_NUMPY)
t1 = time.time()
dt = (t1 - t0) * 1000
print(f"Dumping ndarray took: {dt}ms")
return l, a
import sympy
from sympy.physics import hydrogen
# it's 'real' not 'is_real'
r = sympy.Symbol("r", real=True, positive=True)
theta = sympy.Symbol("theta", real=True)
phi = sympy.Symbol("phi", real=True)
# Note: Psi_nlm takes its arguments in the order (n, l, m, r, phi, theta).
p0 = hydrogen.Psi_nlm(2, 1, 0, r, phi, theta)
p1 = hydrogen.Psi_nlm(2, 1, 1, r, phi, theta)
p2 = hydrogen.Psi_nlm(2, 1, -1, r, phi, theta)
def integrate(wf):
abs_sqrd = wf * sympy.conjugate(wf)
jacobi = r * r * sympy.sin(theta)
return sympy.integrate(
abs_sqrd * jacobi,
(r, 0, sympy.oo),
(phi, 0, 2 * sympy.pi),
(theta, 0, sympy.pi),
)
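# Hedged usage sketch (an assumption, not part of the original file): each of
# the hydrogen 2p orbitals above is normalized, so integrate() should return 1
# for p0, p1 and p2. The symbolic integration can take a while.
if __name__ == "__main__":
    list_vs_array()
    print(integrate(p0))  # expected: 1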
|
StarcoderdataPython
|
4821788
|
<reponame>PratikGarai/Coding-Challenges<filename>Hackerrank/MaximumPalindromes.py
# Maximum Palindromes (HackerRank): results are reported modulo 10**9 + 7.
mod = 10**9 + 7
def getRes(counts):
    """Count the longest palindromes that can be built from the letter counts."""
    l = 0    # total number of letter pairs placed so far
    o = 0    # number of letters with an odd count
    res = 1
    for i in counts:
        # Multiply in the multinomial factor for this letter's i // 2 pairs.
        for j in range(1, (i // 2) + 1):
            res = res * (l + j) // j
        l += i // 2
        o += i % 2
    # Any odd-count letter may occupy the middle position (at least one choice).
    if not o:
        o = 1
    res = o * res
    return res % mod
def main():
s = input()
l = len(s)
mat = [[0 for i in range(l)] for j in range(l)]
for begin in range(l):
counts = [0 for i in range(26)]
mat[begin][begin] = 1
counts[ord(s[begin])-97] += 1
for end in range(begin+1,l):
ind = ord(s[end])-97
counts[ind] += 1
mat[begin][end] = getRes(counts)
t = int(input())
for i in range(t):
l,r = map(int, input().split())
print(mat[l-1][r-1]%mod)
main()
|
StarcoderdataPython
|
3254802
|
<reponame>mdalzell/advent-of-code-2019
from aoc2019.shared.intcode import IntCode
class SpringBot:
def __init__(self, program):
self.__computer = IntCode(program)
def loadSpringScript(self, commands):
asciiValues = []
for command in commands:
for character in command:
asciiValues.append(ord(character))
asciiValues.append(10)
self.__computer.inputs += asciiValues
def run(self):
self.__computer.run()
if self.__computer.output[-1] > 128:
return self.__computer.output[-1]
printString = ""
for output in self.__computer.output:
printString += chr(output)
print(printString)
part1Commands = ['OR A T', 'AND B T', 'AND C T', 'NOT T T', 'AND D T', 'OR T J', 'WALK']
part2Commands = ['OR A T', 'AND B T', 'AND C T', 'NOT T T', 'AND D T', 'OR T J', 'NOT E T', 'NOT T T', 'OR H T', 'AND T J', 'RUN']
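# Hedged usage sketch (an assumption, not part of the original repo): run the
# springdroid with the part-1 script. `intcode_program` would be the list of
# ints parsed from the Advent of Code day 21 puzzle input; it is not provided
# by this module.
def solve_part1(intcode_program):
    bot = SpringBot(intcode_program)
    bot.loadSpringScript(part1Commands)
    return bot.run()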
|
StarcoderdataPython
|
135142
|
<gh_stars>0
import importlib
import importlib.machinery
import importlib.util
import os
import re
import tempfile
from mako import exceptions
from mako.template import Template
from .exc import CommandError
def template_to_file(template_file, dest, output_encoding, **kw):
template = Template(filename=template_file)
try:
output = template.render_unicode(**kw).encode(output_encoding)
except:
with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as ntf:
ntf.write(
exceptions.text_error_template()
.render_unicode()
.encode(output_encoding)
)
fname = ntf.name
raise CommandError(
"Template rendering failed; see %s for a "
"template-oriented traceback." % fname
)
else:
with open(dest, "wb") as f:
f.write(output)
def coerce_resource_to_filename(fname):
"""Interpret a filename as either a filesystem location or as a package
resource.
Names that are non absolute paths and contain a colon
are interpreted as resources and coerced to a file location.
"""
if not os.path.isabs(fname) and ":" in fname:
import pkg_resources
fname = pkg_resources.resource_filename(*fname.split(":"))
return fname
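# Hedged illustration of the "package:path" form described in the docstring.
# The package and resource names below are hypothetical.
#
#     coerce_resource_to_filename("mypkg:templates/env.py.mako")
#         -> on-disk path of that resource inside the installed package
#     coerce_resource_to_filename("/tmp/env.py")
#         -> returned unchanged, since it is an absolute filesystem path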
def pyc_file_from_path(path):
"""Given a python source path, locate the .pyc."""
candidate = importlib.util.cache_from_source(path)
if os.path.exists(candidate):
return candidate
# even for pep3147, fall back to the old way of finding .pyc files,
# to support sourceless operation
filepath, ext = os.path.splitext(path)
for ext in importlib.machinery.BYTECODE_SUFFIXES:
if os.path.exists(filepath + ext):
return filepath + ext
else:
return None
def load_python_file(dir_, filename):
"""Load a file from the given path as a Python module."""
module_id = re.sub(r"\W", "_", filename)
path = os.path.join(dir_, filename)
_, ext = os.path.splitext(filename)
if ext == ".py":
if os.path.exists(path):
module = load_module_py(module_id, path)
else:
pyc_path = pyc_file_from_path(path)
if pyc_path is None:
raise ImportError("Can't find Python file %s" % path)
else:
module = load_module_pyc(module_id, pyc_path)
elif ext in (".pyc", ".pyo"):
module = load_module_pyc(module_id, path)
return module
def load_module_py(module_id, path):
spec = importlib.util.spec_from_file_location(module_id, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def load_module_pyc(module_id, path):
spec = importlib.util.spec_from_file_location(module_id, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
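# Hedged usage sketch (directory, filename and the upgrade() attribute are all
# assumptions): load a script by path and use whatever it defines.
#
#     module = load_python_file("/path/to/versions", "0001_initial.py")
#     module.upgrade()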
|
StarcoderdataPython
|
3687
|
<reponame>imaroger/sot-talos-balance
'''Test feet admittance control'''
from sot_talos_balance.utils.run_test_utils import run_ft_calibration, run_test, runCommandClient
try:
# Python 2
input = raw_input # noqa
except NameError:
pass
run_test('appli_feet_admittance.py')
run_ft_calibration('robot.ftc')
input("Wait before running the test")
print('Set saturation value')
runCommandClient('robot.admBF_dqSaturation.sin.value = [0.0, 0.0, 0.01, 0.0, 0.0, 0.0]')
input("Wait before dumping the data")
runCommandClient('dump_tracer(robot.tracer)')
|
StarcoderdataPython
|
89482
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `dicom_wsi` package."""
import datetime
import os
from yaml import load, BaseLoader
from ..dicom_wsi.base_attributes import build_base
from ..dicom_wsi.parse_wsi import get_wsi
from ..dicom_wsi.sequence_attributes import build_sequences
from ..dicom_wsi.shared_functional_groups import build_functional_groups
def test_get_wsi():
dir_path = os.path.dirname(os.path.realpath(__file__))
base_yaml = os.path.join(dir_path, "base.yaml")
    # Load the YAML configuration file.
    with open(base_yaml) as f:
        cfg = load(f, Loader=BaseLoader)
cfg, wsi = get_wsi(cfg)
dict_dicom_returned = {}
dcm, cfg = build_base(cfg, instance=3)
dcm = build_sequences(dcm)
dcm = build_functional_groups(dcm, cfg)
dict_dicom_returned["ContentDate"] = dcm.ContentDate
dict_dicom_returned["SOPInstanceUID"] = dcm.SOPInstanceUID
dict_dicom_returned["SOPClassUID"] = dcm.SOPClassUID
dict_dicom_expected = {"ContentDate": str(datetime.date.today()).replace('-', ''),
"SOPInstanceUID": '1.2.276.0.7230010.3.1.4.0.23267.1577648534.965883',
"SOPClassUID": '1.2.840.10008.5.1.4.1.1.77.1.6'}
assert dict_dicom_returned == dict_dicom_expected
|
StarcoderdataPython
|
4824693
|
import sys
import os
import signal
import json
from distutils.spawn import find_executable
from textwrap import shorten
from psutil import process_iter
from subprocess import Popen, DEVNULL
from time import sleep
from pytg.sender import Sender
from pytg.receiver import Receiver
from pytg.utils import coroutine
from pytg.exceptions import IllegalResponseException
# SETTINGS
## Telegram CLI
TG_CLI = 'telegram-cli'
TG_CLI_PORT = 44134
TG_CLI_EXECUTABLE = find_executable(TG_CLI)
TG_CLI_PID = None
TG_CLI_TIMEOUT = 20
REQUEST_DELAY = 2 # Time to wait before the next request
SAVE_PATH = './messages/'
def init_tg_cli():
try:
        p = next(p for p in process_iter()
                 if p.name() == 'telegram-cli'
                 and p.cmdline()[p.cmdline().index('-P') + 1] == str(TG_CLI_PORT))
except:
print('Running telegram-cli on port {}'.format(str(TG_CLI_PORT)))
p = Popen([TG_CLI_EXECUTABLE, '--json', '-d', '-P', str(TG_CLI_PORT)])
global TG_CLI_PID
TG_CLI_PID = p.pid
receiver = Receiver(host="localhost", port=TG_CLI_PORT)
sender = Sender(host="localhost", port=TG_CLI_PORT)
sender.default_answer_timeout = TG_CLI_TIMEOUT
return receiver, sender
def answer_yn(question=None):
"""Prints a simple yes or no question and returns bool"""
while True:
answer = input(question or 'Are you sure? Y/n').lower()
if answer == '' or answer == 'y' or answer == 'yes':
return True
elif answer == 'n' or answer == 'no':
return False
print('Please enter a valid answer (Y/n)')
def menu(title, menu_items, instructions=None):
"""
Print menu and return chosen menu entry.
It can take a list of strings or a list of dicts as long
as the dicts have a 'text' key for each item.
['one', 'two', 'three']
[{'text': 'one', 'other_key': '...'}, {...}]
"""
separator_len = 64
print(title)
print("=" * separator_len)
if instructions is not None:
print(instructions)
if all(isinstance(item, str) for item in menu_items):
print('\n'.join(['{:>4} - {}'.format(i + 1, item)
for i, item in enumerate(menu_items)]))
elif (all(isinstance(item, dict) for item in menu_items) and
all(['text' in item for item in menu_items])):
print('\n'.join(['{:>4} - {}'.format(i + 1, item['text'])
for i, item in enumerate(menu_items)]))
else:
raise Exception('Invalid menu definition')
print(' 0 - Exit')
print('-' * separator_len)
while True:
try:
option = int(input('Enter the option number: '))
if 0 < option <= len(menu_items):
return option - 1
            elif option == 0:
sys.exit()
except ValueError:
pass
print('Please enter a valid option number')
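# Hedged usage sketch for menu(): both input forms from the docstring. The
# option texts are illustrative only.
#
#     choice = menu('Select format', ['json', 'csv', 'txt'])
#     choice = menu('Select action',
#                   [{'text': 'Download history', 'action': some_callable}],
#                   instructions='Pick one of the actions below')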
def select_dialog(sender):
"""Ask the user to select which action to perform"""
dialog_list = sender.dialog_list(999)
menu_content = [
'[{}] {}'.format(
dialog['peer_type']
.replace('channel', 'S') # Supergroups
.replace('chat', 'C')
.replace('user', 'U')
.replace('encr_chat', 'E') # Encrypted chat
.replace('geo_chat', 'G'),
shorten(dialog['print_name'], width=48, placeholder='...'))
for dialog in dialog_list]
dialog_number = menu('Select chat', menu_content)
dialog_id = dialog_list[dialog_number]['id']
dialog_name = dialog_list[dialog_number]['print_name']
return dialog_id, dialog_name
def get_full_history(sender, dialog_id):
"""Download the full history for the selected dialog"""
page = 0
limit = 100
history = []
print('Downloading messages...')
while True:
sleep(REQUEST_DELAY)
offset = page * limit
try:
history[0:0] = sender.history(dialog_id, limit, offset)
print('.', end=' ', flush=True)
except IllegalResponseException:
print('\n{} messages found in selected dialog'.format(len(history)))
break
page += 1
print('')
return history
def filter_messages_by_user(history, user):
"""Filter messages sent by the specified user in the provided history"""
own_messages = []
print('Filtering messages for user {}...'.format(user['username']))
for message in history:
if user['id'] == message['from']['id']:
own_messages.insert(0, message)
print('x', end='', flush=True)
else:
print('.', end='', flush=True)
print('')
return own_messages
def write_json_to_file(filename, content):
"""Dumps a dict as JSON to the specified filename"""
print('Writing to {}'.format(filename))
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as f:
f.write(json.dumps(content, indent=2))
print('Done!')
def save_history(save_path, own_messages=False):
"""Download and save the chat history"""
def get_and_save_history(sender):
dialog_id, dialog_name = select_dialog(sender)
history = get_full_history(sender, dialog_id)
filename = '{}.json'.format(dialog_name[:32])
if own_messages:
user = sender.whoami()
history = filter_messages_by_user(history, user)
filename = '{}_own.json'.format(dialog_name[:32])
filename = os.path.join(save_path, filename)
write_json_to_file(filename, history)
return get_and_save_history
def delete_messages(sender):
"""
Delete all user messages
NOTE: Messages will be deleted only for the user. Other users
will still be able to see the messages since telegram-cli does
not have support for completely deleting messages (at least
for now).
"""
dialog_id, dialog_name = select_dialog(sender)
history = get_full_history(sender, dialog_id)
user = sender.whoami()
own_messages = filter_messages_by_user(history, user)
print('The messages you have sent to {} ({}) will be deleted'.format(
dialog_name, dialog_id))
if not answer_yn():
print('Cancelled')
return
print('Deleting messages')
for message in own_messages:
sender.message_delete(message['id'], forEveryone=True)
print('.', end='', flush=True)
def main():
receiver, sender = init_tg_cli()
menu_content = [
{
'text': 'Save full chat history',
'action': save_history(SAVE_PATH)
},
{
'text': 'Save own messages',
'action': save_history(SAVE_PATH, own_messages=True)
},
]
while True:
choice = menu('Select option', menu_content)
menu_content[choice]['action'](sender)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\n\nExit')
# Close telegram-cli
os.kill(TG_CLI_PID, signal.SIGTERM)
|
StarcoderdataPython
|
3205085
|
from rest_framework.test import APITestCase, APIClient
from api.models import Sample, Disease, Mutation, Gene
class SampleTests(APITestCase):
sample_keys = ['sample_id',
'disease',
'mutations',
'gender',
'age_diagnosed']
def setUp(self):
self.gene1 = Gene.objects.create(entrez_gene_id=123456,
symbol='GENE123',
description='foo',
chromosome='1',
gene_type='bar',
synonyms=['foo', 'bar'],
aliases=['foo', 'bar'])
self.disease1 = Disease.objects.create(acronym='BLCA',
name='bladder urothelial carcinoma')
self.sample1 = Sample.objects.create(sample_id='TCGA-22-4593-01',
disease=self.disease1,
gender='female',
age_diagnosed=37)
self.sample2 = Sample.objects.create(sample_id='TCGA-2G-AALW-01',
disease=self.disease1,
gender='male',
age_diagnosed=43)
self.mutation1 = Mutation.objects.create(gene=self.gene1,
sample=self.sample1)
self.mutation2 = Mutation.objects.create(gene=self.gene1,
sample=self.sample2)
def test_list_samples(self):
client = APIClient()
list_response = client.get('/samples')
self.assertEqual(list_response.status_code, 200)
self.assertEqual(list(list_response.data.keys()), ['count',
'next',
'previous',
'results'])
self.assertEqual(len(list_response.data['results']), 2)
self.assertEqual(list(list_response.data['results'][0].keys()), self.sample_keys)
self.assertEqual(list(list_response.data['results'][1].keys()), self.sample_keys)
def test_get_sample(self):
client = APIClient()
get_response = client.get('/samples/' + str(self.sample1.sample_id))
self.assertEqual(get_response.status_code, 200)
self.assertEqual(list(get_response.data.keys()), self.sample_keys)
|
StarcoderdataPython
|
1685749
|
#!/usr/bin/env python3
# Note that all the tests in this module require dataset (either network access or cached)
import os
import torch
import torchtext
import json
import hashlib
from torchtext.legacy import data
from parameterized import parameterized
from ..common.torchtext_test_case import TorchtextTestCase
from ..common.parameterized_utils import load_params
from ..common.assets import conditional_remove
from ..common.cache_utils import check_cache_status
def _raw_text_custom_name_func(testcase_func, param_num, param):
info = param.args[0]
name_info = [info['dataset_name'], info['split']]
return "%s_%s" % (
testcase_func.__name__,
parameterized.to_safe_name("_".join(name_info))
)
class TestDataset(TorchtextTestCase):
@classmethod
def setUpClass(cls):
check_cache_status()
def _helper_test_func(self, length, target_length, results, target_results):
self.assertEqual(length, target_length)
if isinstance(target_results, list):
target_results = torch.tensor(target_results, dtype=torch.int64)
if isinstance(target_results, tuple):
target_results = tuple(torch.tensor(item, dtype=torch.int64) for item in target_results)
self.assertEqual(results, target_results)
def test_wikitext2_legacy(self):
from torchtext.legacy.datasets import WikiText2
cachedir = os.path.join(self.project_root, ".data", "wikitext-2")
conditional_remove(cachedir)
ds = WikiText2
TEXT = data.Field(lower=True, batch_first=True)
train, valid, test = ds.splits(TEXT)
TEXT.build_vocab(train)
train_iter, valid_iter, test_iter = data.BPTTIterator.splits(
(train, valid, test), batch_size=3, bptt_len=30)
train_iter, valid_iter, test_iter = ds.iters(batch_size=4,
bptt_len=30)
conditional_remove(cachedir)
def test_wikitext2(self):
from torchtext.experimental.datasets import WikiText2
cachedir = os.path.join(self.project_root, ".data", "wikitext-2")
conditional_remove(cachedir)
cachefile = os.path.join(self.project_root, ".data", "wikitext-2-v1.zip")
conditional_remove(cachefile)
train_dataset, valid_dataset, test_dataset = WikiText2()
train_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))
valid_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))
test_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))
self._helper_test_func(len(train_data), 2049990, train_data[20:25],
[5024, 89, 21, 3, 1838])
self._helper_test_func(len(test_data), 241859, test_data[30:35],
[914, 4, 36, 11, 569])
self._helper_test_func(len(valid_data), 214417, valid_data[40:45],
[925, 8, 2, 150, 8575])
vocab = train_dataset.get_vocab()
tokens_ids = [vocab[token] for token in 'the player characters rest'.split()]
self.assertEqual(tokens_ids, [2, 286, 503, 700])
# Add test for the subset of the standard datasets
train_iter, valid_iter, test_iter = torchtext.datasets.WikiText2(split=('train', 'valid', 'test'))
self._helper_test_func(len(train_iter), 36718, next(train_iter), ' \n')
self._helper_test_func(len(valid_iter), 3760, next(valid_iter), ' \n')
self._helper_test_func(len(test_iter), 4358, next(test_iter), ' \n')
del train_iter, valid_iter, test_iter
train_dataset, test_dataset = WikiText2(split=('train', 'test'))
train_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))
test_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))
self._helper_test_func(len(train_data), 2049990, train_data[20:25],
[5024, 89, 21, 3, 1838])
self._helper_test_func(len(test_data), 241859, test_data[30:35],
[914, 4, 36, 11, 569])
conditional_remove(cachedir)
conditional_remove(cachefile)
def test_penntreebank_legacy(self):
from torchtext.legacy.datasets import PennTreebank
# smoke test to ensure penn treebank works properly
TEXT = data.Field(lower=True, batch_first=True)
ds = PennTreebank
train, valid, test = ds.splits(TEXT)
TEXT.build_vocab(train)
train_iter, valid_iter, test_iter = data.BPTTIterator.splits(
(train, valid, test), batch_size=3, bptt_len=30)
train_iter, valid_iter, test_iter = ds.iters(batch_size=4,
bptt_len=30)
def test_penntreebank(self):
from torchtext.experimental.datasets import PennTreebank
# smoke test to ensure penn treebank works properly
train_dataset, valid_dataset, test_dataset = PennTreebank()
train_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))
valid_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, valid_dataset)))
test_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))
self._helper_test_func(len(train_data), 924412, train_data[20:25],
[9919, 9920, 9921, 9922, 9188])
self._helper_test_func(len(test_data), 82114, test_data[30:35],
[397, 93, 4, 16, 7])
self._helper_test_func(len(valid_data), 73339, valid_data[40:45],
[0, 0, 78, 426, 196])
vocab = train_dataset.get_vocab()
tokens_ids = [vocab[token] for token in 'the player characters rest'.split()]
self.assertEqual(tokens_ids, [2, 2550, 3344, 1125])
# Add test for the subset of the standard datasets
train_dataset, test_dataset = PennTreebank(split=('train', 'test'))
train_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, train_dataset)))
test_data = torch.cat(tuple(filter(lambda t: t.numel() > 0, test_dataset)))
self._helper_test_func(len(train_data), 924412, train_data[20:25],
[9919, 9920, 9921, 9922, 9188])
self._helper_test_func(len(test_data), 82114, test_data[30:35],
[397, 93, 4, 16, 7])
train_iter, test_iter = torchtext.datasets.PennTreebank(split=('train', 'test'))
self._helper_test_func(len(train_iter), 42068, next(train_iter)[:15], ' aer banknote b')
self._helper_test_func(len(test_iter), 3761, next(test_iter)[:25], " no it was n't black mond")
del train_iter, test_iter
def test_text_classification(self):
from torchtext.experimental.datasets import AG_NEWS
# smoke test to ensure ag_news dataset works properly
datadir = os.path.join(self.project_root, ".data")
if not os.path.exists(datadir):
os.makedirs(datadir)
train_dataset, test_dataset = AG_NEWS(root=datadir, ngrams=3)
self._helper_test_func(len(train_dataset), 120000, train_dataset[-1][1][:10],
[3525, 319, 4053, 34, 5407, 3607, 70, 6798, 10599, 4053])
self._helper_test_func(len(test_dataset), 7600, test_dataset[-1][1][:10],
[2351, 758, 96, 38581, 2351, 220, 5, 396, 3, 14786])
# Add test for the subset of the standard datasets
train_dataset = AG_NEWS(split='train')
self._helper_test_func(len(train_dataset), 120000, train_dataset[-1][1][:10],
[2155, 223, 2405, 30, 3010, 2204, 54, 3603, 4930, 2405])
def test_raw_ag_news(self):
train_iter, test_iter = torchtext.datasets.AG_NEWS()
self._helper_test_func(len(train_iter), 120000, next(train_iter)[1][:25], 'Wall St. Bears Claw Back ')
self._helper_test_func(len(test_iter), 7600, next(test_iter)[1][:25], 'Fears for T N pension aft')
del train_iter, test_iter
@parameterized.expand(
load_params('raw_datasets.jsonl'),
name_func=_raw_text_custom_name_func)
def test_raw_text_name_property(self, info):
dataset_name = info['dataset_name']
split = info['split']
if dataset_name == "Multi30k" or dataset_name == 'WMT14':
data_iter = torchtext.experimental.datasets.raw.DATASETS[dataset_name](split=split)
else:
data_iter = torchtext.datasets.DATASETS[dataset_name](split=split)
self.assertEqual(str(data_iter), dataset_name)
@parameterized.expand(
load_params('raw_datasets.jsonl'),
name_func=_raw_text_custom_name_func)
def test_raw_text_classification(self, info):
dataset_name = info['dataset_name']
split = info['split']
if dataset_name == "Multi30k" or dataset_name == 'WMT14':
data_iter = torchtext.experimental.datasets.raw.DATASETS[dataset_name](split=split)
else:
data_iter = torchtext.datasets.DATASETS[dataset_name](split=split)
self.assertEqual(len(data_iter), info['NUM_LINES'])
self.assertEqual(hashlib.md5(json.dumps(next(data_iter), sort_keys=True).encode('utf-8')).hexdigest(), info['first_line'])
if dataset_name == "AG_NEWS":
self.assertEqual(torchtext.datasets.URLS[dataset_name][split], info['URL'])
self.assertEqual(torchtext.datasets.MD5[dataset_name][split], info['MD5'])
elif dataset_name == "Multi30k":
self.assertEqual(torchtext.experimental.datasets.raw.URLS[dataset_name][split], info['URL'])
self.assertEqual(torchtext.experimental.datasets.raw.MD5[dataset_name][split], info['MD5'])
elif dataset_name == "WMT14":
self.assertEqual(torchtext.experimental.datasets.raw.URLS[dataset_name], info['URL'])
self.assertEqual(torchtext.experimental.datasets.raw.MD5[dataset_name], info['MD5'])
else:
self.assertEqual(torchtext.datasets.URLS[dataset_name], info['URL'])
self.assertEqual(torchtext.datasets.MD5[dataset_name], info['MD5'])
del data_iter
@parameterized.expand(list(sorted(torchtext.datasets.DATASETS.keys())))
def test_raw_datasets_split_argument(self, dataset_name):
if 'statmt' in torchtext.datasets.URLS[dataset_name]:
return
dataset = torchtext.datasets.DATASETS[dataset_name]
train1 = dataset(split='train')
train2, = dataset(split=('train',))
for d1, d2 in zip(train1, train2):
self.assertEqual(d1, d2)
# This test only aims to exercise the argument parsing and uses
# the first line as a litmus test for correctness.
break
# Exercise default constructor
_ = dataset()
@parameterized.expand(["AG_NEWS", "WikiText2", "IMDB"])
def test_datasets_split_argument(self, dataset_name):
dataset = torchtext.experimental.datasets.DATASETS[dataset_name]
train1 = dataset(split='train')
train2, = dataset(split=('train',))
for d1, d2 in zip(train1, train2):
self.assertEqual(d1, d2)
# This test only aims to exercise the argument parsing and uses
# the first line as a litmus test for correctness.
break
# Exercise default constructor
_ = dataset()
def test_next_method_dataset(self):
train_iter, test_iter = torchtext.datasets.AG_NEWS()
for_count = 0
next_count = 0
for line in train_iter:
for_count += 1
try:
next(train_iter)
next_count += 1
except:
break
self.assertEqual((for_count, next_count), (60000, 60000))
def test_imdb(self):
from torchtext.experimental.datasets import IMDB
from torchtext.vocab import Vocab
# smoke test to ensure imdb works properly
train_dataset, test_dataset = IMDB()
self._helper_test_func(len(train_dataset), 25000, train_dataset[0][1][:10],
[13, 1568, 13, 246, 35468, 43, 64, 398, 1135, 92])
self._helper_test_func(len(test_dataset), 25000, test_dataset[0][1][:10],
[13, 125, 1051, 5, 246, 1652, 8, 277, 66, 20])
# Test API with a vocab input object
old_vocab = train_dataset.get_vocab()
new_vocab = Vocab(counter=old_vocab.freqs, max_size=2500)
new_train_data, new_test_data = IMDB(vocab=new_vocab)
# Add test for the subset of the standard datasets
train_dataset = IMDB(split='train')
self._helper_test_func(len(train_dataset), 25000, train_dataset[0][1][:10],
[13, 1568, 13, 246, 35468, 43, 64, 398, 1135, 92])
train_iter, test_iter = torchtext.datasets.IMDB()
self._helper_test_func(len(train_iter), 25000, next(train_iter)[1][:25], 'I rented I AM CURIOUS-YEL')
self._helper_test_func(len(test_iter), 25000, next(test_iter)[1][:25], 'I love sci-fi and am will')
del train_iter, test_iter
def test_iwslt2017(self):
from torchtext.experimental.datasets import IWSLT2017
train_dataset, valid_dataset, test_dataset = IWSLT2017()
self.assertEqual(len(train_dataset), 206112)
self.assertEqual(len(valid_dataset), 888)
self.assertEqual(len(test_dataset), 1568)
de_vocab, en_vocab = train_dataset.get_vocab()
def assert_nth_pair_is_equal(n, expected_sentence_pair):
de_sentence = [de_vocab.itos[index] for index in train_dataset[n][0]]
en_sentence = [en_vocab.itos[index] for index in train_dataset[n][1]]
expected_de_sentence, expected_en_sentence = expected_sentence_pair
self.assertEqual(de_sentence, expected_de_sentence)
self.assertEqual(en_sentence, expected_en_sentence)
assert_nth_pair_is_equal(0, (['Vielen', 'Dank', ',', 'Chris', '.', '\n'], ['Thank', 'you', 'so', 'much', ',', 'Chris', '.', '\n']))
assert_nth_pair_is_equal(10, (['und', 'wir', 'fuhren', 'selbst', '.', '\n'], ['Driving', 'ourselves', '.', '\n']))
assert_nth_pair_is_equal(20, (['Sie', 'sagte', ':', '"', 'Ja', ',', 'das', 'ist', 'Ex-Vizepräsident', 'Al', 'Gore', 'und', 'seine',
'Frau', 'Tipper', '.', '"', '\n'], ['And', 'she', 'said', '"', 'Yes', ',', 'that', "'s", 'former',
'Vice', 'President', 'Al', 'Gore', 'and', 'his', 'wife', ',', 'Tipper', '.', '"', '\n']))
def test_iwslt2016(self):
from torchtext.experimental.datasets import IWSLT2016
train_dataset, valid_dataset, test_dataset = IWSLT2016()
self.assertEqual(len(train_dataset), 196884)
self.assertEqual(len(valid_dataset), 993)
self.assertEqual(len(test_dataset), 1305)
de_vocab, en_vocab = train_dataset.get_vocab()
def assert_nth_pair_is_equal(n, expected_sentence_pair):
de_sentence = [de_vocab.itos[index] for index in train_dataset[n][0]]
en_sentence = [en_vocab.itos[index] for index in train_dataset[n][1]]
expected_de_sentence, expected_en_sentence = expected_sentence_pair
self.assertEqual(de_sentence, expected_de_sentence)
self.assertEqual(en_sentence, expected_en_sentence)
assert_nth_pair_is_equal(0, (['David', 'Gallo', ':', 'Das', 'ist', 'Bill', 'Lange',
'.', 'Ich', 'bin', 'Dave', 'Gallo', '.', '\n'],
['David', 'Gallo', ':', 'This', 'is', 'Bill', 'Lange',
'.', 'I', "'m", 'Dave', 'Gallo', '.', '\n']))
assert_nth_pair_is_equal(10, (['Die', 'meisten', 'Tiere', 'leben', 'in',
'den', 'Ozeanen', '.', '\n'],
['Most', 'of', 'the', 'animals', 'are', 'in',
'the', 'oceans', '.', '\n']))
assert_nth_pair_is_equal(20, (['Es', 'ist', 'einer', 'meiner', 'Lieblinge', ',', 'weil', 'es',
'alle', 'möglichen', 'Funktionsteile', 'hat', '.', '\n'],
['It', "'s", 'one', 'of', 'my', 'favorites', ',', 'because', 'it', "'s",
'got', 'all', 'sorts', 'of', 'working', 'parts', '.', '\n']))
def test_multi30k(self):
from torchtext.experimental.datasets import Multi30k
# smoke test to ensure multi30k works properly
train_dataset, valid_dataset, test_dataset = Multi30k()
# This change is due to the BC breaking in spacy 3.0
self._helper_test_func(len(train_dataset), 29000, train_dataset[20],
# ([4, 444, 2531, 47, 17480, 7423, 8, 158, 10, 12, 5849, 3, 2],
([4, 444, 2529, 47, 17490, 7422, 8, 158, 10, 12, 5846, 3, 2],
[5, 61, 530, 137, 1494, 10, 9, 280, 6, 2, 3749, 4, 3]))
self._helper_test_func(len(valid_dataset), 1014, valid_dataset[30],
([4, 179, 26, 85, 1005, 57, 19, 154, 3, 2],
[5, 24, 32, 81, 47, 1348, 6, 2, 119, 4, 3]))
# This change is due to the BC breaking in spacy 3.0
self._helper_test_func(len(test_dataset), 1000, test_dataset[40],
# ([4, 26, 6, 12, 3915, 1538, 21, 64, 3, 2],
([4, 26, 6, 12, 3913, 1537, 21, 64, 3, 2],
[5, 32, 20, 2, 747, 345, 1915, 6, 46, 4, 3]))
de_vocab, en_vocab = train_dataset.get_vocab()
de_tokens_ids = [
de_vocab[token] for token in
'Zwei Männer verpacken Donuts in Kunststofffolie'.split()
]
# This change is due to the BC breaking in spacy 3.0
# self.assertEqual(de_tokens_ids, [20, 30, 18705, 4448, 6, 6241])
self.assertEqual(de_tokens_ids, [20, 30, 18714, 4447, 6, 6239])
en_tokens_ids = [
en_vocab[token] for token in
'Two young White males are outside near many bushes'.split()
]
self.assertEqual(en_tokens_ids,
[18, 24, 1168, 807, 16, 56, 83, 335, 1338])
# Add test for the subset of the standard datasets
train_iter, valid_iter = torchtext.experimental.datasets.raw.Multi30k(split=('train', 'valid'))
self._helper_test_func(len(train_iter), 29000, ' '.join(next(train_iter)),
' '.join(['Zwei junge weiße Männer sind im Freien in der Nähe vieler Büsche.\n',
'Two young, White males are outside near many bushes.\n']))
self._helper_test_func(len(valid_iter), 1014, ' '.join(next(valid_iter)),
' '.join(['Eine Gruppe von Männern lädt Baumwolle auf einen Lastwagen\n',
'A group of men are loading cotton onto a truck\n']))
del train_iter, valid_iter
train_dataset = Multi30k(split='train')
# This change is due to the BC breaking in spacy 3.0
self._helper_test_func(len(train_dataset), 29000, train_dataset[20],
# ([4, 444, 2531, 47, 17480, 7423, 8, 158, 10, 12, 5849, 3, 2],
([4, 444, 2529, 47, 17490, 7422, 8, 158, 10, 12, 5846, 3, 2],
[5, 61, 530, 137, 1494, 10, 9, 280, 6, 2, 3749, 4, 3]))
datafile = os.path.join(self.project_root, ".data", "train*")
conditional_remove(datafile)
datafile = os.path.join(self.project_root, ".data", "val*")
conditional_remove(datafile)
datafile = os.path.join(self.project_root, ".data", "test*")
conditional_remove(datafile)
datafile = os.path.join(self.project_root, ".data",
"multi30k_task*.tar.gz")
conditional_remove(datafile)
def test_udpos_sequence_tagging(self):
from torchtext.experimental.datasets import UDPOS
# smoke test to ensure imdb works properly
train_dataset, valid_dataset, test_dataset = UDPOS()
self._helper_test_func(len(train_dataset), 12543, (train_dataset[0][0][:10], train_dataset[0][1][:10],
train_dataset[0][2][:10], train_dataset[-1][0][:10],
train_dataset[-1][1][:10], train_dataset[-1][2][:10]),
([262, 16, 5728, 45, 289, 701, 1160, 4436, 10660, 585],
[8, 3, 8, 3, 9, 2, 4, 8, 8, 8],
[5, 34, 5, 27, 7, 11, 14, 5, 5, 5],
[9, 32, 169, 436, 59, 192, 30, 6, 117, 17],
[5, 10, 11, 4, 11, 11, 3, 12, 11, 4],
[6, 20, 8, 10, 8, 8, 24, 13, 8, 15]))
self._helper_test_func(len(valid_dataset), 2002, (valid_dataset[0][0][:10], valid_dataset[0][1][:10],
valid_dataset[0][2][:10], valid_dataset[-1][0][:10],
valid_dataset[-1][1][:10], valid_dataset[-1][2][:10]),
([746, 3, 10633, 656, 25, 1334, 45],
[6, 7, 8, 4, 7, 2, 3],
[3, 4, 5, 16, 4, 2, 27],
[354, 4, 31, 17, 141, 421, 148, 6, 7, 78],
[11, 3, 5, 4, 9, 2, 2, 12, 7, 11],
[8, 12, 6, 15, 7, 2, 2, 13, 4, 8]))
self._helper_test_func(len(test_dataset), 2077, (test_dataset[0][0][:10], test_dataset[0][1][:10],
test_dataset[0][2][:10], test_dataset[-1][0][:10],
test_dataset[-1][1][:10], test_dataset[-1][2][:10]),
([210, 54, 3115, 0, 12229, 0, 33],
[5, 15, 8, 4, 6, 8, 3],
[30, 3, 5, 14, 3, 5, 9],
[116, 0, 6, 11, 412, 10, 0, 4, 0, 6],
[5, 4, 12, 10, 9, 15, 4, 3, 4, 12],
[6, 16, 13, 16, 7, 3, 19, 12, 19, 13]))
# Assert vocabs
self.assertEqual(len(train_dataset.get_vocabs()), 3)
self.assertEqual(len(train_dataset.get_vocabs()[0]), 19674)
self.assertEqual(len(train_dataset.get_vocabs()[1]), 19)
self.assertEqual(len(train_dataset.get_vocabs()[2]), 52)
# Assert token ids
word_vocab = train_dataset.get_vocabs()[0]
tokens_ids = [word_vocab[token] for token in 'Two of them were being run'.split()]
self.assertEqual(tokens_ids, [1206, 8, 69, 60, 157, 452])
# Add test for the subset of the standard datasets
train_dataset = UDPOS(split='train')
self._helper_test_func(len(train_dataset), 12543, (train_dataset[0][0][:10], train_dataset[-1][2][:10]),
([262, 16, 5728, 45, 289, 701, 1160, 4436, 10660, 585],
[6, 20, 8, 10, 8, 8, 24, 13, 8, 15]))
train_iter, valid_iter = torchtext.datasets.UDPOS(split=('train', 'valid'))
self._helper_test_func(len(train_iter), 12543, ' '.join(next(train_iter)[0][:5]),
' '.join(['Al', '-', 'Zaman', ':', 'American']))
self._helper_test_func(len(valid_iter), 2002, ' '.join(next(valid_iter)[0][:5]),
' '.join(['From', 'the', 'AP', 'comes', 'this']))
del train_iter, valid_iter
def test_conll_sequence_tagging(self):
from torchtext.experimental.datasets import CoNLL2000Chunking
# smoke test to ensure imdb works properly
train_dataset, test_dataset = CoNLL2000Chunking()
self._helper_test_func(len(train_dataset), 8936, (train_dataset[0][0][:10], train_dataset[0][1][:10],
train_dataset[0][2][:10], train_dataset[-1][0][:10],
train_dataset[-1][1][:10], train_dataset[-1][2][:10]),
([11556, 9, 3, 1775, 17, 1164, 177, 6, 212, 317],
[2, 3, 5, 2, 17, 12, 16, 15, 13, 5],
[3, 6, 3, 2, 5, 7, 7, 7, 7, 3],
[85, 17, 59, 6473, 288, 115, 72, 5, 2294, 2502],
[18, 17, 12, 19, 10, 6, 3, 3, 4, 4],
[3, 5, 7, 7, 3, 2, 6, 6, 3, 2]))
self._helper_test_func(len(test_dataset), 2012, (test_dataset[0][0][:10], test_dataset[0][1][:10],
test_dataset[0][2][:10], test_dataset[-1][0][:10],
test_dataset[-1][1][:10], test_dataset[-1][2][:10]),
([0, 294, 73, 10, 13582, 194, 18, 24, 2414, 7],
[4, 4, 4, 23, 4, 2, 11, 18, 11, 5],
[3, 2, 2, 3, 2, 2, 5, 3, 5, 3],
[51, 456, 560, 2, 11, 465, 2, 1413, 36, 60],
[3, 4, 4, 8, 3, 2, 8, 4, 17, 16],
[6, 3, 2, 4, 6, 3, 4, 3, 5, 7]))
# Assert vocabs
self.assertEqual(len(train_dataset.get_vocabs()), 3)
self.assertEqual(len(train_dataset.get_vocabs()[0]), 19124)
self.assertEqual(len(train_dataset.get_vocabs()[1]), 46)
self.assertEqual(len(train_dataset.get_vocabs()[2]), 24)
# Assert token ids
word_vocab = train_dataset.get_vocabs()[0]
tokens_ids = [word_vocab[token] for token in 'Two of them were being run'.split()]
self.assertEqual(tokens_ids, [970, 5, 135, 43, 214, 690])
# Add test for the subset of the standard datasets
train_dataset = CoNLL2000Chunking(split='train')
self._helper_test_func(len(train_dataset), 8936, (train_dataset[0][0][:10], train_dataset[0][1][:10],
train_dataset[0][2][:10], train_dataset[-1][0][:10],
train_dataset[-1][1][:10], train_dataset[-1][2][:10]),
([11556, 9, 3, 1775, 17, 1164, 177, 6, 212, 317],
[2, 3, 5, 2, 17, 12, 16, 15, 13, 5],
[3, 6, 3, 2, 5, 7, 7, 7, 7, 3],
[85, 17, 59, 6473, 288, 115, 72, 5, 2294, 2502],
[18, 17, 12, 19, 10, 6, 3, 3, 4, 4],
[3, 5, 7, 7, 3, 2, 6, 6, 3, 2]))
train_iter, test_iter = torchtext.datasets.CoNLL2000Chunking()
self._helper_test_func(len(train_iter), 8936, ' '.join(next(train_iter)[0][:5]),
' '.join(['Confidence', 'in', 'the', 'pound', 'is']))
self._helper_test_func(len(test_iter), 2012, ' '.join(next(test_iter)[0][:5]),
' '.join(['Rockwell', 'International', 'Corp.', "'s", 'Tulsa']))
del train_iter, test_iter
def test_squad1(self):
from torchtext.experimental.datasets import SQuAD1
from torchtext.vocab import Vocab
# smoke test to ensure imdb works properly
train_dataset, dev_dataset = SQuAD1()
context, question, answers, ans_pos = train_dataset[100]
self._helper_test_func(len(train_dataset), 87599, (question[:5], ans_pos[0]),
([7, 24, 86, 52, 2], [72, 72]))
context, question, answers, ans_pos = dev_dataset[100]
self._helper_test_func(len(dev_dataset), 10570, (question, ans_pos[0]),
([42, 27, 669, 7438, 17, 2, 1950, 3273, 17252, 389, 16], [45, 48]))
# Test API with a vocab input object
old_vocab = train_dataset.get_vocab()
new_vocab = Vocab(counter=old_vocab.freqs, max_size=2500)
new_train_data, new_test_data = SQuAD1(vocab=new_vocab)
# Add test for the subset of the standard datasets
train_dataset = SQuAD1(split='train')
context, question, answers, ans_pos = train_dataset[100]
self._helper_test_func(len(train_dataset), 87599, (question[:5], ans_pos[0]),
([7, 24, 86, 52, 2], [72, 72]))
train_iter, dev_iter = torchtext.datasets.SQuAD1()
self._helper_test_func(len(train_iter), 87599, next(train_iter)[0][:50],
'Architecturally, the school has a Catholic charact')
self._helper_test_func(len(dev_iter), 10570, next(dev_iter)[0][:50],
'Super Bowl 50 was an American football game to det')
del train_iter, dev_iter
def test_squad2(self):
from torchtext.experimental.datasets import SQuAD2
from torchtext.vocab import Vocab
# smoke test to ensure imdb works properly
train_dataset, dev_dataset = SQuAD2()
context, question, answers, ans_pos = train_dataset[200]
self._helper_test_func(len(train_dataset), 130319, (question[:5], ans_pos[0]),
([84, 50, 1421, 12, 5439], [9, 9]))
context, question, answers, ans_pos = dev_dataset[200]
self._helper_test_func(len(dev_dataset), 11873, (question, ans_pos[0]),
([41, 29, 2, 66, 17016, 30, 0, 1955, 16], [40, 46]))
# Test API with a vocab input object
old_vocab = train_dataset.get_vocab()
new_vocab = Vocab(counter=old_vocab.freqs, max_size=2500)
new_train_data, new_test_data = SQuAD2(vocab=new_vocab)
# Add test for the subset of the standard datasets
train_dataset = SQuAD2(split='train')
context, question, answers, ans_pos = train_dataset[200]
self._helper_test_func(len(train_dataset), 130319, (question[:5], ans_pos[0]),
([84, 50, 1421, 12, 5439], [9, 9]))
train_iter, dev_iter = torchtext.datasets.SQuAD2()
self._helper_test_func(len(train_iter), 130319, next(train_iter)[0][:50],
'<NAME> (/biːˈjɒnseɪ/ bee-Y')
self._helper_test_func(len(dev_iter), 11873, next(dev_iter)[0][:50],
'The Normans (Norman: Nourmands; French: Normands; ')
del train_iter, dev_iter
|
StarcoderdataPython
|
4818728
|
<reponame>Carmo-sousa/telegram-bot
"""
All commands of Bot
"""
import logging
from telegram import Update
from telegram.ext import CallbackContext
# Enable logging
logger = logging.getLogger(__name__)
# TODO: Finish writing the help page.
HELP_MESSAGE = """
Bem vindo ao seu gerenciador de usuário e senha!
*Comandos*:
/help Mostra está pagina de ajuda.
/new Salva um novo.
/end Finaliza a conversa.
"""
def start(update: Update, context: CallbackContext):
user = update.message.from_user
text = (
f"Seu ID: {user.id}\n"
"Bem vindo ao seu gerenciador de usuários pessoal!\n"
"Para adicionar um novo usuário e senha digite /new.\n"
"Para mais informações digite /help"
)
logger.info("Gender of %s: %s", user.first_name, update.message.text)
update.message.reply_text(text=text)
def help(update: Update, context: CallbackContext):
user = update.message.from_user
logger.info("Gender of %s: %s", user.first_name, update.message.text)
update.message.reply_text(HELP_MESSAGE)
def job_callback(context: CallbackContext):
job = context.job
context.bot.send_message("325105532", job.context)
|
StarcoderdataPython
|
4805623
|
<reponame>amirhertz/shadowpix
from image_utils import *
from mesh_utils import *
import numpy as np
epsilon = 1e-10
def check_constrains(r, u, v):
    constrains = [r <= u[:, :-1] + epsilon, r <= u[:, 1:] + epsilon, r[:-1, :] <= v[1:, :] + epsilon]
    for idx, constrain in enumerate(constrains):
        if not np.all(constrain):
            print('fail on constraint %d' % (idx + 1))
def local_method(images_paths, image_size, light_angle, output_file):
image_a, image_b, image_c = load_images(image_size, *images_paths)
image_a, image_b, image_c = 1 - image_a, 1 - image_b, 1 - image_c
light_angle *= np.pi / 180
s = 1 / np.tan(light_angle)
u = np.zeros([image_size, image_size + 1])
# side constrains
for i in range(image_size):
u[:, i + 1] = u[:, i] + s * (image_b[:, i] - image_a[:, i])
u += (s * image_a[:, 0])[:, np.newaxis]
# height constrains
images_constrains = s * (-image_a[:image_size - 1, :] + image_a[1:, :] - image_c[1:, :])
u[0, :] -= min(0, np.min(u[0, :]))
for j in range(image_size - 1):
height_constrain = -u[j + 1, :-1] + u[j, :-1] + images_constrains[j, :]
u[j + 1, :] += max(np.max(height_constrain), 0)
r = u[:, : image_size] - s * image_a
v = r + s * image_c
check_constrains(r, u, v)
ds_to_mesh(r, u, v, 0.1, output_file)
print('done')
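# Hedged usage sketch (paths, size and angle are assumptions): build the
# shadow-casting heightfield mesh from three grayscale images.
#
#     local_method(["image_a.png", "image_b.png", "image_c.png"],
#                  image_size=200, light_angle=60, output_file="shadowpix.obj")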
|
StarcoderdataPython
|
4804290
|
"""Test for TaskContextConnection"""
from unittest.mock import Mock
import pytest
from pynocular.aiopg_transaction import LockedConnection, TaskContextConnection
@pytest.fixture()
def locked_connection():
"""Return a locked connection"""
return LockedConnection(Mock())
@pytest.mark.asyncio()
async def test_task_context_connection_set_clear(locked_connection) -> None:
"""Test that we can set and clear the connection"""
context_conn = TaskContextConnection("key1")
context_conn.set(locked_connection)
test_conn = context_conn.get()
assert test_conn == locked_connection
context_conn.clear()
# No connection should exist now
test_conn = context_conn.get()
assert test_conn is None
@pytest.mark.asyncio()
async def test_task_context_connection_shared(locked_connection) -> None:
"""Test that we can share context across instances"""
context_conn = TaskContextConnection("key1")
context_conn.set(locked_connection)
test_conn = context_conn.get()
assert test_conn == locked_connection
# Create another instance that should share the connection
context_conn2 = TaskContextConnection("key1")
test_conn2 = context_conn2.get()
assert test_conn2 == locked_connection
context_conn.clear()
# No connection should exist on either connection
test_conn = context_conn.get()
assert test_conn is None
test_conn2 = context_conn2.get()
assert test_conn2 is None
|
StarcoderdataPython
|
4837065
|
<reponame>nanbi/Python-software
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.flags import SMALL_SAMPLES, SET_SEED
from selection.tests.instance import gaussian_instance
from selection.tests.decorators import wait_for_return_value, set_seed_iftrue, set_sampling_params_iftrue, register_report
import selection.tests.reports as reports
from selection.randomized.api import randomization, multiple_queries, glm_target, glm_nonparametric_bootstrap
from selection.randomized.glm import resid_bootstrap, fixedX_group_lasso
@register_report(['pvalue', 'active'])
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=10, burnin=10)
@set_seed_iftrue(SET_SEED)
@wait_for_return_value()
def test_fixedX(ndraw=10000, burnin=2000): # nsim needed for decorator
s, n, p = 5, 200, 20
randomizer = randomization.laplace((p,), scale=1.)
X, Y, beta, nonzero, sigma = gaussian_instance(n=n, p=p, s=s, rho=0.1, signal=7)
lam_frac = 1.
lam = lam_frac * np.mean(np.fabs(X.T.dot(np.random.standard_normal((n, 50000)))).max(0)) * sigma
W = np.ones(p) * lam
epsilon = 1. / np.sqrt(n)
penalty = rr.group_lasso(np.arange(p),
weights=dict(zip(np.arange(p), W)), lagrange=1.)
M_est = fixedX_group_lasso(X, Y, epsilon, penalty, randomizer)
mv = multiple_queries([M_est])
mv.solve()
active = M_est.selection_variable['variables']
nactive = active.sum()
if set(nonzero).issubset(np.nonzero(active)[0]) and active.sum() > len(nonzero):
pvalues = []
active_set = np.nonzero(active)[0]
inactive_selected = I = [i for i in np.arange(active_set.shape[0]) if active_set[i] not in nonzero]
active_selected = A = [i for i in np.arange(active_set.shape[0]) if active_set[i] in nonzero]
if not I:
return None
idx = I[0]
boot_target, target_observed = resid_bootstrap(M_est.loss, active)
X_active = X[:,active]
beta_hat = np.linalg.pinv(X_active).dot(Y)
resid_hat = Y - X_active.dot(beta_hat)
form_covariances = glm_nonparametric_bootstrap(n, n)
mv.setup_sampler(form_covariances)
# null saturated
def null_target(Y_star):
result = boot_target(Y_star)
return result[idx]
null_observed = np.zeros(1)
null_observed[0] = target_observed[idx]
target_sampler = mv.setup_target(null_target, null_observed)
test_stat = lambda x: x[0]
pval = target_sampler.hypothesis_test(test_stat, null_observed, burnin=burnin, ndraw=ndraw) # twosided by default
pvalues.append(pval)
# null selected
def null_target(Y_star):
result = boot_target(Y_star)
return np.hstack([result[idx], result[nactive:]])
null_observed = np.zeros_like(null_target(np.random.standard_normal(n)))
null_observed[0] = target_observed[idx]
null_observed[1:] = target_observed[nactive:]
target_sampler = mv.setup_target(null_target, null_observed, target_set=[0])
test_stat = lambda x: x[0]
pval = target_sampler.hypothesis_test(test_stat, null_observed, burnin=burnin, ndraw=ndraw) # twosided by default
pvalues.append(pval)
# true saturated
idx = A[0]
def active_target(Y_star):
result = boot_target(Y_star)
return result[idx]
active_observed = np.zeros(1)
active_observed[0] = target_observed[idx]
sampler = lambda : np.random.choice(n, size=(n,), replace=True)
target_sampler = mv.setup_target(active_target, active_observed)
test_stat = lambda x: x[0]
pval = target_sampler.hypothesis_test(test_stat, active_observed, burnin=burnin, ndraw=ndraw) # twosided by default
pvalues.append(pval)
# true selected
def active_target(Y_star):
result = boot_target(Y_star)
return np.hstack([result[idx], result[nactive:]])
active_observed = np.zeros_like(active_target(np.random.standard_normal(n)))
active_observed[0] = target_observed[idx]
active_observed[1:] = target_observed[nactive:]
target_sampler = mv.setup_target(active_target, active_observed, target_set=[0])
test_stat = lambda x: x[0]
pval = target_sampler.hypothesis_test(test_stat, active_observed, burnin=burnin, ndraw=ndraw) # twosided by default
pvalues.append(pval)
return pvalues, [False, False, True, True]
def report(niter=50, **kwargs):
fixedX_report = reports.reports['test_fixedX']
runs = reports.collect_multiple_runs(fixedX_report['test'],
fixedX_report['columns'],
niter,
reports.summarize_all,
**kwargs)
fig = reports.pvalue_plot(runs)
fig.savefig('fixedX_pivots.pdf') # will have both bootstrap and CLT on plot
|
StarcoderdataPython
|
1670056
|
bootstrap_servers = 'kafka-instance-1-vm:9092'
send_to_kafka = 1
kafka_topic = 'topicz1'
time_delay = 5
number_of_threads = 100
|
StarcoderdataPython
|
61641
|
<reponame>mmcenta/2048gym
from stable_baselines.common.env_checker import check_env
from gym_text2048.envs import Text2048Env, Text2048CappedEnv, Text2048WithHeuristicEnv, Text2048CappedWithHeuristicEnv
if __name__ == "__main__":
envs = [
Text2048Env(),
Text2048CappedEnv(),
Text2048WithHeuristicEnv(),
Text2048CappedWithHeuristicEnv(),
]
for env in envs:
check_env(env)
|
StarcoderdataPython
|
1740476
|
import functools
from math import log
import numpy as np
import tree
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.numpy import SMALL_NUMBER, MIN_LOG_NN_OUTPUT, \
MAX_LOG_NN_OUTPUT
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
from ray.rllib.utils.torch_ops import atanh
from ray.rllib.utils.typing import TensorType, List
torch, nn = try_import_torch()
class TorchDistributionWrapper(ActionDistribution):
"""Wrapper class for torch.distributions."""
@override(ActionDistribution)
def __init__(self, inputs: List[TensorType], model: TorchModelV2):
# If inputs are not a torch Tensor, make them one and make sure they
# are on the correct device.
if not isinstance(inputs, torch.Tensor):
inputs = torch.from_numpy(inputs)
if isinstance(model, TorchModelV2):
inputs = inputs.to(next(model.parameters()).device)
super().__init__(inputs, model)
# Store the last sample here.
self.last_sample = None
@override(ActionDistribution)
def logp(self, actions: TensorType) -> TensorType:
return self.dist.log_prob(actions)
@override(ActionDistribution)
def entropy(self) -> TensorType:
return self.dist.entropy()
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
return torch.distributions.kl.kl_divergence(self.dist, other.dist)
@override(ActionDistribution)
def sample(self) -> TensorType:
self.last_sample = self.dist.sample()
return self.last_sample
@override(ActionDistribution)
def sampled_action_logp(self) -> TensorType:
assert self.last_sample is not None
return self.logp(self.last_sample)
class TorchCategorical(TorchDistributionWrapper):
"""Wrapper class for PyTorch Categorical distribution."""
@override(ActionDistribution)
def __init__(self, inputs, model=None, temperature=1.0):
if temperature != 1.0:
assert temperature > 0.0, \
"Categorical `temperature` must be > 0.0!"
inputs /= temperature
super().__init__(inputs, model)
self.dist = torch.distributions.categorical.Categorical(
logits=self.inputs)
@override(ActionDistribution)
def deterministic_sample(self):
self.last_sample = self.dist.probs.argmax(dim=1)
return self.last_sample
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return action_space.n
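# Hedged illustration (not part of RLlib): build a TorchCategorical directly
# from a batch of logits and draw a sample. Shapes follow the class above.
def _categorical_demo():
    logits = torch.randn(4, 3)          # batch of 4, 3 discrete actions
    dist = TorchCategorical(logits, model=None)
    actions = dist.sample()             # shape: (4,)
    return actions, dist.logp(actions)  # log-prob of each sampled action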
class TorchMultiCategorical(TorchDistributionWrapper):
"""MultiCategorical distribution for MultiDiscrete action spaces."""
@override(TorchDistributionWrapper)
def __init__(self, inputs, model, input_lens):
super().__init__(inputs, model)
# If input_lens is np.ndarray or list, force-make it a tuple.
inputs_split = self.inputs.split(tuple(input_lens), dim=1)
self.cats = [
torch.distributions.categorical.Categorical(logits=input_)
for input_ in inputs_split
]
@override(TorchDistributionWrapper)
def sample(self):
arr = [cat.sample() for cat in self.cats]
self.last_sample = torch.stack(arr, dim=1)
return self.last_sample
@override(ActionDistribution)
def deterministic_sample(self):
arr = [torch.argmax(cat.probs, -1) for cat in self.cats]
self.last_sample = torch.stack(arr, dim=1)
return self.last_sample
@override(TorchDistributionWrapper)
def logp(self, actions):
# # If tensor is provided, unstack it into list.
if isinstance(actions, torch.Tensor):
actions = torch.unbind(actions, dim=1)
logps = torch.stack(
[cat.log_prob(act) for cat, act in zip(self.cats, actions)])
return torch.sum(logps, dim=0)
@override(ActionDistribution)
def multi_entropy(self):
return torch.stack([cat.entropy() for cat in self.cats], dim=1)
@override(TorchDistributionWrapper)
def entropy(self):
return torch.sum(self.multi_entropy(), dim=1)
@override(ActionDistribution)
def multi_kl(self, other):
return torch.stack(
[
torch.distributions.kl.kl_divergence(cat, oth_cat)
for cat, oth_cat in zip(self.cats, other.cats)
],
dim=1,
)
@override(TorchDistributionWrapper)
def kl(self, other):
return torch.sum(self.multi_kl(other), dim=1)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.sum(action_space.nvec)
class TorchDiagGaussian(TorchDistributionWrapper):
"""Wrapper class for PyTorch Normal distribution."""
@override(ActionDistribution)
def __init__(self, inputs, model):
super().__init__(inputs, model)
mean, log_std = torch.chunk(self.inputs, 2, dim=1)
self.dist = torch.distributions.normal.Normal(mean, torch.exp(log_std))
@override(ActionDistribution)
def deterministic_sample(self):
self.last_sample = self.dist.mean
return self.last_sample
@override(TorchDistributionWrapper)
def logp(self, actions):
return super().logp(actions).sum(-1)
@override(TorchDistributionWrapper)
def entropy(self):
return super().entropy().sum(-1)
@override(TorchDistributionWrapper)
def kl(self, other):
return super().kl(other).sum(-1)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape) * 2
class TorchSquashedGaussian(TorchDistributionWrapper):
"""A tanh-squashed Gaussian distribution defined by: mean, std, low, high.
The distribution will never return low or high exactly, but
`low`+SMALL_NUMBER or `high`-SMALL_NUMBER respectively.
"""
def __init__(self, inputs, model, low=-1.0, high=1.0):
"""Parameterizes the distribution via `inputs`.
Args:
low (float): The lowest possible sampling value
(excluding this value).
high (float): The highest possible sampling value
(excluding this value).
"""
super().__init__(inputs, model)
# Split inputs into mean and log(std).
mean, log_std = torch.chunk(self.inputs, 2, dim=-1)
# Clip `scale` values (coming from NN) to reasonable values.
log_std = torch.clamp(log_std, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT)
std = torch.exp(log_std)
self.dist = torch.distributions.normal.Normal(mean, std)
assert np.all(np.less(low, high))
self.low = low
self.high = high
@override(ActionDistribution)
def deterministic_sample(self):
self.last_sample = self._squash(self.dist.mean)
return self.last_sample
@override(TorchDistributionWrapper)
def sample(self):
# Use the reparameterization version of `dist.sample` to allow for
# the results to be backprop'able e.g. in a loss term.
normal_sample = self.dist.rsample()
self.last_sample = self._squash(normal_sample)
return self.last_sample
@override(ActionDistribution)
def logp(self, x):
# Unsquash values (from [low,high] to ]-inf,inf[)
unsquashed_values = self._unsquash(x)
# Get log prob of unsquashed values from our Normal.
log_prob_gaussian = self.dist.log_prob(unsquashed_values)
# For safety reasons, clamp somehow, only then sum up.
log_prob_gaussian = torch.clamp(log_prob_gaussian, -100, 100)
log_prob_gaussian = torch.sum(log_prob_gaussian, dim=-1)
# Get log-prob for squashed Gaussian.
unsquashed_values_tanhd = torch.tanh(unsquashed_values)
log_prob = log_prob_gaussian - torch.sum(
torch.log(1 - unsquashed_values_tanhd**2 + SMALL_NUMBER), dim=-1)
return log_prob
def _squash(self, raw_values):
# Returned values are within [low, high] (including `low` and `high`).
squashed = ((torch.tanh(raw_values) + 1.0) / 2.0) * \
(self.high - self.low) + self.low
return torch.clamp(squashed, self.low, self.high)
def _unsquash(self, values):
normed_values = (values - self.low) / (self.high - self.low) * 2.0 - \
1.0
# Stabilize input to atanh.
save_normed_values = torch.clamp(normed_values, -1.0 + SMALL_NUMBER,
1.0 - SMALL_NUMBER)
unsquashed = atanh(save_normed_values)
return unsquashed
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape) * 2
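# Hedged illustration (an assumption, not RLlib code): the NN output holds
# mean and log-std concatenated, so a 2-D action needs 4 inputs per row and
# samples land strictly inside (low, high).
def _squashed_gaussian_demo():
    nn_out = torch.randn(5, 4)  # batch of 5, action_dim 2
    dist = TorchSquashedGaussian(nn_out, model=None, low=-1.0, high=1.0)
    a = dist.sample()
    return a, dist.logp(a)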
class TorchBeta(TorchDistributionWrapper):
"""
A Beta distribution is defined on the interval [0, 1] and parameterized by
shape parameters alpha and beta (also called concentration parameters).
PDF(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
with Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
and Gamma(n) = (n - 1)!
"""
def __init__(self, inputs, model, low=0.0, high=1.0):
super().__init__(inputs, model)
# Stabilize input parameters (possibly coming from a linear layer).
self.inputs = torch.clamp(self.inputs, log(SMALL_NUMBER),
-log(SMALL_NUMBER))
self.inputs = torch.log(torch.exp(self.inputs) + 1.0) + 1.0
self.low = low
self.high = high
alpha, beta = torch.chunk(self.inputs, 2, dim=-1)
# Note: concentration0==beta, concentration1=alpha (!)
self.dist = torch.distributions.Beta(
concentration1=alpha, concentration0=beta)
@override(ActionDistribution)
def deterministic_sample(self):
self.last_sample = self._squash(self.dist.mean)
return self.last_sample
@override(TorchDistributionWrapper)
def sample(self):
# Use the reparameterization version of `dist.sample` to allow for
# the results to be backprop'able e.g. in a loss term.
normal_sample = self.dist.rsample()
self.last_sample = self._squash(normal_sample)
return self.last_sample
@override(ActionDistribution)
def logp(self, x):
unsquashed_values = self._unsquash(x)
return torch.sum(self.dist.log_prob(unsquashed_values), dim=-1)
def _squash(self, raw_values):
return raw_values * (self.high - self.low) + self.low
def _unsquash(self, values):
return (values - self.low) / (self.high - self.low)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape) * 2
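# Hedged illustration (an assumption, not RLlib code): inputs are split into
# the alpha and beta concentrations, so a 1-D action needs 2 inputs per row;
# samples land in [low, high] after the affine squash.
def _beta_demo():
    nn_out = torch.randn(6, 2)
    dist = TorchBeta(nn_out, model=None, low=0.0, high=1.0)
    x = dist.sample()
    return x, dist.logp(x)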
class TorchDeterministic(TorchDistributionWrapper):
"""Action distribution that returns the input values directly.
This is similar to DiagGaussian with standard deviation zero (thus only
requiring the "mean" values as NN output).
"""
@override(ActionDistribution)
def deterministic_sample(self):
return self.inputs
@override(TorchDistributionWrapper)
def sampled_action_logp(self):
return torch.zeros((self.inputs.size()[0], ), dtype=torch.float32)
@override(TorchDistributionWrapper)
def sample(self):
return self.deterministic_sample()
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape)
class TorchMultiActionDistribution(TorchDistributionWrapper):
"""Action distribution that operates on multiple, possibly nested actions.
"""
def __init__(self, inputs, model, *, child_distributions, input_lens,
action_space):
"""Initializes a TorchMultiActionDistribution object.
Args:
inputs (torch.Tensor): A single tensor of shape [BATCH, size].
model (TorchModelV2): The TorchModelV2 object used to produce
inputs for this distribution.
child_distributions (any[torch.Tensor]): Any struct
that contains the child distribution classes to use to
instantiate the child distributions from `inputs`. This could
be an already flattened list or a struct according to
`action_space`.
input_lens (any[int]): A flat list or a nested struct of input
split lengths used to split `inputs`.
action_space (Union[gym.spaces.Dict,gym.spaces.Tuple]): The complex
and possibly nested action space.
"""
if not isinstance(inputs, torch.Tensor):
inputs = torch.from_numpy(inputs)
if isinstance(model, TorchModelV2):
inputs = inputs.to(next(model.parameters()).device)
super().__init__(inputs, model)
self.action_space_struct = get_base_struct_from_space(action_space)
self.input_lens = tree.flatten(input_lens)
flat_child_distributions = tree.flatten(child_distributions)
split_inputs = torch.split(inputs, self.input_lens, dim=1)
self.flat_child_distributions = tree.map_structure(
lambda dist, input_: dist(input_, model), flat_child_distributions,
list(split_inputs))
@override(ActionDistribution)
def logp(self, x):
if isinstance(x, np.ndarray):
x = torch.Tensor(x)
# Single tensor input (all merged).
if isinstance(x, torch.Tensor):
split_indices = []
for dist in self.flat_child_distributions:
if isinstance(dist, TorchCategorical):
split_indices.append(1)
else:
split_indices.append(dist.sample().size()[1])
split_x = list(torch.split(x, split_indices, dim=1))
# Structured or flattened (by single action component) input.
else:
split_x = tree.flatten(x)
def map_(val, dist):
# Remove extra categorical dimension.
if isinstance(dist, TorchCategorical):
val = torch.squeeze(val, dim=-1).int()
return dist.logp(val)
# Remove extra categorical dimension and take the logp of each
# component.
flat_logps = tree.map_structure(map_, split_x,
self.flat_child_distributions)
return functools.reduce(lambda a, b: a + b, flat_logps)
@override(ActionDistribution)
def kl(self, other):
kl_list = [
d.kl(o) for d, o in zip(self.flat_child_distributions,
other.flat_child_distributions)
]
return functools.reduce(lambda a, b: a + b, kl_list)
@override(ActionDistribution)
def entropy(self):
entropy_list = [d.entropy() for d in self.flat_child_distributions]
return functools.reduce(lambda a, b: a + b, entropy_list)
@override(ActionDistribution)
def sample(self):
child_distributions = tree.unflatten_as(self.action_space_struct,
self.flat_child_distributions)
return tree.map_structure(lambda s: s.sample(), child_distributions)
@override(ActionDistribution)
def deterministic_sample(self):
child_distributions = tree.unflatten_as(self.action_space_struct,
self.flat_child_distributions)
return tree.map_structure(lambda s: s.deterministic_sample(),
child_distributions)
@override(TorchDistributionWrapper)
def sampled_action_logp(self):
p = self.flat_child_distributions[0].sampled_action_logp()
for c in self.flat_child_distributions[1:]:
p += c.sampled_action_logp()
return p
@override(ActionDistribution)
def required_model_output_shape(self, action_space, model_config):
return np.sum(self.input_lens)
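# Hedged illustration (not part of the original code): how the constructor above
# carves one flat model-output tensor into per-component chunks via `input_lens`.
# The sizes are made up for this sketch, e.g. 3 logits for a Discrete(3) component
# and 4 values (mean + log_std) for a 2-dim Gaussian component. Defined but never
# called.
def _demo_multi_action_split():
    flat_inputs = torch.randn(5, 7)  # batch of 5, with 3 + 4 outputs per row
    input_lens = [3, 4]              # one split length per child distribution
    chunks = torch.split(flat_inputs, input_lens, dim=1)
    return [tuple(c.shape) for c in chunks]  # [(5, 3), (5, 4)]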
|
StarcoderdataPython
|
1655515
|
<filename>recoapis/__init__.py
from .baserecoapi import TrainingException, RecommendationException
from .abacusrecoapi import AbacusRecoApi
from .amazonrecoapi import AmazonRecoApi
from .dummyrecoapi import DummyRecoApi
from .recombeerecoapi import RecombeeRecoApi
from .xmindsrecoapi import XMindsRecoApi
APIS = {
'recombee': RecombeeRecoApi,
'xminds': XMindsRecoApi,
'amazon': AmazonRecoApi,
'abacus': AbacusRecoApi,
'dummy': DummyRecoApi,
}
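# Hedged usage note (illustrative only, not from the original source): callers are
# presumably expected to look a backend up by name, e.g. `api_cls = APIS['dummy']`,
# and then instantiate it. The constructor arguments differ per *RecoApi class and
# are not visible here, so this is left as a comment rather than runnable code.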
|
StarcoderdataPython
|
1678215
|
#!/usr/bin/env python
from distutils.version import LooseVersion
from setuptools import setup, find_packages
def get_docker_client_requirement():
DOCKER_PY_REQUIREMENT = 'docker-py >= 1.8.1, < 2'
    DOCKER_REQUIREMENT = 'docker >= 2.0.0, < 3'
docker_client_installed = True
try:
import docker
except ImportError:
docker_client_installed = False
if docker_client_installed and\
LooseVersion(docker.__version__) < LooseVersion('2.0.0'):
return DOCKER_PY_REQUIREMENT
    return DOCKER_REQUIREMENT
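# Illustrative note (added, hedged): if the legacy `docker-py` client (docker
# version < 2.0.0) is already importable, the function above pins
# 'docker-py >= 1.8.1, < 2'; otherwise it pins 'docker >= 2.0.0, < 3' --
# presumably to avoid installing both incompatible client packages side by side.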
def find_requirements(fn):
lines = []
with open(fn) as f:
for line in f:
line = line.strip()
if not line.startswith('#'):
lines.append(line)
return lines
setup(
name='docker-make',
    description='build, tag, and push a bunch of related docker images via a single command',
version='1.1.7',
author='jizhilong',
author_email='<EMAIL>',
url='https://github.com/CtripCloud/docker-make',
license='Apache',
    keywords=['docker', 'image', 'build'],
packages=find_packages(exclude=['tests']),
entry_points={
'console_scripts': [
'docker-make = dmake.cli:main'
]
},
install_requires=find_requirements('requirements.pip') +\
[get_docker_client_requirement()],
tests_require=find_requirements('test-requirements.pip'),
test_suite='nose.collector',
classifiers=[],
)
|
StarcoderdataPython
|
1731373
|
<filename>new_preprocess_and_augment.py
from settings import *
import random
from scipy import ndarray
import cv2
import numpy as np
import math
import skimage as sk
from skimage import transform
from skimage import util
from skimage import io
from skimage.transform import SimilarityTransform
import os
import glob
def get_mask(image):
result = image.copy()
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower = np.array([50, 100, 100])
upper = np.array([140, 255, 255])
mask = cv2.inRange(image, lower, upper)
#find bounds of a symbol and crop it
points = cv2.findNonZero(mask)
x,y,w,h = cv2.boundingRect(points)
crop_img = mask[y:y+h, x:x+w]
result = cv2.bitwise_and(result, result, mask=mask)
return crop_img
def get_resized_image(test_image):
input_width = test_image.shape[1]
input_height = test_image.shape[0]
scale_factor = (OUTPUT_IMAGE_SIZE / max(input_height, input_width))*0.6
needed_width = int(input_width * scale_factor)
needed_height = int(input_height * scale_factor)
dim = (needed_width, needed_height)
height = OUTPUT_IMAGE_SIZE
width = OUTPUT_IMAGE_SIZE
# resize image
test_image = cv2.resize(test_image, dim, interpolation = cv2.INTER_AREA)
blank_image = np.zeros(shape=[height, width, 3], dtype=np.uint8)
blank_image = cv2.cvtColor(blank_image, cv2.COLOR_BGR2GRAY)
x_offset = int((width - test_image.shape[1])/2)
y_offset = int((height - test_image.shape[0])/2)
blank_image[ y_offset:y_offset+test_image.shape[0], x_offset:x_offset+test_image.shape[1]] = test_image
return blank_image
def random_transformation1(img):
tform = transform.SimilarityTransform(scale=1, rotation=math.pi/4, translation=(img.shape[0]/2, -100))
rotated = transform.warp(img, tform)
return rotated
def random_transformation(img):
rows,cols = img.shape
og_pt1 = [rows/6, cols/6]
og_pt2 = [rows/6, cols - cols/6]
og_pt3 = [rows - rows/6, cols/6]
og_pts = [og_pt1, og_pt2, og_pt3]
tf_pts = []
for point in og_pts:
shift1 = random.uniform(-1*rows/9, rows/9)
shift2 = random.uniform(-1*cols/9, cols/9)
tf_pt = [point[0]+shift1, point[1]+shift2]
tf_pts.append(tf_pt)
pts1 = np.float32(og_pts)
pts2 = np.float32(tf_pts)
M = cv2.getAffineTransform(pts1,pts2)
dst = cv2.warpAffine(img,M,(cols,rows), borderValue=(0,0,0))
return dst
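# Note (added for clarity): cv2.getAffineTransform expects exactly three source
# points and three destination points, so jittering the three anchor points above
# yields a random shear/scale/translation while keeping the mapping affine.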
def random_dilate(img):
kernel = np.ones((5,5), np.uint8)
eroded = cv2.dilate(img, kernel, iterations=1)
return eroded
def random_erode(img):
kernel = np.ones((5,5), np.uint8)
eroded = cv2.erode(img, kernel, iterations=1)
return eroded
def random_rotation(img):
rows,cols = img.shape
img_center = (cols / 2, rows / 2)
random_degree = random.uniform(-20, 20)
M = cv2.getRotationMatrix2D(img_center, random_degree, 1)
rotated_image = cv2.warpAffine(img, M, (cols, rows), borderValue=(0,0,0))
return rotated_image
# pick a random degree of rotation between 25% on the left and 25% on the right
#random_degree = random.uniform(-25, 25)
#return sk.transform.rotate(image_array, random_degree)
def horizontal_warping(img):
# Horizontal wave
rows, cols = img.shape
img_output = np.zeros(img.shape, dtype=img.dtype)
for i in range(rows):
for j in range(cols):
offset_x = 0
a = float(random.randint(15, 18))
b = random.randint(140, 160)
offset_y = int(a * math.sin(2 * 3.14 * j / b))
if i+offset_y < rows:
img_output[i,j] = img[(i+offset_y)%rows,j]
else:
img_output[i,j] = 0
return img_output
def vertical_warping(img):
# Vertical wave
rows, cols = img.shape
img_output = np.zeros(img.shape, dtype=img.dtype)
for i in range(rows):
for j in range(cols):
a = float(random.randint(22, 27))
b = random.randint(170, 190)
offset_x = int(a * math.sin(2 * 3.14 * i / b))
offset_y = 0
            if j+offset_x < cols:
img_output[i,j] = img[i,(j+offset_x)%cols]
else:
img_output[i,j] = 0
return img_output
# dictionary of the transformations functions we defined earlier
available_transformations = {
'transform': random_transformation,
'transform2': random_transformation1,
'erosion':random_erode,
'rotate': random_rotation,
"horizontal_warp": horizontal_warping,
"vertical_warp": vertical_warping,
#'noise': random_noise,
#'horizontal_flip': horizontal_flip
}
def preprocess(image_path):
image = cv2.imread(image_path)
mask = get_mask(image)
preprocessed = get_resized_image(mask)
return preprocessed
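# Hedged usage sketch (illustrative only; the example path below is hypothetical
# and OUTPUT_IMAGE_SIZE comes from settings.py). Defined but never called.
def _demo_preprocess(example_path="samples/alpha-01.png"):
    if not os.path.exists(example_path):
        return None
    preprocessed = preprocess(example_path)
    return preprocessed.shape  # expected: (OUTPUT_IMAGE_SIZE, OUTPUT_IMAGE_SIZE)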
def augment_images(path_to_file, num_files_needed):
image_to_transform = cv2.imread(path_to_file)
image_to_transform = cv2.cvtColor(image_to_transform, cv2.COLOR_BGR2GRAY)
num_generated_files = 0
# print(path_to_file)
path_to_folder = os.path.dirname(path_to_file)
num_existing_images = len(glob.glob(os.path.join(path_to_folder, "*")))
# print("num_existing_images = ", num_existing_images)
while num_generated_files <= num_files_needed:
num_transformations = 0
        transformed_image = image_to_transform
        num_transformations_to_apply = random.randint(1, len(available_transformations))
        while num_transformations <= num_transformations_to_apply:
            # choose a random transformation and apply it to the running result so
            # that several transformations can compose on a single image
            key = random.choice(list(available_transformations))
            transformed_image = available_transformations[key](transformed_image)
num_transformations += 1
new_file_path = '%s/augm_%s.png' % (path_to_folder, num_generated_files+num_existing_images)
# write image to the disk
sk.io.imsave(new_file_path, transformed_image, check_contrast=False)
num_generated_files += 1
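# Hedged usage sketch (illustrative only; the file path and count below are made
# up for the example). Defined but never called.
def _demo_augment(path_to_file="train/alpha/alpha_out.png", num_files_needed=10):
    if os.path.exists(path_to_file):
        augment_images(path_to_file, num_files_needed)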
def get_label_name(image_path):
# some image names have dashes at the beginning of the name, so use loop to get the first nonempty word
file_name = os.path.basename(image_path).split("-")
for element in file_name:
if len(element)>0:
return element
def preprocess_and_save(image_path, label_name, image_full_name):
preprocessed_image = preprocess(image_path)
# create a folder for symbol in train, save to train
os.chdir(PATH_TO_SAVE_TRAIN)
path_in_dir = os.path.join(PATH_TO_SAVE_TRAIN, label_name)
if not os.path.isdir(path_in_dir):
os.mkdir(path_in_dir)
new_file_name = image_full_name
path_for_image = os.path.join(path_in_dir, new_file_name)
cv2.imwrite(path_for_image, preprocessed_image)
# create a folder for symbol in train, save to train
os.chdir(PATH_TO_SAVE_VAL)
path_in_dir = os.path.join(PATH_TO_SAVE_VAL, label_name)
if not os.path.isdir(path_in_dir):
os.mkdir(path_in_dir)
new_file_name = image_full_name
path_for_image = os.path.join(path_in_dir, new_file_name)
cv2.imwrite(path_for_image, preprocessed_image)
"""
def preprocess_augment_and_save(image_path, label_name):
label_name = get_label_name(image_path)
preprocessed_image = preprocess(image_path)
# create a folder for each symbol in train, save and augment to train
os.chdir(PATH_TO_SAVE_TRAIN)
path_in_dir = os.path.join(PATH_TO_SAVE_TRAIN, label_name)
print("path for directory = ", path_in_dir)
if not os.path.isdir(path_in_dir):
os.mkdir(path_in_dir)
new_file_name = label_name + "_out.png"
path_for_image = os.path.join(path_in_dir, new_file_name)
cv2.imwrite(path_for_image, preprocessed_image)
augment_images(path_in_dir, preprocessed_image, NUM_TRAIN_FILES_DESIRED)
# create a folder for each symbol in val, save and augment to val
os.chdir(PATH_TO_SAVE_TRAIN)
path_in_dir = os.path.join(PATH_TO_SAVE_VAL, label_name)
if not os.path.isdir(path_in_dir):
os.mkdir(path_in_dir)
new_file_name = label_name + "_out.png"
path_for_image = os.path.join(path_in_dir, new_file_name)
cv2.imwrite(path_for_image, preprocessed_image)
augment_images(path_in_dir, preprocessed_image, NUM_VAL_FILES_DESIRED)
"""
|
StarcoderdataPython
|
1719127
|
from typing import List
from time import sleep
from models.client import Client
from models.account import Account
accounts: List[Account] = []
def main() -> None:
menu()
def menu() -> None:
print('=============================')
print('============ATM==============')
print('=========Python Bank=========')
print('Select on option:')
print('1 - Create Account')
print('2 - Make a withdrawal')
print('3 - Make a deposit')
    print('4 - Make a transference')
print('5 - List accounts')
print('6 - LogOFF')
option: int = int(input())
if option == 1:
create_account()
elif option == 2:
make_withdraw()
elif option == 3:
make_deposit()
elif option == 4:
make_transference()
elif option == 5:
list_accounts()
elif option == 6:
print('see you!')
sleep(2)
exit(0)
else:
print('Please verify the option that you want')
sleep(2)
menu()
def create_account() -> None:
"""
    Create an account and add it to the list of all accounts
"""
print('Please enter client data: ')
name: str = input('Name of the client: ')
email: str = input('Email of the client: ')
cpf: str = input('CPF of the client: ')
birth_date: str = input('Birth Date of the client: ')
client = Client(name, email, cpf, birth_date)
new_account: Account = Account(client)
accounts.append(new_account)
    print('Account created successfully!')
    print('Your data: ')
print('------------')
print(new_account)
sleep(2)
menu()
def make_withdraw() -> None:
if len(accounts) > 0:
number: int = int(input('Inform your Account Number: '))
account: Account = search_account_by_number(number)
if account:
            value: float = float(input('Inform the withdrawal value: '))
account.withdraw(value)
else:
print("Ops, We don't find this Account Number {number}")
else:
        print('Oops, please check that at least one account has been registered.')
sleep(2)
menu()
def make_deposit() -> None:
if len(accounts) > 0:
number: int = int(input('Inform your Account Number: '))
account: Account = search_account_by_number(number)
if account:
value: float = float(input('Inform the deposit value: '))
account.deposit(value)
else:
print("Ops, We don't find this Account Number {number}")
else:
        print('Oops, no accounts have been registered yet.')
sleep(2)
menu()
def make_transference() -> None:
if len(accounts) > 0:
number: int = int(input('Inform your Account Number: '))
account_origin: Account = search_account_by_number(number)
if account_origin:
            number_destination_acc: int = int(
                input('What is the destination account? '))
            account_dest: Account = search_account_by_number(
                number_destination_acc)
if account_dest:
value: float = float(input('Inform the tranference value: '))
account_origin.tranfer(value, account_dest)
else:
                print('Please verify the destination account!')
else:
print("Ops, We don't find this Account Number {number}")
else:
        print('Oops, no accounts have been registered yet.')
sleep(2)
menu()
def list_accounts() -> None:
if len(accounts) > 0:
        print('Account listing')
for acc in accounts:
print(acc)
print('-----------')
sleep(1)
else:
        print('Oops, no accounts have been registered yet.')
sleep(2)
menu()
def search_account_by_number(number: int) -> Account:
c: Account = None
if len(accounts) > 0:
for acc in accounts:
if acc.number == number:
c = acc
return c
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3354865
|
<gh_stars>0
from distutils.core import setup
setup(
name='Flyrc',
version='0.1.1',
author='<NAME>',
author_email='<EMAIL>',
packages=['flyrc'],
url='https://github.com/mrflea/flyrc',
license='LICENSE.txt',
description='Fully-featured IRC client library.',
long_description=open('README.md').read(),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Topic :: Communications :: Chat :: Internet Relay Chat",
"Topic :: Software Development :: Libraries :: Python Modules"
],
install_requires=[
'gevent >= 0.13.6'
]
)
|
StarcoderdataPython
|
3203035
|
<reponame>qpit/CVQKDsim
# utilities.py
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
from numba import njit
@njit(fastmath=True, cache=True)
def conditional_probability(k, i, r, a, p, d):
"""
Calculates the conditional probability to be used for the calculation of the a priori probabilities.
:param k: The discretized variable.
:param i: The value of the bin.
:param r: The correlation parameter.
:param a: The discretization cut-off parameter.
:param p: The number of bins exponent.
:param d: The constant-size interval divider.
:return: The conditional probability P(K|X).
"""
if i == 0:
ak = -np.inf
bk = -a + d
elif i == 2 ** p - 1:
ak = -a + (2 ** p - 1) * d
bk = np.inf
else:
ak = -a + i * d
bk = -a + (i + 1) * d
A = (ak - k * r) / np.sqrt(2 * (1 - r ** 2))
B = (bk - k * r) / np.sqrt(2 * (1 - r ** 2))
prob = 0.5 * (math.erf(B) - math.erf(A))
return prob
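# Hedged sanity-check sketch (not part of the original code): for a fixed value of
# the conditioning variable, the conditional probabilities over all 2**p bins
# should sum to 1. The parameter values below are made up, and d = 2 * a / 2**p is
# an assumed convention for the interval divider; the bins tile the real line for
# any positive d, so the unit sum holds either way. Defined but never called.
def _demo_conditional_probability_sums_to_one():
    r, a, p = 0.9, 7.0, 4
    d = 2 * a / (2 ** p)
    total = sum(conditional_probability(0.3, i, r, a, p, d) for i in range(2 ** p))
    return abs(total - 1.0) < 1e-9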
def q_ary_to_binary(m, q):
"""
    Converts each symbol of a q-ary sequence into its q-bit binary representation,
    yielding a flat binary sequence of length len(m) * q.
:param m: The q-ary sequence.
:param q: The Galois field exponent.
:return: The binary representations of the q-ary sequences.
"""
mA_bin = np.empty(len(m) * q, dtype=np.int8) # Binary representation of Alice's q-ary message
for i in range(len(m)):
bitsA = np.binary_repr(m[i], width=q)
for j in range(q):
mA_bin[i * q + j] = bitsA[j]
return mA_bin
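# Hedged usage sketch (illustrative only): mapping two GF(2**3) symbols to their
# 3-bit binary representations. Defined but never called.
def _demo_q_ary_to_binary():
    m = np.array([5, 2], dtype=np.int64)
    bits = q_ary_to_binary(m, q=3)
    return bits.tolist()  # expected: [1, 0, 1, 0, 1, 0], i.e. '101' then '010'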
|
StarcoderdataPython
|
186488
|
# @copyright@
# Copyright (c) 2006 - 2020 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
DOCUMENTATION = """
module: stacki_storage_controller_info
short_description: Return data about Stacki storage controllers
description:
- If name is supplied, and scope is not global, returns data about a single scoped item
- If name is supplied, and scope is global, then an error is returned
- If name is not supplied, and scope is not global, then all data in that scope is returned
- If name is not supplied, and scope is global, then all global data is returned
options:
name:
description:
- The name of the scoped item to return data about
type: str
required: false
scope:
description:
- The scope to return data about
type: str
required: false
choices: ['global', 'appliance', 'os', 'environment', 'host']
default: global
"""
EXAMPLES = """
- name: Get all global data
stacki_storage_controller_info:
register: results
- name: Get data about backend appliance
stacki_storage_controller_info:
name: backend
scope: appliance
register: results
- name: Get data about all hosts
stacki_storage_controller_info:
scope: host
register: results
"""
RETURN = """
controllers:
description:
- List of storage controllers
returned: on success
type: complex
contains:
appliance:
description:
- Name of the appliance for this data
type: str
returned: scope is appliance
os:
description:
- Name of the os for this data
type: str
returned: scope is os
environment:
description:
- Name of the environment for this data
type: str
returned: scope is environment
host:
description:
- Name of the host for this data
type: str
returned: scope is host
enclosure:
description:
- Enclosure number or None
type: int
adapter:
description:
- Adapter number or None
type: int
slot:
description:
- Slot number as a string, or '*' for all slots
type: str
raidlevel:
description:
- Raid level as a string
type: str
arrayid:
description:
- Array id as a string, or '*' for all arrays
type: str
options:
description:
- Controller options
type: str
source:
description:
- The scope source of the data
type: str
choices: ['G', 'A', 'O', 'E', 'H']
returned: scope is host
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.stacki import run_stack_command, StackCommandError
def main():
# Define the arguments for this module
argument_spec = dict(
name=dict(type="str", required=False, default=None),
scope=dict(
type="str", required=False, default="global",
choices=["global", "appliance", "os", "environment", "host"]
)
)
# Create our module object
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
# Initialize a blank result
result = {
"changed": False,
"controllers": []
}
# Bail if the user is just checking syntax of their playbook
if module.check_mode:
module.exit_json(**result)
# Fetch our info from Stacki
args = ["scope=" + module.params["scope"]]
if module.params["name"]:
args.append(module.params["name"])
try:
for controller in run_stack_command("list.storage.controller", args):
# Make sure slot and arrayid are both strings
controller["slot"] = str(controller["slot"])
controller["arrayid"] = str(controller["arrayid"])
# Add it to the results
result["controllers"].append(controller)
except StackCommandError as e:
# Fetching the data failed
module.fail_json(msg=e.message, **result)
# Return our data
module.exit_json(**result)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3319421
|
<reponame>Ifyokoh/End-to-End-Machine-Learning
from setuptools import setup, find_packages
classifiers = [
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
]
setup(
name='propertypro-scrapper',
version='2.0',
description='A scraper that helps scrape a housing website propertypro',
long_description=open('README.txt').read(),
url='https://github.com/Ifyokoh/End-to-End-Machine-Learning',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=classifiers,
keywords='scraper',
packages=find_packages()
)
|
StarcoderdataPython
|
86768
|
from opentrons import robot, containers, instruments
robot.head_speed(x=18000, y=18000, z=5000, a=700, b=700)
#Deck setup
tiprack_1000 = containers.load("tiprack-1000ul-H", "B3")
source_row = containers.load("FluidX_24_5ml", "A1", "acid")
source_col = containers.load("FluidX_24_5ml", "A2", "amine")
source_trough4row = containers.load("trough-12row", "C2")
destination_QC = containers.load("96-PCR-flat", "B1", "QC")
destination_screen = containers.load("Labcyte_384PP", "C1", "384_Screen")
trash = containers.load("point", "C3")
# Pipette setup
p1000 = instruments.Pipette(
name= 'eppendorf1000_no_min',
axis='b',
trash_container=trash,
tip_racks=[tiprack_1000],
max_volume=1000,
min_volume=0,
channels=1,
)
row_loc_list = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'B1', 'B2', 'B3', 'B4', 'B5', 'B6']
col_loc_list = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'B1', 'B2']
volume_to_dispense = 20
location_QC_solvent = 'A5'
volume_QC_solvent = 100
volume_to_take_out = 30
# Do protocol
n=0
m=208
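# Note (added for clarity): `n` indexes consecutive wells of the QC plate while
# `m` indexes wells of the 384-well screen plate starting at 208, so every source
# well is dispensed once into the QC plate and once into the screen plate.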
for i, x in enumerate(row_loc_list):
source_location = x
p1000.pick_up_tip()
p1000.transfer(volume_to_dispense, source_row.wells(source_location), destination_QC.wells(n).bottom(1), blow_out=True, new_tip = 'never')
p1000.transfer(volume_to_take_out, source_row.wells(source_location), destination_screen.wells(m).bottom(1), blow_out=True, new_tip = 'never')
p1000.drop_tip()
m=m+1
n=n+1
for i, x in enumerate(col_loc_list):
source_location = x
p1000.pick_up_tip()
p1000.transfer(volume_to_dispense, source_col.wells(source_location), destination_QC.wells(n).bottom(1), blow_out=True, new_tip = 'never')
p1000.transfer(volume_to_take_out, source_col.wells(source_location), destination_screen.wells(m).bottom(1), blow_out=True, new_tip = 'never')
p1000.drop_tip()
m=m+1
n=n+1
p1000.distribute(volume_QC_solvent, source_trough4row.wells(location_QC_solvent), [x.top() for x in destination_QC.wells(0, to=n-1)])
robot.home()
|
StarcoderdataPython
|