id (string, lengths 1-7) | text (string, lengths 6-1.03M) | dataset_id (string, 1 class)
---|---|---|
26681
|
from django import template
register = template.Library()
@register.filter
def has_group(user, name):
return user.groups.filter(name=name).exists()
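
# Illustrative usage sketch (the tag-library name "group_tags" and the group
# name "editors" are assumptions, not from the original source). With this
# module placed in an app's templatetags/ package, a template could use:
#
#   {% load group_tags %}
#   {% if request.user|has_group:"editors" %}
#       ... editor-only markup ...
#   {% endif %}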
|
StarcoderdataPython
|
3267177
|
# Generated by Django 2.0.2 on 2018-02-19 14:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0005_auto_20160504_1520'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='deleted',
field=models.BooleanField(db_index=True, default=False),
),
migrations.AlterField(
model_name='notification',
name='emailed',
field=models.BooleanField(db_index=True, default=False),
),
migrations.AlterField(
model_name='notification',
name='public',
field=models.BooleanField(db_index=True, default=True),
),
migrations.AlterField(
model_name='notification',
name='unread',
field=models.BooleanField(db_index=True, default=True),
),
]
|
StarcoderdataPython
|
34073
|
<reponame>asuol/worky
"""
MIT License
Copyright (c) 2020 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from flask import Flask, request, redirect
from flask.templating import render_template
from datetime import datetime, timedelta
from worky.models.index_model import IndexModel
from worky.models.completed_model import CompletedModel
from worky.models.create_task_model import CreateTaskModel
from worky.models.update_task_model import UpdateTaskModel
from worky.models.confirm_form_model import ConfirmFormModel
from worky.storage import Storage
from waitress import serve
app = Flask(__name__)
def run(db, host, port):
app.config['STORAGE'] = Storage(db)
serve(app, host=host, port=port)
@app.route('/')
def index():
storage = app.config['STORAGE']
overdue_tasks = storage.get_overdue_tasks()
active_tasks = storage.get_active_tasks()
index_model = IndexModel(active_tasks, overdue_tasks)
return render_template("index.html", model=index_model)
@app.route('/createForm')
def create_form():
default_due_date = datetime.utcnow() + timedelta(weeks=2)
default_due_date = datetime.strftime(default_due_date, '%Y-%m-%d')
create_task_model = CreateTaskModel("/createTask", default_due_date)
return render_template("createTask.html", model=create_task_model)
@app.route('/createTask')
def create_task():
storage = app.config['STORAGE']
description = request.args.get('description')
due_date = request.args.get('dueDate')
storage.create_task(description, due_date)
return redirect("/", code=302)
@app.route('/updateForm')
def update_form():
storage = app.config['STORAGE']
task_id = request.args.get('id')
task = storage.get_task(task_id)
update_task_model = UpdateTaskModel("/updateTask", task.due_date, task)
return render_template("updateTask.html", model=update_task_model)
@app.route('/updateTask')
def update_task():
storage = app.config['STORAGE']
task_id = request.args.get('id')
description = request.args.get('description')
due_date = request.args.get('dueDate')
storage.update_task(task_id, description, due_date)
return redirect("/", code=302)
@app.route('/deleteForm')
def delete_form():
storage = app.config['STORAGE']
task_id = request.args.get('id')
task = storage.get_task(task_id)
action = "Delete"
form_action = "/deleteTask"
confirm_model = ConfirmFormModel(form_action, task.due_date, task, action)
return render_template("confirmForm.html", model=confirm_model)
@app.route('/deleteTask')
def delete_task():
storage = app.config['STORAGE']
task_id = request.args.get('id')
storage.delete_task(task_id)
return redirect("/", code=302)
@app.route('/completeForm')
def complete_form():
storage = app.config['STORAGE']
task_id = request.args.get('id')
task = storage.get_task(task_id)
action = "Complete"
form_action = "/completeTask"
confirm_model = ConfirmFormModel(form_action, task.due_date, task, action)
return render_template("confirmForm.html", model=confirm_model)
@app.route('/completeTask')
def complete_task():
storage = app.config['STORAGE']
task_id = request.args.get('id')
storage.complete_task(task_id)
return redirect("/", code=302)
@app.route('/completed')
def completed():
storage = app.config['STORAGE']
completed_tasks = storage.get_completed_tasks()
completed_model = CompletedModel(completed_tasks)
return render_template("completed.html", model=completed_model)
|
StarcoderdataPython
|
30126
|
<filename>app/__init__.py<gh_stars>1-10
import os
import sys
from instance.config import DATABASE_URI
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_compress import Compress
app = Flask(__name__)
Compress(app)
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
from app import bot
from app import web
|
StarcoderdataPython
|
3277041
|
'''For computing flag vector of product in various bases
>>> product_formula(2,3)
array([[[ 1, 12, 30, 34, 120, 21, 120, 180],
[ 1, 15, 39, 44, 159, 26, 159, 240],
[ 1, 18, 45, 48, 180, 27, 180, 270]],
<BLANKLINE>
[[ 1, 16, 40, 44, 160, 26, 160, 240],
[ 1, 20, 52, 57, 212, 32, 212, 320],
[ 1, 24, 60, 62, 240, 33, 240, 360]]])
'''
# For Python2 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__metaclass__ = type
import numpy
from .fibonacci import FIB_WORDS
from .data import P_flag
from .matrices import fib_zeros_array
def word_from_IC(s):
return b''.join(
{'C': b'\x01', 'D': b'\x02'}[c]
for c in s.replace('IC', 'D')
)
def index_from_IC(s):
return FIB_WORDS.index(word_from_IC(s))
def change_product_basis(product_triple, a, b, c):
n_a, n_b, n_c = product_triple.shape
value = numpy.zeros(product_triple.shape, int)
rows = numpy.reshape(product_triple, (n_a * n_b, n_c))
for i in range(n_a):
for j in range(n_b):
# Convolve the columns to get coefficients.
coefficients = [
r * s
# White space is to slow reader - pay attention.
for r in a[ :, i]
for s in b[ :, j]
]
join_ic = sum(
c * r
for (c, r) in zip(coefficients, rows)
)
join_cd = numpy.dot(c, join_ic)
value[i, j, : ] = join_cd
return value
# TODO: Check this - it only looks right.
def product_formula(n, m):
value = fib_zeros_array(n, m, n + m)
for line in P_flag[n + m]:
pieces = line.split()
keys, column = pieces[0], tuple(map(int, pieces[1:]))
keys = keys[2:-1].split(',')
        i = index_from_IC(keys[0])
        j = index_from_IC(keys[1])
if len(keys[0]) == n:
value[i, j, : ] = column
if len(keys[0]) == m:
value[j, i, : ] = column
return value
|
StarcoderdataPython
|
83704
|
<reponame>ecly/adventofcode2020
import sys
from collections import defaultdict
def parse():
lines = sys.stdin.read().strip().split("\n")
rules_fwd = defaultdict(list)
rules_bwd = defaultdict(list)
for line in lines:
if not line.strip():
continue
bag, rest = line.split(" bags contain ")
for rule in rest.split(", "):
if rule == "no other bags.":
continue
count, adj1, adj2, _ = rule.split()
tgt = f"{adj1} {adj2}"
rules_fwd[tgt].append(bag)
rules_bwd[bag].append((int(count), tgt))
return rules_fwd, rules_bwd
def part1(rules):
seen = set()
queue = rules["shiny gold"]
while queue:
c = queue.pop()
seen.add(c)
for color in rules[c]:
if color not in seen:
queue.append(color)
return len(seen)
def count_bags(color, rules):
count = 0
for cnt, clr in rules[color]:
count += cnt
count += cnt * count_bags(clr, rules)
return count
def part2(rules):
return count_bags("shiny gold", rules)
def main():
rules_fwd, rules_bwd = parse()
print(part1(rules_fwd))
print(part2(rules_bwd))
if __name__ == "__main__":
main()
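
# Illustrative invocation (the file name is a placeholder): the script reads
# the puzzle input from stdin, e.g.
#   python day07.py < input.txt
# where each line looks like
#   "light red bags contain 1 bright white bag, 2 muted yellow bags."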
|
StarcoderdataPython
|
3341008
|
<filename>profiles/serializers_test.py<gh_stars>10-100
# pylint: disable=unused-argument,too-many-arguments,redefined-outer-name
"""
Tests for serializers for profiles REST APIS
"""
import pytest
import factory
from django.core.files.uploadedfile import SimpleUploadedFile
from rest_framework.exceptions import ValidationError
from profiles.factories import UserWebsiteFactory
from profiles.models import Profile, PERSONAL_SITE_TYPE, FACEBOOK_DOMAIN
from profiles.serializers import (
UserSerializer,
ProfileSerializer,
UserWebsiteSerializer,
)
small_gif = (
b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04"
b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02"
b"\x02\x4c\x01\x00\x3b"
)
def test_serialize_user(user):
"""
Test serializing a user
"""
profile = user.profile
assert UserSerializer(user).data == {
"id": user.id,
"username": user.username,
"profile": {
"name": profile.name,
"image": profile.image,
"image_small": profile.image_small,
"image_medium": profile.image_medium,
"image_file": profile.image_file.url,
"image_small_file": profile.image_small_file.url,
"image_medium_file": profile.image_medium_file.url,
"profile_image_small": profile.image_small_file.url,
"profile_image_medium": profile.image_medium_file.url,
"bio": profile.bio,
"headline": profile.headline,
"username": profile.user.username,
"placename": profile.location["value"],
},
}
def test_serialize_create_user(db, mocker):
"""
Test creating a user
"""
profile = {
"name": "name",
"image": "image",
"image_small": "image_small",
"image_medium": "image_medium",
"email_optin": True,
"toc_optin": True,
"bio": "bio",
"headline": "headline",
"placename": "",
}
get_or_create_auth_tokens_stub = mocker.patch(
"channels.api.get_or_create_auth_tokens"
)
enrollment_job_mock = mocker.patch(
"authentication.api.update_enrollments_for_email.delay"
)
serializer = UserSerializer(data={"email": "test<EMAIL>", "profile": profile})
serializer.is_valid(raise_exception=True)
user = serializer.save()
get_or_create_auth_tokens_stub.assert_called_once_with(user)
enrollment_job_mock.assert_called_once_with(user.email)
del profile["email_optin"] # is write-only
del profile["toc_optin"] # is write-only
profile.update(
{
"image_file": None,
"image_small_file": None,
"image_medium_file": None,
"profile_image_small": "image_small",
"profile_image_medium": "image_medium",
"username": user.username,
}
)
assert UserSerializer(user).data == {
"id": user.id,
"username": user.username,
"profile": profile,
}
@pytest.mark.parametrize(
"key,value",
[
("name", "name_value"),
("image", "image_value"),
("image_small", "image_small_value"),
("image_medium", "image_medium_value"),
("email_optin", True),
("email_optin", False),
("bio", "bio_value"),
("headline", "headline_value"),
("toc_optin", True),
("toc_optin", False),
],
)
def test_update_user_profile(mocker, user, key, value):
"""
Test updating a profile via the UserSerializer
"""
mock_after_profile_created_or_updated = mocker.patch(
"profiles.serializers.after_profile_created_or_updated"
)
profile = user.profile
serializer = UserSerializer(
instance=user, data={"profile": {key: value}}, partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
profile2 = Profile.objects.get(user=user)
for prop in (
"name",
"image",
"image_small",
"image_medium",
"email_optin",
"toc_optin",
"bio",
"headline",
):
if prop == key:
if isinstance(value, bool):
assert getattr(profile2, prop) is value
else:
assert getattr(profile2, prop) == value
else:
assert getattr(profile2, prop) == getattr(profile, prop)
mock_after_profile_created_or_updated.assert_called_once_with(profile)
@pytest.mark.parametrize(
"data,is_valid",
[
({}, True),
("notjson", False),
({"bad": "json"}, False),
(None, True),
({"value": "city"}, True),
],
)
def test_location_validation(user, data, is_valid):
"""Test that lcoation validation works correctly"""
serializer = ProfileSerializer(
instance=user.profile, data={"location": data}, partial=True
)
assert serializer.is_valid(raise_exception=False) is is_valid
@pytest.mark.parametrize(
"key,value",
[
("name", "name_value"),
("bio", "bio_value"),
("headline", "headline_value"),
("location", {"value": "Hobbiton, The Shire, Middle-Earth"}),
(
"image_file",
SimpleUploadedFile("small.gif", small_gif, content_type="image/gif"),
),
],
)
def test_update_profile(mocker, user, key, value):
"""
Test updating a profile via the ProfileSerializer
"""
mock_after_profile_created_or_updated = mocker.patch(
"profiles.serializers.after_profile_created_or_updated"
)
profile = user.profile
serializer = ProfileSerializer(
instance=user.profile, data={key: value}, partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
profile2 = Profile.objects.first()
for prop in (
"name",
"image_file",
"email_optin",
"toc_optin",
"bio",
"headline",
"location",
):
if prop == key:
if isinstance(value, bool):
assert getattr(profile2, prop) is value
elif key == "image_file":
assert getattr(profile2, prop).read() == small_gif
else:
assert getattr(profile2, prop) == value
else:
assert getattr(profile2, prop) == getattr(profile, prop)
mock_after_profile_created_or_updated.assert_called_once_with(profile)
def test_serialize_profile_websites(user):
"""Tests that the ProfileSerializer includes UserWebsite information when an option is set via the context"""
profile = user.profile
user_websites = UserWebsiteFactory.create_batch(
2,
profile=profile,
site_type=factory.Iterator([PERSONAL_SITE_TYPE, FACEBOOK_DOMAIN]),
)
serialized_profile = ProfileSerializer(
profile, context={"include_user_websites": True}
).data
serialized_sites = UserWebsiteSerializer(user_websites, many=True).data
assert len(serialized_profile["user_websites"]) == 2
# Check that the two lists of OrderedDicts are equivalent
assert sorted(
[list(data.items()) for data in serialized_profile["user_websites"]]
) == sorted([list(data.items()) for data in serialized_sites])
class TestUserWebsiteSerializer:
"""UserWebsiteSerializer tests"""
def test_serialize(self):
"""
Test serializing a user website
"""
user_website = UserWebsiteFactory.build()
assert UserWebsiteSerializer(user_website).data == {
"id": user_website.id,
"url": user_website.url,
"site_type": user_website.site_type,
}
def test_deserialize(self, mocker, user):
"""
Test deserializing a user website
"""
url = "https://example.com"
site_type = "dummy"
patched_get_site_type = mocker.patch(
"profiles.serializers.get_site_type_from_url", return_value=site_type
)
user_website_data = {"username": user.username, "url": url}
serializer = UserWebsiteSerializer(data=user_website_data)
is_valid = serializer.is_valid(raise_exception=True)
assert is_valid is True
assert serializer.validated_data["url"] == url
assert serializer.validated_data["site_type"] == site_type
assert serializer.validated_data["profile"] == user.profile
patched_get_site_type.assert_called_once_with(url)
@pytest.mark.parametrize(
"input_url,exp_result_url",
[("HTtPS://AbC.COM", "https://abc.com"), ("AbC.cOM", "http://abc.com")],
)
def test_user_website_url(self, mocker, user, input_url, exp_result_url):
"""
Test that deserializing a user website url adds a protocol if necessary and forces lowercase.
"""
site_type = "dummy"
mocker.patch(
"profiles.serializers.get_site_type_from_url", return_value=site_type
)
user_website_data = {"username": user.username, "url": input_url}
serializer = UserWebsiteSerializer(data=user_website_data)
is_valid = serializer.is_valid(raise_exception=True)
assert is_valid is True
assert serializer.validated_data["url"] == exp_result_url
def test_site_uniqueness(self, user):
"""
Test that a user can only save one of a specific type of site
"""
UserWebsiteFactory.create(
profile=user.profile, url="facebook.com/1", site_type=FACEBOOK_DOMAIN
)
user_website_data = {"username": user.username, "url": "facebook.com/2"}
serializer = UserWebsiteSerializer(data=user_website_data)
with pytest.raises(
ValidationError, match="A website of this type has already been saved."
):
serializer.is_valid(raise_exception=True)
serializer.save()
|
StarcoderdataPython
|
1694857
|
<gh_stars>0
from flask import Flask
import os
app = Flask(__name__, static_url_path='')
app.config.from_object('config')
# __file__ refers to this file (app/__init__.py)
APP_ROOT = os.path.dirname(os.path.abspath(__file__)) # refers to application_top
APP_STATIC = os.path.join(APP_ROOT, 'static')
from app.routes import index
|
StarcoderdataPython
|
1710415
|
#!/usr/bin/env python
# Author: <NAME>
# Description: a.k.a. foo.py as posted at https://www.biostars.org/p/95929/
# Solves the problem of orphan reads in a pair that may remain after retaining
# only those that align uniquely (i.e., after filtering using grep -v "XS:i:")
# This script is invoked by samtools_PE_RNAseq_mapping_pipeline.sh
import csv
import sys
# ajt: added line for generating Unix (rather than Windows) text file
csv.register_dialect("unixEOL", delimiter="\t", lineterminator="\n")
f = csv.reader(sys.stdin, dialect="unixEOL")
of = csv.writer(sys.stdout, dialect="unixEOL")
last_read = None
for line in f :
#take care of the header
if(line[0][0] == "@") :
of.writerow(line)
continue
if(last_read == None) :
last_read = line
else :
if(last_read[0] == line[0]) :
of.writerow(last_read)
of.writerow(line)
last_read = None
else :
last_read = line
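
# Illustrative usage (the exact command is an assumption based on the header
# comment, not copied from the pipeline script):
#   samtools view -h unique.bam | python foo.py > paired.sam
# The script writes only consecutive records that share a read name, so any
# orphaned mate left after uniqueness filtering is dropped.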
|
StarcoderdataPython
|
3337280
|
from django.apps import AppConfig
class GallerysConfig(AppConfig):
name = 'gallerys'
|
StarcoderdataPython
|
1716064
|
import pandas as pd
import numpy as np
import aif360.datasets
# generators for biased data by models
def feature_bias(rho_a, rho_z, N, d, mu):
'''
Bias that occurs when different protected attributes have different means (mu)
Parameters
-----------
rho_a : float
p(a = 1)
rho_z : float
p(z = 1)
N : int
number of samples
mu : matrix like, 2xD
mu[0] is the mean for a=0, mu[0][0] is the mean for a=0, z=0,
D = len(mu[0][0]) = number of features
d : int
total number of features
Returns
--------
df : DataFrame
a data frame with N rows and columns: a,y,z, x0:xD
'''
# portion of disadvantaged group
p_a = [1-rho_a, rho_a]
# portion of allocation of target variable
p_z = [1-rho_z, rho_z]
cov = np.eye(d)
a = np.random.choice([0,1], p=p_a, size=N)
z = np.random.choice([0,1], p=p_z, size=N)
y = z
x = [np.random.multivariate_normal(mu[a_i][z_i],cov) for a_i, z_i in zip(a,z)]
x = np.asarray(x)
# concatenate the data and p
data = np.concatenate([np.asarray([a,z,y]).T,x],axis=1)
labels =['a','z','y']
labels.extend(['x'+str(i) for i in range(d)])
df = pd.DataFrame(data=data, columns = labels)
return df
def subspace_bias(rho_a, rho_z, N, d, d_shared, mu):
'''
Bias that occurs when different features are informative for different protected classes
(d not shared)
Parameters
-----------
rho_a : float
p(a = 1)
rho_z : float
p(z=1)
N : int
number of samples
mu : matrix like, 2xD
mu[0] is the mean for z=0, D = len(mu[0]) = number of features
d : int
total number of features
d_shared : int
number of shared features
Returns
--------
df : DataFrame
a data frame with N rows and columns: a,y,z, x0:xD
'''
p_a = [1-rho_a, rho_a]
p_z = [1-rho_z, rho_z]
cov = np.eye(d)
d_noise = d-d_shared # noise dims per row
d_total = d + d_noise # total dims
a = np.random.choice([0,1], p=p_a, size=N)
z = np.random.choice([0,1], p=p_z, size=N)
y = z
labels_protected = np.asarray([a,z,y]).T
x_z = [np.random.multivariate_normal(mu[z_i],cov) for z_i in z]
x_n = np.random.multivariate_normal([0]*d_noise,np.eye(d_noise),N)
# functions for combining noise and true vectors
x_a = {0: lambda x,n: np.concatenate((x,n)),
1: lambda x,n: np.concatenate((n, x[d_shared-1:d], x[:d_noise]))}
x = [x_a[a](x_zi,x_ni) for a,x_zi,x_ni in zip(a,x_z,x_n)]
x = np.asarray(x)
# concatenate the data and p
data = np.concatenate([labels_protected,x],axis=1)
labels =['a','z','y']
labels.extend(['x'+str(i) for i in range(d_total)])
df = pd.DataFrame(data=data, columns = labels)
return df
def label_bias(rho_a, rho_z, beta, N, d, mu):
'''
Bias where the labeling errors are correlated with the protected attribute
Parameters
-----------
rho_a : float
p(a = 1)
rho_z : float
p(z=1)
beta : float
error rate in y, p(y=z) = 1-beta
N : int
number of samples
d : int
number of features
mu : matrix like, 2xD
mu[0] is the mean for z=0, D = len(mu[0]) = number of features
Returns
--------
df : DataFrame
a data frame with N rows and columns: a,y,z, x0:xD
'''
p_a = [1-rho_a, rho_a]
p_z = [1-rho_z, rho_z]
cov = np.eye(d)
a = np.random.choice([0,1], p=p_a, size=N)
z = np.random.choice([0,1], p=p_z, size=N)
x = [np.random.multivariate_normal(mu[z_i],cov) for z_i in z]
y = [np.random.choice([zi,1-zi],p=[1-beta[ai], beta[ai]]) for ai,zi in zip(a,z)]
# convert to numpy arrays and reshape
labels_protected = np.asarray([a,z,y]).T
x = np.asarray(x)
# concatenate the data and p
data = np.concatenate([labels_protected,x],axis=1)
labels =['a','z','y']
labels.extend(['x'+str(i) for i in range(len(mu[0]))])
df = pd.DataFrame(data=data, columns = labels)
return df
def convert_to_dataset(df, label_names, protected_attribute_names):
'''
Converts a dataframe created by one of the above functions into a dataset usable in IBM 360 package
Parameters
-----------
df : pandas dataframe
label_names : optional, a list of strings describing each label
protected_attribute_names : optional, a list of strings describing features corresponding to
protected attributes
Returns
--------
aif360.datasets.BinaryLabelDataset
'''
return aif360.datasets.BinaryLabelDataset(1.0, 0.0, df = df, label_names = label_names, protected_attribute_names = protected_attribute_names)
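
# Illustrative usage sketch (all numeric values and the __main__ guard are
# additions for demonstration, not part of the original module). For
# feature_bias, mu is indexed as mu[a][z]: one mean vector per combination of
# protected attribute a and label z.
if __name__ == "__main__":
    example_mu = [
        [[0.0, 0.0], [1.0, 1.0]],  # means for a=0: z=0, z=1
        [[0.5, 0.5], [1.5, 1.5]],  # means for a=1: z=0, z=1
    ]
    df_example = feature_bias(rho_a=0.3, rho_z=0.5, N=200, d=2, mu=example_mu)
    bld_example = convert_to_dataset(df_example, label_names=['y'],
                                     protected_attribute_names=['a'])
    print(df_example.head())
    print(bld_example)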
|
StarcoderdataPython
|
11481
|
<gh_stars>1-10
import unittest
from ....providers.aws.interactive import requires_replacement
def generate_resource_change(replacement=True):
resource_change = {
"Action": "Modify",
"Details": [],
"LogicalResourceId": "Fake",
"PhysicalResourceId": "arn:aws:fake",
"Replacement": "True" if replacement else "False",
"ResourceType": "AWS::Fake",
"Scope": ["Properties"],
}
return {
"ResourceChange": resource_change,
"Type": "Resource",
}
class TestInteractiveProvider(unittest.TestCase):
def test_requires_replacement(self):
changeset = [
generate_resource_change(),
generate_resource_change(replacement=False),
generate_resource_change(),
]
replacement = requires_replacement(changeset)
self.assertEqual(len(replacement), 2)
for resource in replacement:
self.assertEqual(resource["ResourceChange"]["Replacement"], "True")
|
StarcoderdataPython
|
3281330
|
<gh_stars>0
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.chart
from enum import IntFlag
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.chart import ChartDataCaption as ChartDataCaption
if hasattr(ChartDataCaption, '_constants') and isinstance(ChartDataCaption._constants, dict):
ChartDataCaption._constants['__ooo_ns__'] = 'com.sun.star.chart'
ChartDataCaption._constants['__ooo_full_ns__'] = 'com.sun.star.chart.ChartDataCaption'
ChartDataCaption._constants['__ooo_type_name__'] = 'const'
def build_enum():
global ChartDataCaptionEnum
ls = [f for f in dir(ChartDataCaption) if not callable(getattr(ChartDataCaption, f)) and not f.startswith('__')]
_dict = {}
for name in ls:
_dict[name] = getattr(ChartDataCaption, name)
ChartDataCaptionEnum = IntFlag('ChartDataCaptionEnum', _dict)
build_enum()
else:
from ...lo.chart.chart_data_caption import ChartDataCaption as ChartDataCaption
class ChartDataCaptionEnum(IntFlag):
"""
Enum of Const Class ChartDataCaption
These values specify how the captions of data points are displayed.
**since**
LibreOffice 7.1
"""
NONE = ChartDataCaption.NONE
"""
No captions are displayed.
"""
VALUE = ChartDataCaption.VALUE
"""
The caption contains the value of the data point in the number format of the axis that is attached to the respective data series.
"""
PERCENT = ChartDataCaption.PERCENT
"""
The caption contains the value of the data point in percent of all data points of one category.
That means, if a data point is the first one of a series, the percentage is calculated by using the first data points of all available series.
"""
TEXT = ChartDataCaption.TEXT
"""
The caption contains the category name of the category to which a data point belongs.
"""
FORMAT = ChartDataCaption.FORMAT
"""
The number formatter is always used for displaying the value as value.
So this setting is deprecated.
"""
SYMBOL = ChartDataCaption.SYMBOL
"""
The symbol of data column/row is additionally displayed in the caption.
"""
CUSTOM = ChartDataCaption.CUSTOM
"""
The caption contains a custom text, which belongs to a data point label.
**since**
LibreOffice 7.1
"""
DATA_SERIES = ChartDataCaption.DATA_SERIES
"""
The name of the data series is additionally displayed in the caption.
**since**
LibreOffice 7.2
"""
__all__ = ['ChartDataCaption', 'ChartDataCaptionEnum']
|
StarcoderdataPython
|
3336654
|
<filename>ansibleplaybookgrapher/__init__.py
__version__ = "0.9.1"
__prog__ = "ansible-playbook-grapher"
|
StarcoderdataPython
|
1724971
|
x:[int] = None
x = []
x[0] = 4
|
StarcoderdataPython
|
3397664
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Core module for pypcurve
"""
from .pypcurve import PCurve
__all__ = ["PCurve"]
__version__ = PCurve.__version__
|
StarcoderdataPython
|
3265910
|
# -*- coding: utf-8 -*-
"""! Test OPM Archive library.
@author zer0
@date 2015-12-16
"""
import unittest
import os
import __main__ as main
import libopm.archive as Archive
ARCHIVE_NAME = 'test_archive'
CURRENT_SCRIPT = os.path.basename(main.__file__)
class TestArchive(unittest.TestCase):
@classmethod
def setUpClass(cls):
Archive.compressArchive(ARCHIVE_NAME, [CURRENT_SCRIPT])
@classmethod
def tearDownClass(cls):
if os.path.exists(ARCHIVE_NAME):
os.remove(ARCHIVE_NAME)
def testDefault(self):
self.assertTrue(os.path.exists(ARCHIVE_NAME))
def testReadFileListWithArchive(self):
filelist = Archive.readFileListWithArchive(ARCHIVE_NAME)
self.assertEquals(len(filelist), 1)
self.assertEquals(filelist[0].name, CURRENT_SCRIPT)
def testReadStringWithArchive(self):
content = Archive.readStringWithArchive(ARCHIVE_NAME,
CURRENT_SCRIPT)
self.assertGreater(len(content), 1)
self.assertTrue(content.find(ARCHIVE_NAME))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1718803
|
from django.urls import path
from django.views.generic import TemplateView
from . import views
from . import views_v1_1
from . import views_v1_2
urlpatterns = [
path('', TemplateView.as_view(template_name='index.html'), name='index'),
path('data', views.rest_get, name='rest_get'),
path('v1.1/data', views_v1_1.rest_get, name='rest_get_v1_1'),
path('v1.1/data/<int:id_distribuidor>', views_v1_1.rest_get_single, name='rest_get_single_v1_1'),
path('v1.2/data', views_v1_2.rest_get, name='rest_get_v1_2'),
path('v1.2/data/<int:id_distribuidor>', views_v1_2.rest_get_single, name='rest_get_single_v1_2'),
path('formulario/', TemplateView.as_view(template_name='index.html'), name='formulario'),
path('formulario/data', views_v1_2.rest_post_distribuidor_potencial, name='rest_post_distribuidor_potencial_v1_2')
]
|
StarcoderdataPython
|
1630237
|
import os
import asyncio
if os.name == 'nt':
from hotplug.windows_notifier import WindowsNotifier as Notifier
else:
from hotplug.linux_notifier import LinuxNotifier as Notifier
__instance = None
def get_notifier():
global __instance
if __instance is not None:
return __instance
__instance = Notifier()
asyncio.create_task(__instance.start())
return __instance
def __getattr__(name):
global __instance
if __instance is None:
__instance = get_notifier()
if hasattr(__instance, name):
return getattr(__instance, name)
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
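
# Illustrative usage (the import path is a guess at where this module lives;
# note that get_notifier() schedules __instance.start() with
# asyncio.create_task, so it must be called from inside a running event loop):
#   from hotplug import notifier
#   n = notifier.get_notifier()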
|
StarcoderdataPython
|
3225648
|
__version__ = "3.0.2"
from .formatter import ConllFormatter
from .utils import init_parser
|
StarcoderdataPython
|
4839326
|
import os
import pytest
from . import run
from .conditions import has_http
def test_unit(cmake, unittest):
cwd = cmake(
["sentry_test_unit"], {"SENTRY_BACKEND": "none", "SENTRY_TRANSPORT": "none"}
)
env = dict(os.environ)
run(cwd, "sentry_test_unit", ["--no-summary", unittest], check=True, env=env)
@pytest.mark.skipif(not has_http, reason="tests need http transport")
def test_unit_transport(cmake, unittest):
cwd = cmake(["sentry_test_unit"], {"SENTRY_BACKEND": "none"})
env = dict(os.environ)
run(cwd, "sentry_test_unit", ["--no-summary", unittest], check=True, env=env)
|
StarcoderdataPython
|
1765007
|
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.utils import floor, flt, today, cint, cstr
@frappe.whitelist()
def calculate_deductible_hours(self,method):
deductible_hours = 0
if self.status == "Present":
if flt(self.working_hours) < 7.5 and flt(self.working_hours) > 5:
self.ind_deductible_hours = 7.5 - self.working_hours
@frappe.whitelist()
def validate_approved_hours(self,method):
if self.ind_approved_hours>self.ind_deductible_hours:
frappe.throw(_("Approved hours cannot be more than deducted hours"))
@frappe.whitelist()
def calculate_deductible_hours_salary_slip(self,method):
attendance = frappe.db.sql("""SELECT sum(ind_deductible_hours) AS 'ind_deductible_hours',
sum(ind_approved_hours) AS 'ind_approved_hours'
FROM `tabAttendance`
WHERE employee=%s
AND attendance_date BETWEEN %s AND %s""",(self.employee,self.start_date,self.end_date),as_dict=1)
if len(attendance) >= 1:
self.ind_hours = attendance[0].ind_deductible_hours
self.ind_approved_hours = attendance[0].ind_approved_hours
# if (self.ind_approved_hours<=self.ind_hours):
# else :
# self.ind_approved_hours=self.ind_hours
|
StarcoderdataPython
|
166572
|
# Shoot!
# by KidsCanCode 2014
# A generic space shooter - prototype (no art)
# For educational purposes only
import pygame
import sys
import random
# define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
BGCOLOR = BLACK
class Meteor(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
# start with a random speed
self.speed = random.randrange(3, 12)
self.image = pygame.Surface([36, 54])
self.image.fill(RED)
self.rect = self.image.get_rect()
# start off the top of the screen
self.rect.y = random.randrange(-50, -30)
self.rect.x = random.randrange(WIDTH)
def update(self):
# move the sprite
self.rect.y += self.speed
if self.rect.y > HEIGHT + 10:
self.rect.y = random.randrange(-50, 30)
self.rect.x = random.randrange(WIDTH)
class Player(pygame.sprite.Sprite):
speed = 12
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.speed_x = 0
self.level = 0
self.score = 0
self.shoot_sound = pygame.mixer.Sound("snd/laser4.wav")
self.hit_snd = pygame.mixer.Sound("snd/explode.wav")
self.explode_snd = pygame.mixer.Sound("snd/die.wav")
self.image = pygame.Surface([36, 54])
self.image.fill(GREEN)
self.rect = self.image.get_rect()
self.rect.x = WIDTH / 2
self.rect.bottom = HEIGHT - 10
def update(self):
# move the sprite
self.rect.x += self.speed_x
# check for edges
if self.rect.left < 0:
self.rect.left = 0
self.speed_x = 0
if self.rect.right > WIDTH:
self.rect.right = WIDTH
self.speed_x = 0
def go(self, dir):
if dir == 'L':
self.speed_x = -self.speed
elif dir == 'R':
self.speed_x = self.speed
def stop(self):
self.speed_x = 0
def shoot(self):
bullet = Bullet(self.rect.midtop, self.rect.y, self.level)
active_sprite_list.add(bullet)
bullet_sprite_list.add(bullet)
self.shoot_sound.play()
class Bullet(pygame.sprite.Sprite):
speed = -15
def __init__(self, x, y, level):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface([10, 20])
self.image.fill(RED)
self.rect = self.image.get_rect()
self.rect.midtop = x
self.rect.y = y
def update(self):
self.rect.y += self.speed
if self.rect.bottom < 0:
self.kill()
def draw_text(text, size, x, y):
# utility function to draw text on screen
font_name = pygame.font.match_font('arial')
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
screen.blit(text_surface, text_rect)
def show_start_screen():
# Display the starting screen
screen.fill(BGCOLOR)
draw_text("Shoot!", 72, WIDTH/2, HEIGHT/4)
draw_text("Move with the arrow keys", 24, WIDTH/2, HEIGHT/2)
draw_text("Shoot the meteors", 24, WIDTH/2, HEIGHT*5/8)
draw_text("Press a key to begin", 24, WIDTH/2, HEIGHT*3/4)
pygame.display.update()
# wait for a keypress to start
wait_for_key()
while True:
if wait_for_key():
pygame.event.get()
return
def show_go_screen(score):
# display the Game Over screen
screen.fill(BGCOLOR)
draw_text("GAME OVER", 58, WIDTH/2, HEIGHT/4)
text = "Score: %s" % score
draw_text(text, 24, WIDTH/2, HEIGHT/2)
draw_text("Press a key to begin", 24, WIDTH/2, HEIGHT*3/4)
pygame.display.update()
# pause for a moment and then wait for key
pygame.time.wait(500)
wait_for_key()
while True:
if wait_for_key():
pygame.event.get()
return
def wait_for_key():
# utility function to pause waiting for a keypress
# still allow Esc to exit
# Actually, we look for KEYUP event, not KEYPRESS
if len(pygame.event.get(pygame.QUIT)) > 0:
pygame.quit()
sys.exit()
keyup_events = pygame.event.get(pygame.KEYUP)
if len(keyup_events) == 0:
return None
if keyup_events[0].key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
return keyup_events[0].key
# basic constants for your game options
WIDTH = 360
HEIGHT = 480
FPS = 30
# initialize pygame
pygame.init()
# initialize sound - remove if you're not using sound
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Shoot!")
clock = pygame.time.Clock()
running = True
show_start_screen()
while True:
active_sprite_list = pygame.sprite.Group()
meteor_sprite_list = pygame.sprite.Group()
bullet_sprite_list = pygame.sprite.Group()
player = Player()
active_sprite_list.add(player)
for i in range(10):
meteor = Meteor()
active_sprite_list.add(meteor)
meteor_sprite_list.add(meteor)
# play the game!
while running:
clock.tick(FPS)
# check for all your events
for event in pygame.event.get():
# this one checks for the window being closed
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# now check for keypresses
elif event.type == pygame.KEYDOWN:
# this one quits if the player presses Esc
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.key == pygame.K_LEFT:
player.go('L')
if event.key == pygame.K_RIGHT:
player.go('R')
if event.key == pygame.K_SPACE:
player.shoot()
elif event.type == pygame.KEYUP:
if event.key in (pygame.K_LEFT, pygame.K_RIGHT):
player.stop()
# add any other key events here
# Game logic goes here #########
active_sprite_list.update()
# check for collisions
# first, ship with meteors
hit = pygame.sprite.spritecollideany(player, meteor_sprite_list)
if hit:
# you die
player.explode_snd.play()
pygame.time.wait(500)
running = False
# next, check bullets with meteors
hits = pygame.sprite.groupcollide(meteor_sprite_list, bullet_sprite_list,
True, True)
# for each meteor destroyed, spawn a new one
for hit in hits:
player.hit_snd.play()
player.score += 10
newmeteor = Meteor()
active_sprite_list.add(newmeteor)
meteor_sprite_list.add(newmeteor)
# Draw/update screen ########
screen.fill(BGCOLOR)
active_sprite_list.draw(screen)
text = 'Score: %s' % player.score
draw_text(text, 18, 45, 10)
# after drawing, flip the display
pygame.display.flip()
show_go_screen(player.score)
running = True
|
StarcoderdataPython
|
178263
|
#!/usr/bin/env python3
import rospy
from std_msgs.msg import Int32
zodiac_num = 0
print("卯")
def cb(message):
global zodiac_num
zodiac_num = message.data
zodiac_num = zodiac_num - 1995
while zodiac_num > 12:
if zodiac_num > 12:
zodiac_num = zodiac_num - 12
elif zodiac_num <= 12:
zodiac_num = zodiac_num
if zodiac_num % 12 == 0:
print("亥")
elif zodiac_num % 11 == 0:
print("戌")
elif zodiac_num % 10 == 0:
print("酉")
elif zodiac_num % 9 == 0:
print("申")
elif zodiac_num % 8 == 0:
print("未")
elif zodiac_num % 7 == 0:
print("午")
elif zodiac_num % 6 == 0:
print("巳")
elif zodiac_num % 5 == 0:
print("辰")
elif zodiac_num % 4 == 0:
print("卯")
elif zodiac_num % 3 == 0:
print("寅")
elif zodiac_num % 2 == 0:
print("丑")
elif zodiac_num % 1 == 0:
print("子")
if __name__ == '__main__':
rospy.init_node('twice')
sub = rospy.Subscriber('count_up', Int32, cb)
pub = rospy.Publisher('twice', Int32, queue_size=1)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
pub.publish(zodiac_num)
rate.sleep()
|
StarcoderdataPython
|
1692499
|
from abc import ABCMeta, abstractmethod
class AbstractDataHandler(object):
"""Abstract Data Handler Class
The data handler is an abstract base class providing an interface for all
subsequent (inherited) data handlers (both live and historic).
The goal of a (derived) data handler object is to output a generated set of
bars for each symbol requested. This will replicate how a live strategy
would function as current market data would be sent 'down the pipe'. Thus a
historic and live system will be treated identically by the rest of the
backtesting suite.
"""
__metaclass__ = ABCMeta
def __init__(self, events, symbol_handler, price_handler):
"""Initialize parameters of the abstract data handler object."""
self.events = events
self.symbol_handler = symbol_handler
self.price_handler = price_handler
self.continue_trading = True
@abstractmethod
def update(self):
"""Objects that implement the data handler abstract base class must
implement a method for obtaining new bars from the data source. This
method places the most recently available bars onto a data structure for
access by methods and objects requiring access to the underlying
financial data.
"""
raise NotImplementedError()
@abstractmethod
def request_prices(self):
"""Request the current price of assets."""
raise NotImplementedError()
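
# Minimal illustrative subclass (the class name and the list-like treatment of
# `events` are assumptions for demonstration, not part of the original
# package): it replays a fixed, pre-loaded sequence of bars to show what a
# concrete handler must implement.
class StaticDataHandler(AbstractDataHandler):
    """Toy handler that replays a fixed list of bars."""

    def __init__(self, events, symbol_handler, price_handler, bars):
        super(StaticDataHandler, self).__init__(
            events, symbol_handler, price_handler)
        self.bars = list(bars)

    def update(self):
        if not self.bars:
            self.continue_trading = False
            return
        self.events.append(self.bars.pop(0))

    def request_prices(self):
        return {}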
|
StarcoderdataPython
|
3242158
|
#!/usr/bin/env python
import functools
import subprocess
import re
import shutil
import os
import git
import pathlib
from copy import deepcopy
from cached_property import cached_property
from packaging.version import Version, InvalidVersion
from packaging.specifiers import SpecifierSet
from astropy.table import Table
from scalpl import Cut
import numpy as np
import astropy.units as u
from pathlib import Path
from schema import Or
from dataclasses import dataclass, field, replace
from typing import Union
from tollan.utils.dataclass_schema import add_schema
from tollan.utils import rupdate
from tollan.utils import call_subprocess_with_live_output
from tollan.utils.log import get_logger, timeit
from tollan.utils import ensure_abspath
from tollan.utils.fmt import pformat_yaml
from .base import PipelineEngine, PipelineEngineError
from ...utils import get_user_data_dir
from ...utils.misc import get_nested_keys
from ...utils.common_schema import RelPathSchema, PhysicalTypeSchema
from ...utils.runtime_context import yaml_load
REMOTE_CITLALI_REPO_URL = 'https://github.com/toltec-astro/citlali.git'
LOCAL_CITLALI_REPO_PATH = get_user_data_dir().joinpath("engines/citlali")
GIT_REMOTE_TIMEOUT = 10.
@functools.lru_cache(maxsize=1)
def _get_local_citlali_repo():
"""Return the Citlali git repo made available in the user data directory.
This repo is maintained by this package for check commit histories and
comparing versions.
"""
logger = get_logger()
if LOCAL_CITLALI_REPO_PATH.exists():
repo = git.Repo(LOCAL_CITLALI_REPO_PATH)
try:
repo.remote(name='origin').fetch(
kill_after_timeout=GIT_REMOTE_TIMEOUT)
return repo
except Exception:
logger.debug(
f"error fetching local citlali repo in "
f"{LOCAL_CITLALI_REPO_PATH}")
return repo
logger.debug(f"setup local citlali repo in ${LOCAL_CITLALI_REPO_PATH}")
# class _Progress(git.remote.RemoteProgress):
# def update(self, op_code, cur_count, max_count=None, message=''):
# logger.debug(self._cur_line)
repo = git.Repo.clone_from(
REMOTE_CITLALI_REPO_URL,
LOCAL_CITLALI_REPO_PATH,
# progress=_Progress()
)
return repo
UNKNOWN_VERSION = 'unknown'
"""Value used when the version is unknown."""
class CitlaliExec(object):
"""A low level wrapper class to run Citlali executable."""
logger = get_logger()
def __init__(self, path):
path = self._path = ensure_abspath(path)
version = self._version = self.get_version(path)
if version is UNKNOWN_VERSION:
raise ValueError(f"invalid Citlali executable path {path}")
def __repr__(self):
return f'{self.__class__.__name__}(version={self.version})'
@property
def path(self):
"""The path of the Citlali executable."""
return self._path
@property
def version(self):
"""The version of the Citlali executable."""
return self._version
@cached_property
def semver(self):
"""The semantic version of the Citlali executable."""
return self._ver_to_semver(self._version)
@classmethod
def get_version(cls, path):
"""Get the version of the Citlali."""
output = subprocess.check_output(
(path, '--version'),
stderr=subprocess.STDOUT,
).decode()
cls.logger.debug(f'check version of {path}:\n{output}')
r = re.compile(
r'^(?P<name>citlali\s)?(?P<version>.+)\s\((?P<timestamp>.+)\)$',
re.MULTILINE)
m0 = re.search(r, output)
# import pdb
# pdb.set_trace()
if m0 is None:
cls.logger.warning(
f"unable to parse citlali version: \n{output}")
version = UNKNOWN_VERSION
else:
m = m0.groupdict()
version = m['version']
return cls._norm_ver(version)
def get_default_config(self):
"""Get the default config of the Citlali."""
path = self.path
output = subprocess.check_output(
(path, '--dump_config'),
stderr=subprocess.STDOUT,
).decode()
self.logger.debug(f'dump config of {path}:\n{output}')
return yaml_load(output)
@staticmethod
def _norm_ver(ver, with_rev=True):
# removes any non-standard suffix
if with_rev:
return re.sub(r'(-dirty)$', '', ver)
return re.sub(r'(-dirty|~\d+|-\d+-.+)$', '', ver)
@classmethod
def _ver_to_semver(cls, ver):
"""Convert version string (tag, rev hash, etc.) to SemVer.
This is done by querying the local Citlali repo history.
"""
try:
return Version(ver)
except InvalidVersion:
pass
# try find the latest version tag in the history
repo = _get_local_citlali_repo()
try:
_ver = cls._norm_ver(repo.git.describe(ver, contains=True), with_rev=False)
except Exception:
_ver = cls._norm_ver(repo.git.describe(ver, contains=False), with_rev=False)
cls.logger.debug(f"version {ver} -> semver {_ver}")
return Version(_ver)
def check_version(self, version):
verspec = SpecifierSet(version)
self.logger.debug(f"check {self.semver} against {verspec}")
return self.semver in verspec
def check_for_update(self):
"""Check the current Citlali version agains the Github remote head.
"""
logger = get_logger()
repo = _get_local_citlali_repo()
def get_git_changelog(v1, v2):
changelog = repo.git.log(f'{v1}..{v2}', oneline=True)
if changelog == '':
# same revision
changelog = None
return changelog
# get remote head version
remote_rev = repo.git.rev_parse('origin/HEAD', short=True)
changelog = get_git_changelog(self.version, remote_rev)
if changelog is not None:
changelog_section = (
f"\n"
f"###################### Attention #######################\n"
f"You are using an outdated version of Citlali. The latest\n"
f"version has the following changes:\n\n"
f"{changelog}\n"
f"########################################################"
f"\n"
)
else:
            changelog_section = 'Citlali is up-to-date!'
logger.info(
f"\n\n"
f"* Executable path: {self.path}"
f"* Remote Citlali rev: {remote_rev}\n"
f"* Local Citlali version: {self.version}\n"
f'{changelog_section}'
)
@staticmethod
@functools.lru_cache(maxsize=1)
def _get_line_buf_cmd():
stdbuf = shutil.which('stdbuf')
if stdbuf is not None:
return [stdbuf, '-oL']
return list()
def run(self, config_file, log_level="INFO", **kwargs):
exec_path = self.path
citlali_cmd = [
exec_path.as_posix(),
'-l', log_level.lower(),
config_file.as_posix(),
]
cmd = self._get_line_buf_cmd() + citlali_cmd
self.logger.info(
"run {} cmd: {}".format(self, ' '.join(citlali_cmd)))
return call_subprocess_with_live_output(cmd, **kwargs)
@functools.lru_cache(maxsize=None)
def _get_citlali_exec(path):
# This caches all the found exec.
return CitlaliExec(path=path)
class Citlali(PipelineEngine):
"""A wrapper class of Citlali, the TolTEC data reduction pipeline
engine.
It searches for instances of ``citlali`` executables and check their
versions against the required version. The latest one is adopted if
multiple are found.
Parameters
----------
path : str, `pathlib.Path`, list
The path to search the executable for. A list of paths is accepted
which searches the executable in the list in order.
version : str
A version specifier/predicate that specifies the required version of
pipeline.
use_env_path : bool
If True, the system PATH env var is consulted.
"""
logger = get_logger()
def __init__(
self, path=None, version=None, use_env_path=True):
citlali_executables = self.find_citlali_executables(
path=path, version=version, use_env_path=use_env_path)
if len(citlali_executables) == 0:
raise PipelineEngineError(
f"Cannot find Citlali executables for "
f"version {version}")
elif len(citlali_executables) > 1:
self.logger.warning(
f"Found multiple Citlali executables for "
f"version={version}\n"
# f"{citlali_executables['path', 'version', 'timestamp']}"
)
else:
pass
citlali_exec = self._citlali_exec = citlali_executables[0]
self.logger.debug(f"use citlali executable: {citlali_exec}")
def __repr__(self):
return f'{self.__class__.__name__}(version={self.version})'
@property
def exec_path(self):
return self._citlali_exec.path
@property
def version(self):
return self._citlali_exec.version
def get_default_config(self):
return self._citlali_exec.get_default_config()
def check_for_update(self):
return self._citlali_exec.check_for_update()
def run(self, *args, **kwargs):
return self._citlali_exec.run(*args, **kwargs)
@classmethod
def find_citlali_executables(
cls, path=None, version=None, use_env_path=True):
"""Return a list of `CitlaliExec` objects that satisfy the version
constraints."""
exec_name = 'citlali'
if path is None:
path = []
elif isinstance(path, (str, pathlib.Path)):
path = [path]
else:
pass
if use_env_path:
# check if the executable is in env path
exec_path = shutil.which(exec_name, path=None)
if exec_path is not None:
path.append(exec_path)
# now go over each item in path and get executable for checking
paths = dict()
def _check_and_add_path(exec_path):
try:
exec = CitlaliExec(path=exec_path)
paths[exec_path] = exec
except Exception:
cls.logger.debug(
f"skip invalid cialali exec {exec_path}")
cls.logger.debug(
f"find citali executables in paths:\n{pformat_yaml(path)}")
for p in path:
p = ensure_abspath(p)
if p.is_dir():
_check_and_add_path(shutil.which('citlali', path=p))
elif p.is_file() and os.access(p, os.X_OK):
_check_and_add_path(p)
else:
cls.logger.debug(f'skip invalid path {p}')
cls.logger.debug(
f"found {len(paths)} citlali executables:\n{pformat_yaml(paths)}")
# now check each executable with version
if version is None:
cls.logger.debug("skip checking executable versions")
return list(paths.values())
valid_execs = list()
cls.logger.debug("check executable versions")
for e in paths.values():
if e.check_version(version):
valid_execs.append(e)
cls.logger.debug(f"{e} version satisfies {version}")
else:
cls.logger.debug(f"{e} version does not satisfies {version}")
return valid_execs
@timeit
def proc_context(self, config):
"""Return a `CitlaliProc` that run reduction for given input dataset.
"""
return CitlaliProc(citlali=self, config=config)
class CitlaliProc(object):
"""A context class for running Citlali."""
logger = get_logger()
def __init__(self, citlali, config):
self._citlali = citlali
self._config = replace(
config,
low_level=self._resolve_low_level_config(config.low_level)
)
@property
def citlali(self):
return self._citlali
@property
def config(self):
return self._config
def __enter__(self):
return self
def __exit__(self, *args):
pass
def __call__(
self, dataset, output_dir,
log_level='INFO', logger_func=None):
# resolve the dataset to input items
tbl = dataset.index_table
grouped = tbl.group_by(
['obsnum', 'subobsnum', 'scannum', 'master', 'repeat'])
self.logger.debug(f"collected {len(grouped)} raw obs")
for key, group in zip(grouped.groups.keys, grouped.groups):
self.logger.debug(
'****** obs name={obsnum}_{subobsnum}_{scannum} '
'*******'.format(**key))
self.logger.debug(f'{group}\n')
input_items = [
self._resolve_input_item(d) for d in grouped.groups]
# create low level config object and dump to file
# the low level has been resolved to a dict when __init__ is called
cfg = deepcopy(self.config.low_level)
rupdate(cfg, {
'inputs': input_items,
'runtime': {
# TODO get rid of the trailing slash
'output_dir': output_dir.as_posix() + '/'
}
})
cfg_hl = self._resolve_high_level_config(self.config)
rupdate(cfg, cfg_hl)
self.logger.debug(
f'resolved low level config:\n{pformat_yaml(cfg)}')
# show the high level config entries that over writes the low level
# values
chl = Cut(cfg_hl)
cll = Cut(self.config.low_level)
updated_entries = []
for key in get_nested_keys(cfg_hl):
if key not in cll:
continue
new = chl[key]
old = cll[key]
if new == old:
continue
updated_entries.append((key, old, new))
if updated_entries:
updated_entries = Table(
rows=updated_entries,
names=['low_level_config_key', 'default', 'updated'])
self.logger.info(
f"low level config entries overwitten by high level config:\n\n"
f"{updated_entries}\n")
name = input_items[0]['meta']['name']
output_name = f'citlali_o{name}_c{len(input_items)}.yaml'
cfg_filepath = output_dir.joinpath(output_name)
with open(cfg_filepath, 'w') as fo:
fo.write(pformat_yaml(cfg))
# yaml_dump(cfg, fo)
success = self._citlali.run(
cfg_filepath, log_level=log_level, logger_func=logger_func)
# TODO implement the logic to locate the generated output files
# which will be used to create data prod object.
if success:
return output_dir
raise RuntimeError(
f"failed to run {self.citlali} with config file {cfg_filepath}")
def _resolve_low_level_config(self, low_level):
"""Return a low-level config dict from low_level config entry."""
if low_level is None:
return self.citlali.get_default_config()
if isinstance(low_level, Path):
with open(low_level, 'r') as fo:
return yaml_load(fo)
# a dict already
return low_level
def _resolve_high_level_config(self, high_level):
"""Return a low level config dict from high level dict."""
# image_frame_params
cfg = Cut(dict())
pixel_size = high_level.image_frame_params.pixel_size
if pixel_size is not None:
cfg.setdefault(
'mapmaking.pixel_size_arcsec', pixel_size.to_value(u.arcsec))
return cfg.data
@classmethod
def _resolve_input_item(cls, index_table):
"""Return an citlali input list entry from index table."""
tbl = index_table
d0 = tbl[0]
meta = {
'name': f'{d0["obsnum"]}_{d0["subobsnum"]}_{d0["scannum"]}'
}
data_items = list()
cal_items = list()
for entry in tbl:
instru = entry['instru']
interface = entry['interface']
source = entry['source']
extra = dict()
if instru == 'toltec':
c = data_items
elif interface == 'lmt':
c = data_items
elif interface == 'apt':
c = cal_items
# TODO implement in citlali the proper
# ecsv handling
source = _fix_apt(source)
extra = {'type': 'array_prop_table'}
else:
continue
c.append(dict({
'filepath': source,
'meta': {
'interface': interface
}
}, **extra)
)
cls.logger.debug(
f"collected input item name={meta['name']} "
f"n_data_items={len(data_items)} "
f"n_cal_items={len(cal_items)}")
# this is a hack. TODO fix the proper ordering of data items
data_items = sorted(
data_items,
key=lambda d: (
int(d['meta']['interface'][6:])
if d['meta']['interface'].startswith('toltec')
else -1)
)
return {
'meta': meta,
'data_items': data_items,
'cal_items': cal_items,
}
def _fix_apt(source):
# this is a temporary fix to make citlali work with the
# apt
tbl = Table.read(source, format='ascii.ecsv')
tbl_new = Table()
tbl_new['nw'] = np.array(tbl['nw'], dtype='d')
tbl_new['array'] = np.array(tbl['array'], dtype='d')
tbl_new['flxscale'] = np.array(tbl['flxscale'], dtype='d')
tbl_new['x_t'] = tbl['x_t'].quantity.to_value(u.deg)
tbl_new['y_t'] = tbl['y_t'].quantity.to_value(u.deg)
tbl_new['a_fwhm'] = tbl['a_fwhm'].quantity.to_value(u.deg)
tbl_new['b_fwhm'] = tbl['b_fwhm'].quantity.to_value(u.deg)
source_new = source.replace('.ecsv', '_trimmed.ecsv')
tbl_new.write(source_new, format='ascii.ecsv', overwrite=True)
return source_new
# High level config classes
# TODO some of these config can be made more generic and does not needs to
# be citlali specific.
@add_schema
@dataclass
class ImageFrameParams(object):
"""Params related to 2D image data shape and WCS."""
pixel_size: u.Quantity = field(
default=None,
metadata={
'description': 'The pixel size of image.',
'schema': PhysicalTypeSchema('angle')
}
)
@add_schema
@dataclass
class CitlaliConfig(object):
"""The high-level config for Citlali."""
low_level: Union[None, Path, dict] = field(
default=None,
metadata={
'description': 'The low level config used as the base.',
'schema': Or(RelPathSchema(), dict)
}
)
image_frame_params: ImageFrameParams = field(
default_factory=ImageFrameParams,
metadata={
'description':
'The params related to the output image data shape and WCS.'
}
)
class Meta:
schema = {
'ignore_extra_keys': False,
'description': 'The high level config for Citlali.'
}
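
# Illustrative end-to-end sketch (the search path, version specifier, dataset
# and output directory are placeholders, not taken from this module):
#   engine = Citlali(path=["/opt/citlali/bin"], version=">=1.0")
#   engine.check_for_update()
#   config = CitlaliConfig(low_level=None)
#   with engine.proc_context(config) as proc:
#       proc(dataset, output_dir=ensure_abspath("./reduced"), log_level="INFO")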
|
StarcoderdataPython
|
3380991
|
# Copyright 2022 IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is part of the code to reproduce the results in the paper:
# <NAME> and <NAME>, "Circuit optimization of Hamiltonian
# simulation by simultaneous diagonalization of Pauli clusters," Quantum 4,
# p. 322, 2020. https://doi.org/10.22331/q-2020-09-12-322
import cl
for pauli in ['y', 'xyz','-zzz', ['yy','-yy','jyy','-jyy','xi','-xi','jxi','-jxi']] :
if (not isinstance(pauli,list)) : pauli = [pauli]
matrix = cl.pauli_to_matrix(pauli)
print(matrix)
print(pauli)
print(cl.matrix_to_pauli(matrix))
|
StarcoderdataPython
|
3292961
|
#!/usr/bin/env python2
"""Stego Helper Identification Tool - Hide"""
from scipy.misc import imread as read_img
import numpy as np
import util
import os
import json
###############################################################################
######################################################################## bit ##
###############################################################################
def decode_msg_from_bit(img_array, stegobit):
img_bit_array = util.unpack_img_to_bits(img_array)
bitmsg = [byte[stegobit] for byte in img_bit_array]
msg = np.packbits(bitmsg)
    return ''.join(chr(x) for x in msg if x != 0)
def decode_nth_bit(inp, stegobit):
"""Retrieves a text message in a specific colour value bit of an image"""
assert os.path.isfile(inp), '%s is not a file.' % inp
assert 0 <= stegobit <= 7, '%d is an invalid bit value.' %stegobit
img = read_img(inp)
res = decode_msg_from_bit(img, stegobit)
print res
return res
###############################################################################
################################################################## patchwork ##
###############################################################################
def decode_msg_with_patchwork(img_array, key_array_A, key_array_B):
value = []
for a, b in zip(key_array_A, key_array_B):
value.append(img_array[tuple(a)] - img_array[tuple(b)])
return ''.join(chr(v[0]) for v in value)
def decode_patchwork(inp, key_array_A, key_array_B):
"""Retrieves a text message which is hidden by patchwork steganography"""
assert os.path.isfile(inp), '%s is not a file.' % inp
img = read_img(inp)
key_array_A = json.loads(key_array_A)
key_array_B = json.loads(key_array_B)
res = decode_msg_with_patchwork(img, key_array_A, key_array_B)
print res
return res
METHODS = {
'bit': {
'function': decode_nth_bit,
'arguments': (
{'name': 'stegobit', 'type': int, 'help': 'Bit where the message is hidden'},
{'name': 'inp', 'metavar': 'input_path', 'help': 'Path to image'},
)
},
'patchwork': {
'function': decode_patchwork,
'arguments': (
{'name': 'inp', 'metavar': 'input_path', 'help': 'Path to image'},
{'name': 'key_array_A', 'help': 'First key stream'},
{'name': 'key_array_B', 'help': 'Second key stream'},
)
}
}
if __name__ == '__main__':
util.start(__doc__, METHODS)
# path = "C:\\Users\\juckef34\\ss15\\project\\tests\\files\\diffpixels_test.png"
# b = 7
# bit(path, b)
|
StarcoderdataPython
|
1631330
|
import pandas as pd
from flask import Flask, jsonify, request, Response
import pickle
import base64
import jsonpickle
import numpy as np
import cv2
import json
from PIL import Image
# app
app = Flask(__name__)
prototxt = 'model/bvlc_googlenet.prototxt'
model = 'model/bvlc_googlenet.caffemodel'
labels = 'model/synset_words.txt'
# load the class labels from disk
rows = open(labels).read().strip().split("\n")
classes = [r[r.find(" ") + 1:].split(",")[0] for r in rows]
# load our serialized model from disk
net = cv2.dnn.readNetFromCaffe(prototxt, model)
# routes
@app.route('/', methods=['POST', 'GET'])
def predict():
return 'Homepage Backend'
@app.route('/api/test', methods=['POST', 'GET'])
def test():
try:
if request.method == 'POST':
r = request
img = Image.open(r.files['file_field'])
image = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
cv2.imwrite('image.jpg', image)
# our CNN requires fixed spatial dimensions for our input image(s)
# so we need to ensure it is resized to 224x224 pixels while
# performing mean subtraction (104, 117, 123) to normalize the input;
# after executing this command our "blob" now has the shape:
# (1, 3, 224, 224)
blob = cv2.dnn.blobFromImage(image, 1, (224, 224), (104, 117, 123))
# set the blob as input to the network and perform a forward-pass to
# obtain our output classification
net.setInput(blob)
preds = net.forward()
# sort the indexes of the probabilities in descending order (higher
# probability first) and grab the top 50 predictions
idxs = np.argsort(preds[0])[::-1][:50]
listResults = []
# loop over the top predictions, drawing only the best one on the input image
for (i, idx) in enumerate(idxs):
# draw the top prediction on the input image
if i == 0:
text = "Label: {}, {:.2f}%".format(classes[idx],
preds[0][idx] * 100)
cv2.putText(image, text, (5, 25), cv2.FONT_HERSHEY_SIMPLEX,
0.7, (0, 0, 255), 2)
# display the predicted label + associated probability to the console
output = ("{}, {}, {:.5}".format(i + 1,
classes[idx], preds[0][idx]))
listResults.append(output)
response = {'results' : listResults}
response_pickled = jsonpickle.encode(response)
return Response(response=response_pickled, status=200, mimetype="application/json")
else:
return ('[ERROR] The request is not a POST request')
except Exception as e:
    response = {'Error': str(e)}
    response_pickled = jsonpickle.encode(response)
    return Response(response=response_pickled, status=500, mimetype="application/json")
if __name__ == '__main__':
app.run(port = 5000, debug=True)
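# Illustrative client sketch (not part of the original file; the endpoint path and
# the 'file_field' key come from the route above, everything else is an assumption):
#   import requests
#   with open('example.jpg', 'rb') as fh:
#       r = requests.post('http://localhost:5000/api/test', files={'file_field': fh})
#   print(r.json()['results'][:5])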
|
StarcoderdataPython
|
1638372
|
"""Module containing class `PluginTypePluginType`."""
from vesper.plugin.plugin_type_plugin_interface_1_0 import \
PluginTypePluginInterface_1_0
import vesper.plugin.plugin_utils as plugin_utils
class PluginTypePluginType(PluginTypePluginInterface_1_0):
"""
The plugin type of plugin types.
This plugin type is also known as the *root plugin type*, since
it is the root of the plugin type hierarchy.
Note that this plugin is its own plugin type. This makes sense
since it is intended to be the parent of all plugin types, and
is itself a plugin type.
"""
# Plugin attributes.
name = 'Plugin Type'
version = '1.0.0'
description = 'The plugin type of plugin types.'
author = '<NAME>'
license = 'MIT'
# Note that we cannot set the `type` attribute of this class here,
# since the class to which we want to set it (i.e. this class) does
# not yet exist (since we are still in the process of defining it).
# Instead, we set the attribute (along with the `interface`
# attribute) below, after the end of the class definition.
# Plugin type attributes.
entry_point_group_name = 'vesper.plugin_types'
supported_interfaces = (PluginTypePluginInterface_1_0,)
def _load_plugins(self):
# Load plugin types other than this one.
plugins = super()._load_plugins()
return (self,) + plugins
# TODO: Perform extra validation needed for plugin types.
# Set root plugin type `type` and `interface` attributes. See note
# above regarding why we do this here rather than within the
# `PluginTypePluginType` class.
plugin_utils._set_plugin_type_attributes(
PluginTypePluginType, PluginTypePluginType)
|
StarcoderdataPython
|
3219970
|
<gh_stars>0
def romanToDecimal(roman_number):
roman_list = {'I':1, 'V':5, 'X':10, 'L':50, 'C':100, 'D':500, 'M':1000}
result = 0
for index,current_number in enumerate(roman_number):
if (index+1) == len(roman_number) or roman_list[current_number] >= roman_list[roman_number[index+1]]:
result+=roman_list[current_number]
else:
result-=roman_list[current_number]
return result
'''
arabes=['I','II','III','IV','V','VI','VII','VIII','IX','X','XI','XII','XIII','XIV','XV','XVI','XVII','XVIII','XIX','XX','XXI','XXII','XXIII','XXIV','XXV','XXVI','XXVII','XXVIII','XXIX','XXX','XXXI','XXXII','XXXIII','XXXIV','XXXV','XXXVI','XXXVII','XXXVIII','XXXIX','XL','XLI','XLII','XLIII','XLIV','XLV','XLVI','XLVII','XLVIII','XLIX','L']
for i in arabes:
roman_to_decimal(i)
'''
def sortRoman(names):
listOrdererByName = []
listOrdererByNumber = []
middleList = []
tempList = []
# To generate the equivalent decimal number in a list of tuples
for currentName in names:
listOrdererByName.append((currentName.split(' ')[0],currentName.split(' ')[1],romanToDecimal(currentName.split(' ')[1])))
# To order by name
listOrdererByName=sorted(listOrdererByName)
# To split equal list of names
for index,currentName in enumerate(listOrdererByName):
if index == 0:
tempList.append(currentName)
else:
# If the current name is equal to the previous, then append it in the same sublist
if currentName[0] == listOrdererByName[index-1][0]:
tempList.append(currentName)
# Last name in the list
if index == (len(listOrdererByName)-1):
middleList.append(tempList)
# In other case, append it in a new sublist
else:
middleList.append(tempList)
tempList=[]
tempList.append(currentName)
# Last iteration
if index == (len(listOrdererByName)-1):
middleList.append(tempList)
# To order each sublist by its roman numeral value and build the result
for subList in middleList:
    for currentName in sorted(subList, key=lambda x: x[2]):
        listOrdererByNumber.append(currentName[0] + ' ' + currentName[1])
print(listOrdererByNumber)
return listOrdererByNumber
def main():
names=['<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>']
sortRoman(names)
names=['<NAME>','<NAME>']
sortRoman(names)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1652812
|
from deepleaps.dataloader.transforms import *
"""
All transform methods are registered here. If the default transform method path is here, import the transform method.
"""
TRANSFORM = {}
TRANSFORM['ToTensor'] = ToTensor
TRANSFORM['ToNumpy'] = ToNumpy
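# Illustrative sketch (not part of the original module): additional transforms can
# be registered the same way. ToFloat32 is a hypothetical example and assumes a
# transform is a callable applied to a sample dict of numpy arrays.
class ToFloat32(object):
    """Hypothetical transform casting every array in a sample dict to float32."""
    def __call__(self, sample):
        return {key: value.astype('float32') for key, value in sample.items()}
TRANSFORM['ToFloat32'] = ToFloat32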
|
StarcoderdataPython
|
3388597
|
<gh_stars>0
import sys
import time
import requests
from tqdm import tqdm
import pickle
import os.path
delay = 0.50
def delay_call(clock_since_call):
delay_since_call = time.time() - clock_since_call
if delay_since_call < delay:
time.sleep(delay - delay_since_call)
def save(item, file_name):
with open(file_name + '.pickle', 'wb') as handle:
pickle.dump(item, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load(file_name):
with open(file_name + '.pickle', 'rb') as handle:
return pickle.load(handle)
def get_meta_from_mint(mint):
url = "http://api-mainnet.magiceden.dev/v2/tokens/" + mint
response = requests.request("GET", url)
return response.json()
def get_how_collections():
url = "https://api.howrare.is/v0.1/collections"
response = requests.request("GET", url)
response = response.json()
list = response['result']['data']
ret = {}
for i in list:
ret[i['url'].replace('/', '')] = i['floor']
return ret
def get_first_mint(HR_symbol):
url = "https://api.howrare.is/v0.1/collections/" + HR_symbol + "/only_rarity/"
try:
response = requests.request("GET", url)
response = response.json()
return response['result']['data']['items'][0]['mint']
except:
return None
def get_me_symbol(HR_symbol):
mint = get_first_mint(HR_symbol)
try:
return get_meta_from_mint(mint)['collection']
except:
return None
def gen_magic_how_pair(HR_symbols):
ret = {}
for sym in tqdm(HR_symbols):
clock_since_call = time.time()
try:
ret[sym] = get_me_symbol(sym)
delay_call(clock_since_call)
except:
print("Could not add: " + sym)
return ret
def update_magic_how_pair(HR_symbols, ME_HR_map):
for sym in tqdm(HR_symbols):
if sym not in ME_HR_map.keys():
clock_since_call = time.time()
try:
ME_HR_map[sym] = get_me_symbol(sym)
delay_call(clock_since_call)
except:
print("Could not add: " + sym)
return ME_HR_map
def get_attr_floor_new(collection, all):
price_map = {}
if not all:
print(collection + ": Fetching current listings (this will take a minute)")
listed = 0
clock_since_call = time.time()
try:
url = "https://api-mainnet.magiceden.dev/v2/collections/" + collection + "/stats"
response = requests.request("GET", url)
listed = response.json()['listedCount']
delay_call(clock_since_call)
clock_since_call = time.time()
url = "https://api-mainnet.magiceden.dev/v2/collections/" + collection + "/listings?offset=" + str(listed - 20) + "&limit=20"
response = requests.request("GET", url)
listings = response.json()
delay_call(clock_since_call)
except:
return {}
for listing in listings:
try:
price_map[listing['tokenMint']] = listing['price']
except:
continue
return price_map
def get_attr_floor(collection, all):
price_map = {}
if not all:
print(collection + ": Fetching current listings (this will take a minute)")
i = 0
while True:
clock_since_call = time.time()
try:
url = "https://api-mainnet.magiceden.dev/v2/collections/" + collection + "/listings?offset=" + str(i*20) + "&limit=20"
response = requests.request("GET", url)
listings = response.json()
except:
break
if response.json() == []:
break
for listing in listings:
price_map[listing['tokenMint']] = listing['price']
delay_call(clock_since_call)
i+=1
return price_map
def filter(ME_HR_map, syms, bounds):
for hr_name in list(syms):
if not (hr_name in syms.keys() and float(syms[hr_name]) >= bounds[0] and float(syms[hr_name]) <= bounds[1]):
ME_HR_map.pop(hr_name, None)
return ME_HR_map
def filter_bound(deals, bound, price_map):
for mint in list(deals):
if not (mint in price_map.keys() and float(price_map[mint]) <= bound):
deals.pop(mint, None)
return deals
def get_rarity(collection):
url = "https://api.howrare.is/v0.1/collections/" + collection + "/only_rarity/"
try:
response = requests.request("GET", url)
response = response.json()
data = response['result']['data']['items']
except:
return None
size = len(data)
rarity = {}
for nft in data:
rarity[nft['mint']] = nft['rank']
return rarity
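# Deal score used below: (rarity rank / collection size) * listing price.
# Lower scores flag rarer items listed at lower prices, i.e. better deals.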
def get_rarity_price(rarity_map, price_map):
rarity_price_map = {}
size = len(rarity_map.keys())
for mint in price_map.keys():
if mint in rarity_map.keys() and mint in price_map.keys():
rarity_price_map[mint] = (float(rarity_map[mint]) / size) * price_map[mint]
else:
rarity_price_map[mint] = 9999999
return rarity_price_map
if "-a" in sys.argv:
top_n = 10 if len(sys.argv) <= 2 else int(sys.argv[2])
bound = [float(sys.argv[3]), float(sys.argv[4])] if len(sys.argv) >= 5 else None
syms = get_how_collections()
if os.path.isfile('./ME_HR_map.pickle'):
print("Checking for new HowRare-ME key mappings")
ME_HR_map = load('ME_HR_map')
ME_HR_map = update_magic_how_pair(syms.keys(), ME_HR_map)
else:
print("Downloading HowRare-ME key mapping")
ME_HR_map = gen_magic_how_pair(syms.keys())
save(ME_HR_map, 'ME_HR_map')
price_map = {}
rarity_map = {}
rarity_price_map = {}
if bound != None:
ME_HR_map = filter(ME_HR_map, syms, bound)
print("Fetching deals per collection")
for hr_name in tqdm(ME_HR_map.keys()):
if ME_HR_map[hr_name] == None:
continue
tmp_price_map = get_attr_floor_new(ME_HR_map[hr_name], True)
price_map.update(tmp_price_map)
tmp_rarity_map = get_rarity(hr_name)
rarity_map.update(tmp_rarity_map)
tmp_rarity_price_map = get_rarity_price(tmp_rarity_map, tmp_price_map)
rarity_price_map.update(tmp_rarity_price_map)
if bound != None:
rarity_price_map = filter_bound(rarity_price_map, bound[1], price_map)
deals = dict(sorted(rarity_price_map.items(), key=lambda item: item[1]))
print("=================================================================")
if bound != None:
print("Top " + str(top_n) + " deals on MagicEden priced between " + str(bound[0]) + "-" + str(bound[1]) + " SOL ")
else:
print("Top " + str(top_n) + " deals on MagicEden")
top_deals = list(deals.keys())[:top_n]
for i in range(0, len(top_deals)):
print("\t- Rank " + str(rarity_map[top_deals[i]]) + ", " + str(price_map[top_deals[i]]) + " SOL: https://magiceden.io/item-details/" + top_deals[i])
print("=================================================================\n")
else:
ME_name = sys.argv[1]
HR_name = sys.argv[2]
top_n = 10 if len(sys.argv) <= 3 else int(sys.argv[3])
price_map = get_attr_floor(ME_name, False)
rarity_map = get_rarity(HR_name)
rarity_price_map = get_rarity_price(rarity_map, price_map)
deals = dict(sorted(rarity_price_map.items(), key=lambda item: item[1]))
print("=================================================================")
print("Top " + str(top_n) + " " + ME_name + " deals on MagicEden")
top_deals = list(deals.keys())[:top_n]
for i in range(0, len(top_deals)):
print("\t- Rank " + str(rarity_map[top_deals[i]]) + ", " + str(price_map[top_deals[i]]) + " SOL: https://magiceden.io/item-details/" + top_deals[i])
print("=================================================================\n")
|
StarcoderdataPython
|
47012
|
<reponame>Speedy1991/graphene-django-jwt
from django.contrib.auth.models import AnonymousUser
from graphene_django_jwt.blacklist import Blacklist
from graphene_django_jwt.shortcuts import get_user_by_token
from graphene_django_jwt.utils import get_credentials, get_payload
def _load_user(request):
token = get_credentials(request)
if token is not None:
refresh_token = get_payload(token)['refresh_token']
if Blacklist.is_blacklisted(refresh_token):
return None
return get_user_by_token(token)
class JSONWebTokenMiddleware:
def __init__(self, *args, **kwargs):
self._skip = False
def resolve(self, next, root, info, **kwargs):
if self._skip:
return next(root, info, **kwargs)
if not info.context.user.is_authenticated:
user = _load_user(info.context)
info.context.user = user or AnonymousUser()
self._skip = True
return next(root, info, **kwargs)
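# Illustrative configuration sketch (assumption, not taken from this file): graphene
# middleware is typically enabled through the GRAPHENE setting in settings.py, e.g.
#   GRAPHENE = {
#       'SCHEMA': 'myproject.schema.schema',
#       'MIDDLEWARE': ['graphene_django_jwt.middleware.JSONWebTokenMiddleware'],
#   }
# The dotted path is hypothetical and depends on where this class actually lives.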
|
StarcoderdataPython
|
3327014
|
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from .views import PreApprovedSalesViewSet, RegisteredSaleViewSet
app_name = "core"
router = DefaultRouter()
router.register("preapproved", PreApprovedSalesViewSet)
router.register("sale", RegisteredSaleViewSet)
urlpatterns = [
path("", include(router.urls)),
]
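# For reference, DefaultRouter generates the standard list/detail routes for each
# registered viewset (plus a browsable API root), e.g. preapproved/, preapproved/{pk}/,
# sale/ and sale/{pk}/.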
|
StarcoderdataPython
|
105068
|
<gh_stars>1-10
"""
Tests to ensure that flows get stratified correctly.
That is, when a stratification is applied, there are the right number of flows,
connected to the right compartments, with the right adjustments applied.
"""
import pytest
from summer import AgeStratification
from summer import Compartment as C
from summer import CompartmentalModel, StrainStratification, Stratification
from summer.adjust import Multiply, Overwrite
from summer.flows import (
CrudeBirthFlow,
DeathFlow,
ImportFlow,
InfectionFrequencyFlow,
TransitionFlow,
)
def test_stratify_entry_flows__with_no_explicit_adjustments():
"""
Ensure entry flows are stratified correctly when no adjustments are requested.
Expect flow to be conserved, split evenly over the new strata.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
model.add_importation_flow("imports", 10, "S", split_imports=False)
expected_flows = [ImportFlow("imports", C("S"), 10)]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
strat = Stratification("location", ["urban", "rural"], ["S", "I", "R"])
model.stratify_with(strat)
expected_flows = [
ImportFlow("imports", C("S", {"location": "urban"}), 10, [Multiply(0.5)]),
ImportFlow("imports", C("S", {"location": "rural"}), 10, [Multiply(0.5)]),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
strat = Stratification("age", ["young", "old"], ["S", "I", "R"])
model.stratify_with(strat)
expected_flows = [
ImportFlow(
"imports",
C("S", {"location": "urban", "age": "young"}),
10,
[Multiply(0.5), Multiply(0.5)],
),
ImportFlow(
"imports",
C("S", {"location": "urban", "age": "old"}),
10,
[Multiply(0.5), Multiply(0.5)],
),
ImportFlow(
"imports",
C("S", {"location": "rural", "age": "young"}),
10,
[Multiply(0.5), Multiply(0.5)],
),
ImportFlow(
"imports",
C("S", {"location": "rural", "age": "old"}),
10,
[Multiply(0.5), Multiply(0.5)],
),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify_entry_flows__with_split_imports():
"""
Ensure split_import entry flows are scaled correctly (when added post-stratification)
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
strat = Stratification("age", ["young", "old"], ["S", "I", "R"])
model.stratify_with(strat)
model.add_importation_flow("imports", 10, "S", split_imports=True)
expected_flows = [
ImportFlow(
"imports",
C("S", {"age": "young"}),
10,
[Multiply(0.5)],
),
ImportFlow(
"imports",
C("S", {"age": "old"}),
10,
[Multiply(0.5)],
),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
strat = Stratification("location", ["urban", "rural"], ["S", "I", "R"])
strat.set_flow_adjustments("imports", {"urban": None, "rural": Multiply(0.7)})
model.stratify_with(strat)
expected_flows = [
ImportFlow(
"imports",
C("S", {"age": "young", "location": "urban"}),
10,
[Multiply(0.5)],
),
ImportFlow(
"imports",
C("S", {"age": "young", "location": "rural"}),
10,
[Multiply(0.5), Multiply(0.7)],
),
ImportFlow(
"imports",
C("S", {"age": "old", "location": "urban"}),
10,
[Multiply(0.5)],
),
ImportFlow(
"imports",
C("S", {"age": "old", "location": "rural"}),
10,
[Multiply(0.5), Multiply(0.7)],
),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify_entry_flows__with_explicit_adjustments():
"""
Ensure entry flows are stratified correctly when adjustments are requested.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
model.add_importation_flow("imports", 10, "S", split_imports=False)
expected_flows = [ImportFlow("imports", C("S"), 10)]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
strat = Stratification("location", ["urban", "rural"], ["S", "I", "R"])
strat.set_flow_adjustments("imports", {"urban": Multiply(0.9), "rural": None})
model.stratify_with(strat)
expected_flows = [
ImportFlow(
"imports",
C("S", {"location": "urban"}),
10,
[Multiply(0.9)],
),
ImportFlow(
"imports",
C("S", {"location": "rural"}),
10,
[],
),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
strat = Stratification("age", ["young", "old"], ["S", "I", "R"])
strat.set_flow_adjustments("imports", {"young": Multiply(0.8), "old": Overwrite(1)})
model.stratify_with(strat)
expected_flows = [
ImportFlow(
"imports",
C("S", {"location": "urban", "age": "young"}),
10,
[Multiply(0.9), Multiply(0.8)],
),
ImportFlow(
"imports",
C("S", {"location": "urban", "age": "old"}),
10,
[Multiply(0.9), Overwrite(1)],
),
ImportFlow(
"imports",
C("S", {"location": "rural", "age": "young"}),
10,
[Multiply(0.8)],
),
ImportFlow(
"imports",
C("S", {"location": "rural", "age": "old"}),
10,
[Overwrite(1)],
),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_add_entry_flows_post_stratification():
"""
Ensure we can add flows after a model is stratified.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
assert len(model._flows) == 0
strat = Stratification("location", ["urban", "rural"], ["S", "I", "R"])
model.stratify_with(strat)
with pytest.raises(AssertionError):
model.add_importation_flow("imports", 10, "S", split_imports=False, expected_flow_count=1)
assert len(model._flows) == 0
model.add_importation_flow("imports", 10, "S", split_imports=False, expected_flow_count=2)
assert len(model._flows) == 2
expected_flows = [
ImportFlow("imports", C("S", {"location": "urban"}), 10, []),
ImportFlow("imports", C("S", {"location": "rural"}), 10, []),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_add_entry_flows_post_stratification__with_filter():
"""
Ensure we can add flows after a model is stratified when a strata filter is applied
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
assert len(model._flows) == 0
strat = Stratification("location", ["urban", "rural"], ["S", "I", "R"])
model.stratify_with(strat)
assert len(model._flows) == 0
model.add_importation_flow(
"imports", 10, "S", dest_strata={"location": "urban"}, split_imports=False, expected_flow_count=1
)
assert len(model._flows) == 1
expected_flows = [
ImportFlow("imports", C("S", {"location": "urban"}), 10, []),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify__age__validate_ageing_flows_added():
"""
Ensure, when using an age stratification, that ageing flows are automatically added
and that birth flows are all sent to age 0.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
assert len(model._flows) == 0
model.add_crude_birth_flow("births", 0.02, "S")
assert len(model._flows) == 1
strat = AgeStratification("age", ["0", "5", "15"], ["S", "I", "R"])
model.stratify_with(strat)
# Expect ageing flows between age groups and a birth flow that only goes to age 0.
expected_flows = [
CrudeBirthFlow("births", C("S", {"age": "0"}), 0.02),
TransitionFlow(
"ageing_SXage_0_to_SXage_5", C("S", {"age": "0"}), C("S", {"age": "5"}), 0.2
),
TransitionFlow(
"ageing_IXage_0_to_IXage_5", C("I", {"age": "0"}), C("I", {"age": "5"}), 0.2
),
TransitionFlow(
"ageing_RXage_0_to_RXage_5", C("R", {"age": "0"}), C("R", {"age": "5"}), 0.2
),
TransitionFlow(
"ageing_SXage_5_to_SXage_15", C("S", {"age": "5"}), C("S", {"age": "15"}), 0.1
),
TransitionFlow(
"ageing_IXage_5_to_IXage_15", C("I", {"age": "5"}), C("I", {"age": "15"}), 0.1
),
TransitionFlow(
"ageing_RXage_5_to_RXage_15", C("R", {"age": "5"}), C("R", {"age": "15"}), 0.1
),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify__age__validate_ageing_flows_added_second():
"""
Ensure that age stratification works when applied after a previous stratification.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
assert len(model._flows) == 0
model.add_crude_birth_flow("births", 0.02, "S")
assert len(model._flows) == 1
strat = Stratification("location", ["urban", "rural"], ["S", "I", "R"])
model.stratify_with(strat)
strat = AgeStratification("age", ["0", "5", "15"], ["S", "I", "R"])
model.stratify_with(strat)
# Expect ageing flows between age groups and a birth flow that only goes to age 0.
expected_flows = [
CrudeBirthFlow("births", C("S", {"location": "urban", "age": "0"}), 0.02, [Multiply(0.5)]),
CrudeBirthFlow("births", C("S", {"location": "rural", "age": "0"}), 0.02, [Multiply(0.5)]),
TransitionFlow(
"ageing_SXlocation_urbanXage_0_to_SXlocation_urbanXage_5",
C("S", {"location": "urban", "age": "0"}),
C("S", {"location": "urban", "age": "5"}),
0.2,
),
TransitionFlow(
"ageing_SXlocation_ruralXage_0_to_SXlocation_ruralXage_5",
C("S", {"location": "rural", "age": "0"}),
C("S", {"location": "rural", "age": "5"}),
0.2,
),
TransitionFlow(
"ageing_IXlocation_urbanXage_0_to_IXlocation_urbanXage_5",
C("I", {"location": "urban", "age": "0"}),
C("I", {"location": "urban", "age": "5"}),
0.2,
),
TransitionFlow(
"ageing_IXlocation_ruralXage_0_to_IXlocation_ruralXage_5",
C("I", {"location": "rural", "age": "0"}),
C("I", {"location": "rural", "age": "5"}),
0.2,
),
TransitionFlow(
"ageing_RXlocation_urbanXage_0_to_RXlocation_urbanXage_5",
C("R", {"location": "urban", "age": "0"}),
C("R", {"location": "urban", "age": "5"}),
0.2,
),
TransitionFlow(
"ageing_RXlocation_ruralXage_0_to_RXlocation_ruralXage_5",
C("R", {"location": "rural", "age": "0"}),
C("R", {"location": "rural", "age": "5"}),
0.2,
),
TransitionFlow(
"ageing_SXlocation_urbanXage_5_to_SXlocation_urbanXage_15",
C("S", {"location": "urban", "age": "5"}),
C("S", {"location": "urban", "age": "15"}),
0.1,
),
TransitionFlow(
"ageing_SXlocation_ruralXage_5_to_SXlocation_ruralXage_15",
C("S", {"location": "rural", "age": "5"}),
C("S", {"location": "rural", "age": "15"}),
0.1,
),
TransitionFlow(
"ageing_IXlocation_urbanXage_5_to_IXlocation_urbanXage_15",
C("I", {"location": "urban", "age": "5"}),
C("I", {"location": "urban", "age": "15"}),
0.1,
),
TransitionFlow(
"ageing_IXlocation_ruralXage_5_to_IXlocation_ruralXage_15",
C("I", {"location": "rural", "age": "5"}),
C("I", {"location": "rural", "age": "15"}),
0.1,
),
TransitionFlow(
"ageing_RXlocation_urbanXage_5_to_RXlocation_urbanXage_15",
C("R", {"location": "urban", "age": "5"}),
C("R", {"location": "urban", "age": "15"}),
0.1,
),
TransitionFlow(
"ageing_RXlocation_ruralXage_5_to_RXlocation_ruralXage_15",
C("R", {"location": "rural", "age": "5"}),
C("R", {"location": "rural", "age": "15"}),
0.1,
),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify_exit_flows():
"""
Ensure exit flows are stratified correctly.
Death flows should remain the same after stratification, so that everyone in all of the stratified compartments
continues to die at the same rate as before stratification.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
model.add_death_flow("d_S", 3, "S")
model.add_death_flow("d_I", 5, "I")
model.add_death_flow("d_R", 7, "R")
expected_flows = [
DeathFlow("d_S", C("S"), 3),
DeathFlow("d_I", C("I"), 5),
DeathFlow("d_R", C("R"), 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
# Apply partial stratification
strat = Stratification("location", ["urban", "rural"], ["S", "I"])
model.stratify_with(strat)
expected_flows = [
DeathFlow("d_S", C("S", {"location": "urban"}), 3),
DeathFlow("d_S", C("S", {"location": "rural"}), 3),
DeathFlow("d_I", C("I", {"location": "urban"}), 5),
DeathFlow("d_I", C("I", {"location": "rural"}), 5),
DeathFlow("d_R", C("R"), 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
# Apply partial stratification with flow adjustments
strat = Stratification("age", ["young", "old"], ["I", "R"])
strat.set_flow_adjustments("d_I", {"young": Multiply(0.5), "old": Multiply(2)})
strat.set_flow_adjustments("d_R", {"young": Multiply(0.5), "old": Multiply(2)})
model.stratify_with(strat)
expected_flows = [
DeathFlow("d_S", C("S", {"location": "urban"}), 3),
DeathFlow("d_S", C("S", {"location": "rural"}), 3),
DeathFlow("d_I", C("I", {"location": "urban", "age": "young"}), 5, [Multiply(0.5)]),
DeathFlow("d_I", C("I", {"location": "urban", "age": "old"}), 5, [Multiply(2)]),
DeathFlow("d_I", C("I", {"location": "rural", "age": "young"}), 5, [Multiply(0.5)]),
DeathFlow("d_I", C("I", {"location": "rural", "age": "old"}), 5, [Multiply(2)]),
DeathFlow("d_R", C("R", {"age": "young"}), 7, [Multiply(0.5)]),
DeathFlow("d_R", C("R", {"age": "old"}), 7, [Multiply(2)]),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_add_exit_flows_post_stratification():
"""
Ensure user can add exit flows post stratification.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
assert len(model._flows) == 0
# Apply partial stratification
strat = Stratification("location", ["urban", "rural"], ["S", "I"])
model.stratify_with(strat)
assert len(model._flows) == 0
model.add_death_flow("d_S", 3, "S")
model.add_death_flow("d_I", 5, "I")
model.add_death_flow("d_R", 7, "R")
expected_flows = [
DeathFlow("d_S", C("S", {"location": "urban"}), 3),
DeathFlow("d_S", C("S", {"location": "rural"}), 3),
DeathFlow("d_I", C("I", {"location": "urban"}), 5),
DeathFlow("d_I", C("I", {"location": "rural"}), 5),
DeathFlow("d_R", C("R"), 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_add_exit_flows_post_stratification__with_filter():
"""
Ensure user can add exit flows post stratification when a strata filter is applied
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
assert len(model._flows) == 0
# Apply partial stratification
strat = Stratification("location", ["urban", "rural"], ["S", "I"])
model.stratify_with(strat)
assert len(model._flows) == 0
model.add_death_flow("d_S", 3, "S", source_strata={"location": "rural"}, expected_flow_count=1)
model.add_death_flow("d_I", 5, "I", source_strata={"location": "rural"}, expected_flow_count=1)
# FIXME: Ask Matt, would it be better if this just crashed?
model.add_death_flow("d_R", 7, "R", source_strata={"location": "rural"}, expected_flow_count=0)
expected_flows = [
DeathFlow("d_S", C("S", {"location": "rural"}), 3),
DeathFlow("d_I", C("I", {"location": "rural"}), 5),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify_transition_flows__with_source_and_dest_stratified():
"""
Ensure transition flows are stratified correctly when both the flow source and dest are stratified.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
model.add_infection_frequency_flow("infection", 0.03, "S", "I")
model.add_transition_flow("recovery", 1 / 7, "I", "R")
expected_flows = [
InfectionFrequencyFlow(
"infection", C("S"), C("I"), 0.03, model._get_infection_frequency_multiplier
),
TransitionFlow("recovery", C("I"), C("R"), 1 / 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
# Apply stratification
strat = Stratification("location", ["urban", "rural"], ["S", "I", "R"])
model.stratify_with(strat)
expected_flows = [
InfectionFrequencyFlow(
"infection",
C("S", {"location": "urban"}),
C("I", {"location": "urban"}),
0.03,
model._get_infection_frequency_multiplier,
),
InfectionFrequencyFlow(
"infection",
C("S", {"location": "rural"}),
C("I", {"location": "rural"}),
0.03,
model._get_infection_frequency_multiplier,
),
TransitionFlow(
"recovery", C("I", {"location": "urban"}), C("R", {"location": "urban"}), 1 / 7
),
TransitionFlow(
"recovery", C("I", {"location": "rural"}), C("R", {"location": "rural"}), 1 / 7
),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify_transition_flows__with_source_only_stratified():
"""
Ensure transition flows are stratified correctly when only the flow source is stratified.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
model.add_transition_flow("recovery", 1 / 7, "I", "R")
expected_flows = [
TransitionFlow("recovery", C("I"), C("R"), 1 / 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
# Apply stratification
strat = Stratification("location", ["urban", "rural"], ["S", "I"])
model.stratify_with(strat)
expected_flows = [
TransitionFlow("recovery", C("I", {"location": "urban"}), C("R"), 1 / 7),
TransitionFlow("recovery", C("I", {"location": "rural"}), C("R"), 1 / 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify_transition_flows__with_dest_only_stratified():
"""
Ensure transition flows are stratified correctly when only the flow destination is stratified.
Expect a person-conserving adjustment of 1/N to be applied to each flow - N being the number of new strata.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
model.add_transition_flow("recovery", 1 / 7, "I", "R")
expected_flows = [
TransitionFlow("recovery", C("I"), C("R"), 1 / 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
# Apply stratification
strat = Stratification("location", ["urban", "rural"], ["R"])
model.stratify_with(strat)
expected_flows = [
TransitionFlow("recovery", C("I"), C("R", {"location": "urban"}), 1 / 7, [Multiply(0.5)]),
TransitionFlow("recovery", C("I"), C("R", {"location": "rural"}), 1 / 7, [Multiply(0.5)]),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify_transition_flows__with_dest_only_stratified__with_adjustments():
"""
Ensure transition flows are stratified correctly when only the flow destination is stratified.
Expect adjustments to override the automatic person-conserving adjustment.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
model.add_transition_flow("recovery", 1 / 7, "I", "R")
expected_flows = [
TransitionFlow("recovery", C("I"), C("R"), 1 / 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
# Apply stratification
strat = Stratification("location", ["urban", "rural"], ["R"])
strat.set_flow_adjustments("recovery", {"urban": Overwrite(0.7), "rural": Overwrite(0.1)})
model.stratify_with(strat)
expected_flows = [
TransitionFlow("recovery", C("I"), C("R", {"location": "urban"}), 1 / 7, [Overwrite(0.7)]),
TransitionFlow("recovery", C("I"), C("R", {"location": "rural"}), 1 / 7, [Overwrite(0.1)]),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify_transition_flows__with_dest_only_stratified__with_strains():
"""
Ensure transition flows are stratified correctly when only the flow destination is stratified.
Expect the strain stratification to ignore the automatic person-conserving adjustment.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
model.add_transition_flow("recovery", 1 / 7, "I", "R")
expected_flows = [
TransitionFlow("recovery", C("I"), C("R"), 1 / 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
# Apply stratification
strat = StrainStratification("location", ["urban", "rural"], ["R"])
model.stratify_with(strat)
# No adjustments added
expected_flows = [
TransitionFlow("recovery", C("I"), C("R", {"location": "urban"}), 1 / 7),
TransitionFlow("recovery", C("I"), C("R", {"location": "rural"}), 1 / 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
def test_stratify_transition_flows__with_dest_only_stratified__with_adjustments_and_strains():
"""
Ensure transition flows are stratified correctly when only the flow destination is stratified.
Expect adjustments to override the automatic person-conserving adjustment when using a strain strat.
"""
model = CompartmentalModel(
times=[0, 5], compartments=["S", "I", "R"], infectious_compartments=["I"]
)
model.add_transition_flow("recovery", 1 / 7, "I", "R")
expected_flows = [
TransitionFlow("recovery", C("I"), C("R"), 1 / 7),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
# Apply stratification
strat = StrainStratification("location", ["urban", "rural"], ["R"])
strat.set_flow_adjustments("recovery", {"urban": Overwrite(0.7), "rural": Overwrite(0.1)})
model.stratify_with(strat)
expected_flows = [
TransitionFlow("recovery", C("I"), C("R", {"location": "urban"}), 1 / 7, [Overwrite(0.7)]),
TransitionFlow("recovery", C("I"), C("R", {"location": "rural"}), 1 / 7, [Overwrite(0.1)]),
]
assert len(expected_flows) == len(model._flows)
assert all([a._is_equal(e) for e, a in zip(expected_flows, model._flows)])
|
StarcoderdataPython
|
4833296
|
<gh_stars>0
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
'''The model architecture used was first created by the user polomarco for a Kaggle competition:
https://www.kaggle.com/polomarco/ecg-classification-cnn-lstm-attention-mechanism
However, this example has been altered to fit the FLUTE architecture'''
import torch
from torch import nn
from torch.nn import functional as F
# ReLu alternative
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
class ConvNormPool(nn.Module):
"""Conv Skip-connection module"""
def __init__(
self,
input_size,
hidden_size,
kernel_size,
norm_type='batchnorm'
):
super().__init__()
self.kernel_size = kernel_size
self.conv_1 = nn.Conv1d(
in_channels=input_size,
out_channels=hidden_size,
kernel_size=kernel_size
)
self.conv_2 = nn.Conv1d(
in_channels=hidden_size,
out_channels=hidden_size,
kernel_size=kernel_size
)
self.conv_3 = nn.Conv1d(
in_channels=hidden_size,
out_channels=hidden_size,
kernel_size=kernel_size
)
self.swish_1 = Swish()
self.swish_2 = Swish()
self.swish_3 = Swish()
if norm_type == 'group':
self.normalization_1 = nn.GroupNorm(
num_groups=8,
num_channels=hidden_size
)
self.normalization_2 = nn.GroupNorm(
num_groups=8,
num_channels=hidden_size
)
self.normalization_3 = nn.GroupNorm(
num_groups=8,
num_channels=hidden_size
)
else:
self.normalization_1 = nn.BatchNorm1d(num_features=hidden_size)
self.normalization_2 = nn.BatchNorm1d(num_features=hidden_size)
self.normalization_3 = nn.BatchNorm1d(num_features=hidden_size)
self.pool = nn.MaxPool1d(kernel_size=2)
def forward(self, input):
conv1 = self.conv_1(input)
x = self.normalization_1(conv1)
x = self.swish_1(x)
x = F.pad(x, pad=(self.kernel_size - 1, 0))
x = self.conv_2(x)
x = self.normalization_2(x)
x = self.swish_2(x)
x = F.pad(x, pad=(self.kernel_size - 1, 0))
conv3 = self.conv_3(x)
x = self.normalization_3(conv1+conv3)
x = self.swish_3(x)
x = F.pad(x, pad=(self.kernel_size - 1, 0))
x = self.pool(x)
return x
class RNN(nn.Module):
"""RNN module(cell type lstm or gru)"""
def __init__(
self,
input_size,
hid_size,
num_rnn_layers=1,
dropout_p = 0.2,
):
super().__init__()
self.rnn_layer = nn.LSTM(
input_size=input_size,
hidden_size=hid_size,
num_layers=num_rnn_layers,
dropout=dropout_p if num_rnn_layers>1 else 0,
bidirectional=False,
batch_first=True,
)
def forward(self, input):
outputs, hidden_states = self.rnn_layer(input)
return outputs, hidden_states
class Net(nn.Module):
def __init__(
self,
input_size=1,
hid_size=64,
n_classes=5,
kernel_size=5,
):
super().__init__()
self.rnn_layer = RNN(
input_size=46,
hid_size=hid_size,
)
self.conv1 = ConvNormPool(
input_size=input_size,
hidden_size=hid_size,
kernel_size=kernel_size,
)
self.conv2 = ConvNormPool(
input_size=hid_size,
hidden_size=hid_size,
kernel_size=kernel_size,
)
self.avgpool = nn.AdaptiveMaxPool1d((1))
self.attn = nn.Linear(hid_size, hid_size, bias=False)
self.fc = nn.Linear(in_features=hid_size, out_features=n_classes)
def forward(self, input):
x = self.conv1(input)
x = self.conv2(x)
x_out, hid_states = self.rnn_layer(x)
x = torch.cat([hid_states[0], hid_states[1]], dim=0).transpose(0, 1)
x_attn = torch.tanh(self.attn(x))
x = x_attn.bmm(x_out)
x = x.transpose(2, 1)
x = self.avgpool(x)
x = x.view(-1, x.size(1) * x.size(2))
x = F.softmax(self.fc(x), dim=-1)
return x
class SuperNet(nn.Module):
'''This is the parent of the net with some extra methods'''
def __init__(self, model_config):
super().__init__()
self.net = Net()
def loss(self, input: torch.Tensor):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
features, labels = input['x'].to(device), input['y'].to(device)
output = self.net.forward(features)
return F.cross_entropy(output, labels.long())
def inference(self, input):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
features, labels = input['x'].to(device), input['y'].to(device)
output = self.net.forward(features)
n_samples = features.shape[0]
accuracy = torch.mean((torch.argmax(output, dim=1) == labels).float()).item()
return output, accuracy, n_samples
def set_eval(self):
'''Bring the model into evaluation mode'''
self.eval()
def set_train(self):
'''Bring the model into training mode'''
self.train()
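# Illustrative usage sketch (not part of the original file). The input length of
# 187 samples is an assumption chosen so that, after the two pooling stages, the
# sequence reaching the LSTM has 46 features, matching input_size=46 above.
if __name__ == '__main__':
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = SuperNet(model_config=None).to(device)
    batch = {'x': torch.randn(8, 1, 187), 'y': torch.randint(0, 5, (8,)).float()}
    print('loss:', model.loss(batch).item())
    output, accuracy, n_samples = model.inference(batch)
    print('output shape:', tuple(output.shape), 'accuracy:', accuracy, 'samples:', n_samples)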
|
StarcoderdataPython
|
19815
|
from apscheduler.schedulers.background import BackgroundScheduler
from des.ccd import start_pipeline
def download_queue():
start_pipeline()
scheduler = BackgroundScheduler()
scheduler.add_job(
download_queue,
'interval',
# minutes=1
seconds=20,
max_instances=1,
id='des_download_ccd'
)
scheduler.start()
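# Illustrative sketch (not part of the original module): BackgroundScheduler runs
# its jobs in a daemon thread, so a standalone run needs the main thread kept alive.
if __name__ == '__main__':
    import time
    try:
        while True:
            time.sleep(60)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()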
|
StarcoderdataPython
|
4837499
|
"""
BinarySearchTree
Interface:
==========
insert(value)
remove(value)
search(value): Return True if found and False otherwise.
traverse(order): `order` can be one of `pre_order`, `post_order`, `in_order`, `out_order`, or `breadth_first_order`.
isEmpty()
height
clear()
size
Private helper methods: (DON'T use them in user code!)
==================
[......]
Methods prefixed with underscore are private helper methods.
They are not intended for public exposure or usage out of its containing scope.
Their implementation detail might be subject to change in the future.
It's recommended to adhere to API explicitly provided.
Complexity:
===========
| Operation  | Complexity |
|------------|------------|
| insert()   | O(H)       |
| search()   | O(H)       |
| remove()   | O(H)       |
| traverse() | O(N)       |
| clear()    | O(1)       |
| size       | O(1)       |
| height     | O(N)       |
where N is number of nodes.
where H denotes tree height, which is in average O(logN). Reference: https://www.sciencedirect.com/science/article/pii/0022000082900046
"""
__all__ = ["BinarySearchTree", "BST"]
import random
from functools import wraps
from ..utils import decorate_all_methods
from .binary_tree import BinaryNode as Node
from .binary_tree import BinaryTree
def check_comparable(func):
error_messages = {"'<' not supported", "'>' not supported",
"'==' not supported", "'>=' not supported", "'<=' not supported"}
@wraps(func)
def wrapper(*args, **kw):
try:
return func(*args, **kw)
except TypeError as e:
for err in error_messages:
if str(e).startswith(err):
raise ValueError("Key should be comparable.")
raise e
return wrapper
@decorate_all_methods(check_comparable)
class BinarySearchTree(BinaryTree):
"""
Key must be comparable (and orderable).
"""
# TODO: make height computation O(1) instead of O(N)
def insert(self, data):
self._root = self.recur_insert(self._root, data)
return self
def recur_insert(self, node, data):
if node is None:
self._size += 1
return Node(data)
if data == node.data:
return node
elif data < node.data:
node.left = self.recur_insert(node.left, data)
return node
else:
node.right = self.recur_insert(node.right, data)
return node
def search(self, data):
return self.recur_search(self._root, data)
def recur_search(self, node, data):
if node is None:
return False
if data == node.data:
return True
elif data > node.data:
return self.recur_search(node.right, data)
else:
return self.recur_search(node.left, data)
def remove(self, data):
self._root = self.recur_remove(self._root, data)
return self
def recur_remove(self, node, data):
if node is None:
return None
if data < node.data:
node.left = self.recur_remove(node.left, data)
return node
elif data > node.data:
node.right = self.recur_remove(node.right, data)
return node
else:
return self._remove_node(node)
def _remove_node(self, node):
if node is None:
return None
self._size -= 1
# Randomly pick node to delete from two choices:
# "max node in left sub tree", or "min node in right sub tree"
if random.choice([0, 1]):
if node.left is not None:
itr = node.left
prev = node
while itr.right is not None:
prev = itr
itr = itr.right
prev.right = itr.left
node.data = itr.data
return node
else:
return node.right
else:
if node.right is not None:
itr = node.right
prev = node
while itr.left is not None:
prev = itr
itr = itr.left
prev.left = itr.right
node.data = itr.data
return node
else:
return node.left
default_traversal_order = "in_order"
def in_order_traverse(self):
"""
`in_order` traversal retrieves nodes in sorted order
"""
return self.recur_in_order_traverse(self._root)
def recur_in_order_traverse(self, node):
if node is None:
return
yield from self.recur_in_order_traverse(node.left)
yield node
yield from self.recur_in_order_traverse(node.right)
def out_order_traverse(self):
return self.recur_out_order_traverse(self._root)
def recur_out_order_traverse(self, node):
if node is None:
return
yield from self.recur_out_order_traverse(node.right)
yield node
yield from self.recur_out_order_traverse(node.left)
# Alias
BST = BinarySearchTree
if __name__ == '__main__':
tree1 = BST()
tree1.insert(2).insert(1).insert(4).insert(3).insert(5).remove(5)
print(tree1)
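# In-order traversal yields keys in sorted order (nodes expose a `.data` attribute).
print([node.data for node in tree1.in_order_traverse()])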
|
StarcoderdataPython
|
3272148
|
__________________________________________________________________________________________________
class Solution:
def calculateTime(self, keyboard: str, word: str) -> int:
pos = 0
ans = 0
for ch in word:
cur = keyboard.find(ch)
ans += abs(cur - pos)
pos = cur
return ans
__________________________________________________________________________________________________
__________________________________________________________________________________________________
|
StarcoderdataPython
|
198601
|
from simbatch.core import core
from simbatch.core import settings
import pytest
import os
@pytest.fixture(scope="module")
def sib():
# TODO pytest-datadir pytest-datafiles vs ( path.dirname( path.realpath(sys.argv[0]) )
settings_file = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + os.sep + "config_tests.ini"
return core.SimBatch("Stand-alone", ini_file=settings_file)
def test_init_simbatch(sib):
#
sib.sts.debug_level = 0
sib.logger.console_level = 0
#
assert sib.sts.runtime_env == "Stand-alone"
lo = sib.load_data()
assert lo[0] is True
def test_set_prj_sch_tsk(sib):
sib.sio.create_unit_tests_example_data(do_save=True)
utest_proj_id = sib.prj.get_id_from_name("pytest proj")
assert utest_proj_id is not None
assert sib.prj.update_current_from_id(sib.prj.max_id) is True
assert sib.prj.current_project.project_name == "pytest proj"
sib.dfn.update_current_definition_by_name("Maya")
sib.sch.update_current_from_id(sib.tsk.tasks_data[-1].schema_id)
sib.tsk.update_current_from_id(sib.tsk.max_id)
assert sib.tsk.current_task_id > 0
sib.que.remove_queue_items(only_done=True)
def test_generate_template_queue_item(sib):
print "\n\n___ template_queue_item ___"
template_queue_item = sib.que.generate_template_queue_item(sib.tsk.current_task, sib.sch.current_schema)
assert template_queue_item.shot == "ut01"
template_queue_item.print_this()
def test_generate_template_evo_script(sib):
print "\n\n___ template script ___"
for i, act in enumerate(sib.sch.current_schema.actions_array):
act.actual_value = "test_user_input_" + str(i+1)
template_script = sib.que.generate_template_evo_script(sib.sch.current_schema)
print template_script
def test_get_params_val_arr_from_string(sib):
evos_var = sib.pat.get_params_val_arr_from_string("BND 1 2 3")
assert evos_var[0] == 3
assert evos_var[1] == [['BND', '1.0', '2.0', '3.0']]
evos_var = sib.pat.get_params_val_arr_from_string("bNd 7 14.5; stR 4.00 5 6")
assert evos_var[0] == 6
assert evos_var[1][0] == ['BND', '7.0', '14.5']
assert evos_var[1][1] == ['STR', '4.0', '5.0', '6.0']
evos_var = sib.pat.get_params_val_arr_from_string("bNd 7 7 14.5 7 14.5; stR 4.00 5 6 ; MASS 123 4 5; MAS 1 2 3")
assert evos_var[0] == 18
assert evos_var[1][0] == ['BND', '7.0', '14.5']
assert evos_var[1][1] == ['STR', '4.0', '5.0', '6.0']
assert evos_var[1][2] == ['MAS', '1.0', '2.0', '3.0']
# TODO
# evos_var = sib.pat.get_params_val_arr_from_string("stR 4.00 5 6 ; ZZZ 123 4 5; MAS 1 2 3")
# evos_var = sib.pat.get_params_val_arr_from_string("stR 4.00 5 6 ; ZZZZZ 123 4 5 MAS 1 2 3")
def test_get_array_of_scripts_params_val_from_schema_actions(sib):
print "\n\n___ scripts_params ___"
arr_scripts_params = sib.que.get_array_of_scripts_params_val_from_schema_actions(sib.sch.current_schema)
print "\n", arr_scripts_params
evo_action_index = sib.sch.current_schema.get_first_evos_possible()
if evo_action_index is not None:
sib.sch.current_schema.actions_array[evo_action_index].actual_value = "Bnd 5 55"
arr_scripts_params = sib.que.get_array_of_scripts_params_val_from_schema_actions(sib.sch.current_schema)
print "\n", arr_scripts_params
evo_action_index = sib.sch.current_schema.get_first_evos_possible()
if evo_action_index is not None:
sib.sch.current_schema.actions_array[evo_action_index].actual_value = "bND 5 55; MAS 1 2 3"
arr_scripts_params = sib.que.get_array_of_scripts_params_val_from_schema_actions(sib.sch.current_schema)
print "\n", arr_scripts_params
evo_action_index = sib.sch.current_schema.get_first_evos_possible()
if evo_action_index is not None:
sib.sch.current_schema.actions_array[evo_action_index].actual_value = "bND 5 55; MAS 1 2 3; STI 300 303"
arr_scripts_params = sib.que.get_array_of_scripts_params_val_from_schema_actions(sib.sch.current_schema)
print "\n", arr_scripts_params
def test_do_params_combinations(sib):
print "\n\n___ params_combinations ___"
arr_scripts_params = sib.que.get_array_of_scripts_params_val_from_schema_actions(sib.sch.current_schema)
all_evo_combinations_array = sib.que.do_params_combinations(arr_scripts_params)
print "\n", all_evo_combinations_array
in_arr = [[[u'STR:7.0', u'i'], [u'STR:8.0', u'i']]]
assert len(sib.que.do_params_combinations(in_arr)) == 2 # TODO check value !!!
in_arr = [[[u'STR:7.0', u'i'], [u'STR:8.0', u'i']],[[u'LFT:1.0', u'i'], [u'LFT:2.0', u'i']]]
assert len(sib.que.do_params_combinations(in_arr)) == 4 # TODO check value !!!
def test_generate_queue_items(sib):
print "\n\n___ queue_items ___"
qi1 = sib.que.generate_queue_items(sib.tsk.current_task_id)
# for qi in qi1:
# print "___"
# qi.print_this()
# WIP
def test_generate_queue_items_evo(sib):
print "\n\n___ queue_items with evo ___"
assert len(sib.tsk.tasks_data) > 0
sib.sch.update_current_from_id(sib.tsk.tasks_data[-1].schema_id)
sib.tsk.update_current_from_id(sib.tsk.max_id)
# sib.print_important_values()
custom_action_inputs = [["ooppoo"], [["sim_clt"], "DMP 1 2 3"], ["simed_file"]]
# TO DO !!! , action_inputs = custom_action_inputs
qi1 = sib.que.generate_queue_items(sib.tsk.current_task_id)
# for qi in qi1:
# print "___"
# qi.print_this()
# WIP
|
StarcoderdataPython
|
1653718
|
<gh_stars>1-10
import os
import numpy as np
from math import log10, sqrt
from numpy.fft import fft2, ifft2
from skimage import io
from scipy.signal import gaussian
# Get the peak signal-to-noise ratio (PSNR) between two images
def PSNR(original, compressed):
mse = np.mean((original - compressed) ** 2)
if(mse == 0):
return 100
max_pixel = 255.0
psnr = 20 * log10(max_pixel / sqrt(mse))
return psnr
# Function will add gaussian noise to the image
def add_gaussian_noise(img, sigma):
gauss = np.random.normal(0, sigma, np.shape(img))
noisy_img = img + gauss
noisy_img[noisy_img < 0] = 0
noisy_img[noisy_img > 255] = 255
return noisy_img
# Function will return patches of default size (11,11)
def patchify(img, patch_shape):
X, Y = img.shape
x, y = patch_shape
shape = (X - x + 1, Y - y + 1, x, y)
X_str, Y_str = img.strides
strides = (X_str, Y_str, X_str, Y_str)
return np.lib.stride_tricks.as_strided(img, shape=shape, strides=strides)
# Function to convert RGB image to grayscale
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
# Creates a gaussian kernel
def gaussian_kernel(kernel_size = 3):
h = gaussian(kernel_size, kernel_size / 3).reshape(kernel_size, 1)
h = np.dot(h, h.transpose())
h /= np.sum(h)
return h
# Wiener filter: deconvolution in the frequency domain using
# G(f) = conj(H(f)) / (|H(f)|^2 + K), where K approximates the noise-to-signal ratio
def wiener_filter(img, kernel, K):
kernel /= np.sum(kernel)
dummy = np.copy(img)
dummy = fft2(dummy)
kernel = fft2(kernel, s = img.shape)
kernel = np.conj(kernel) / (np.abs(kernel) ** 2 + K)
dummy = dummy * kernel
dummy = np.abs(ifft2(dummy))
return dummy
if __name__ == '__main__':
filename = os.path.join(os.getcwd(), './Noisy Image/photo.jpg')
myPhoto = io.imread(filename)
# Code to create a gaussian noisy image
gaussian_noise_image = add_gaussian_noise(myPhoto, 50)
kernel = gaussian_kernel(3)
# Checking if image is colored or not
if(len(gaussian_noise_image.shape) == 3):
noisy_image = np.copy(gaussian_noise_image)
red_noise = noisy_image[:, :, 0]
green_noise = noisy_image[:, :, 1]
blue_noise = noisy_image[:, :, 2]
red_filtered = wiener_filter(red_noise, kernel, K=10)
green_filtered = wiener_filter(green_noise, kernel, K=10)
blue_filtered = wiener_filter(blue_noise, kernel, K=10)
filtered_image = np.dstack((red_filtered, green_filtered, blue_filtered))
else:
filtered_image = wiener_filter(gaussian_noise_image, kernel, K=10)
# Calculating the PSNR value for the images
print("PSNR for noisy image: {}".format(PSNR(myPhoto, gaussian_noise_image)))
print("PSNR for filtered image: {}".format(PSNR(myPhoto, filtered_image)))
# Saving the noisy and restored image
io.imsave('./Noisy Image/gaussian_noise.jpg', gaussian_noise_image)
io.imsave('./Restored Images/restored.jpg', filtered_image)
|
StarcoderdataPython
|
133012
|
from numpy import interp
from os import listdir
from PIL import Image, ImageStat
# Directory for block textures extracted from version jar
textures = 'assets/minecraft/textures/block'
# Special case: animated blocks like crimson_stem are
# taller than 64px: crop when compositing later?
# List of blocks to allow loading
# > Change this file for different lists
with open('blocks_full.txt') as reader:
allow_blocks = reader.read().splitlines()
# Unused because redundant
# # List of blocks to deny loading
# with open('blocks_deny.txt') as reader:
# deny_blocks = reader.read().splitlines()
# Find png filenames in textures directory and remove .png extension
# (Create list of all blocks)
block_ids = [filename[:-4] for filename in listdir(textures) if filename.endswith('.png')]
# Remove all blocks except those in allow list from block id list
block_ids = [id for id in block_ids if id in allow_blocks]
# Unused because redundant
# # Remove blocks in deny list from block id list
# block_ids = [id for id in block_ids if not id in deny_blocks]
# Convert HSV into hsv(360°, 100%, 100%) color code string
def hsv_string (h, s, v):
hsv_string = f'hsv({round(h)}, {round(s)}%, {round(v)}%)'
return (hsv_string)
# Get average HSV color from image
def avg_hsv(block_id):
# Open Minecraft texture as RGBA image
im = Image.open(f'{textures}/{block_id}.png')
# Convert RGBA image into HSV (Hue, Saturation, Value) image
im = im.convert('HSV')
# Split HSV into separate channels
hue_channel = im.getchannel('H')
sat_channel = im.getchannel('S')
val_channel = im.getchannel('V')
# Get average of each channel
h = ImageStat.Stat(hue_channel).mean
s = ImageStat.Stat(sat_channel).mean
v = ImageStat.Stat(val_channel).mean
# Scale from 8-bit channel range (255, 255, 255) to hsv(360°, 100%, 100%) range
# These are converted to floats
h = interp(h, [0, 255], [0, 360])[0]
s = interp(s, [0, 255], [0, 100])[0]
v = interp(v, [0, 255], [0, 100])[0]
# Collect this block's data in a dictionary
return {'block_id': block_id, 'hue': h, 'sat': s, 'val': v, 'hsv_string': hsv_string(h, s, v)}
# Make a list of blocks and their average colors
blocks = map(avg_hsv, block_ids)
# Sort blocks by hue, then saturation, then value
blocks = sorted(blocks, key = lambda block: (block['hue'], block['sat'], block['val']))
# Print blocks and their color
for block in blocks:
print(f"{block['block_id']} : {block['hsv_string']}")
|
StarcoderdataPython
|
3240597
|
import logging
import random
import time
import numpy
import torch
from .obs_ga import ObsGA
from .policy_ga import PolicyGA
from ..environment import Environment
LOG = logging.getLogger(__name__)
class TimeStat:
def __init__(self):
self._start_time = None
def start(self):
self._start_time = time.time()
def used_time(self) -> float:
now_time = time.time()
result = now_time - self._start_time
return round(result, 3)
class EnvironmentGA(Environment):
def __init__(self, limit, seed, time_limit):
"""_summary_
Args:
limit (int): tx counts limit
seed (int): seed for module like random, torch, numpy.random
time_limit (int): time limit in seconds
"""
super().__init__(limit, seed)
self.time_limit = time_limit
self.time_stat = TimeStat()
def fuzz_loop(self, policy: PolicyGA, obs: ObsGA):
self.time_stat.start()
obs.init()
LOG.info(obs.stat)
LOG.info('initial calls start')
self.init_txs(policy, obs)
LOG.info('initial calls end')
random.seed(self.seed)
torch.manual_seed(self.seed)
numpy.random.seed(self.seed)
for contract_name in policy.contract_manager.fuzz_contract_names:
generation_counts = 1
suite = []
missed_block_chrom = dict()
seeds = policy.initial_seeds(contract_name, obs)
while self.time_stat.used_time() < self.time_limit:
LOG.info('generation {}'.format(generation_counts))
LOG.info(obs.stat)
generation_counts += 1
seeds = policy.selection(seeds, suite, missed_block_chrom, contract_name, obs)
policy.crossover(seeds)
policy.mutation(seeds, obs)
policy.execute_seeds(seeds, obs)
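# A minimal wiring sketch (PolicyGA and ObsGA construction is defined elsewhere in
# the package and is not shown here; the numbers below are placeholders):
# env = EnvironmentGA(limit=1000, seed=42, time_limit=3600)
# env.fuzz_loop(policy, obs)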
|
StarcoderdataPython
|
107473
|
<gh_stars>0
def main(event, context):
    print('This is from the second Lambda function.')
|
StarcoderdataPython
|
3362231
|
<filename>canteen/app.py
from flask import (
Flask,
render_template,
)
app = Flask(
__name__,
static_folder='../static/dist',
template_folder='../static',
)
@app.route('/')
def index():
return render_template('index.html')
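# Running the built-in development server is an assumption for local testing; the
# original app may instead be served through an external WSGI container.
if __name__ == '__main__':
    app.run(debug=True)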
|
StarcoderdataPython
|
3206585
|
<reponame>gregflynn/configs<filename>modules/pacman/__init__.py
from sanity.initializer import BaseInitializer
from sanity.settings import ExecWrapper
class Initializer(BaseInitializer):
@property
def requirements(self):
return ['pacman']
def install(self):
self.bin('pac', self.base_path('pac.py'), bin_type=ExecWrapper.PYTHON)
self.bin_autocomplete_click('pac')
|
StarcoderdataPython
|
199070
|
# -*- coding: utf-8 -*-
"""Generic requests handlers."""
import datetime
from operator import itemgetter
from google.appengine.ext import ndb
from jinja2 import FileSystemLoader
from webapp2 import cached_property
from webapp2 import RequestHandler
from webapp2 import uri_for
from webapp2_extras import auth
from webapp2_extras import i18n
from webapp2_extras import jinja2
from webapp2_extras import sessions
from webapp2_extras import sessions_memcache
try:
from appengine_config import template_loaders
except ImportError:
template_loaders = FileSystemLoader('')
try:
from appengine_config import available_languages
except ImportError:
available_languages = {
'en': 'en_US',
'fr': 'fr_FR'
}
try:
from appengine_config import language_code
except ImportError:
language_code = 'en_US'
EXTENSIONS = ['jinja2.ext.autoescape', 'jinja2.ext.with_', 'jinja2.ext.i18n']
def jinja2_factory(app, loaders=None):
"""Set configuration environment for Jinja2.
Args:
app -- (WSGIApplication)
loaders -- (list) Jinja2 template loaders
Return:
(Jinja2) A Jinja2 instance.
"""
if loaders is None:
loaders = template_loaders
config = {'environment_args': {'extensions': EXTENSIONS,
'loader': loaders},
'globals': {'uri_for': uri_for, 'datetime': datetime},
'filters': {}}
j = jinja2.Jinja2(app, config=config)
return j
class BaseRequestHandler(RequestHandler):
"""Base request handler with session support and Jinja2 templates."""
session_store = None
@cached_property
def auth(self):
"""Shortcut to access the auth instance as a property."""
return auth.get_auth()
@cached_property
def user_info(self):
"""Shortcut to access a subset of the user attributes.
That subset is stored in session.
The list of attributes to store in the session is specified in
config['webapp2_extras.auth']['user_attributes'].
Returns:
A dictionary with most user information
"""
return self.auth.get_user_by_session()
@cached_property
def user(self):
"""Shortcut to access the current logged in user.
Unlike user_info, it fetches information from the persistence layer and
returns an instance of the underlying model.
Returns:
The instance of the user model associated to the logged in user.
"""
usr = self.user_info
return self.user_model.get_by_id(usr['user_id'], namespace='') \
if usr else None
@cached_property
def user_model(self):
"""Return the implementation of the user model.
If set, it's consistent with
config['webapp2_extras.auth']['user_model']
"""
return self.auth.store.user_model
@cached_property
def session(self):
"""Shortcut to access the current session."""
factory = sessions_memcache.MemcacheSessionFactory
return self.session_store.get_session(factory=factory)
@cached_property
def jinja2(self):
"""Return a Jinja2 renderer cached in the app registry."""
return jinja2.get_jinja2(factory=jinja2_factory, app=self.app)
def render_html(self, _template, **context):
"""Render a template and writes the result to the response."""
context.update({'user': self.user_info})
resp = self.jinja2.render_template(_template, **context)
self.response.write(resp)
@classmethod
def _extract_locale_from_header(cls, locale_header):
"""Extract locale from HTTP Accept-Language header.
        We only support language, not full locale, for now.
Header with en-GB will be set as en_US, etc.
Args:
locale_header (str): HTTP Accept-Language header.
Returns:
(str) Locale.
"""
if locale_header is None:
return language_code
parts = (part.split(';q=')
for part in locale_header.replace(' ', '').split(','))
languages = ((part[0].split('-')[0].lower(),
float(part[1]) if len(part) > 1 else 1.0)
for part in parts)
languages = [language for language in languages
if language[0].lower() in available_languages]
languages.sort(key=itemgetter(1), reverse=True)
locale = available_languages[languages[0][0]] if len(
languages) > 0 else language_code
return locale
def request_language(self):
"""Return primary language from request."""
locale_header = self.request.headers.get('Accept-Language')
if locale_header is None:
return None
parts = (part.split(';q=')
for part in locale_header.replace(' ', '').split(','))
languages = [(part[0].split('-')[0].lower(),
float(part[1]) if len(part) > 1 else 1.0)
for part in parts]
languages.sort(key=itemgetter(1), reverse=True)
try:
return languages[0][0]
except IndexError:
return None
def __init__(self, request, response):
"""Override the initialiser in order to set the language."""
self.initialize(request, response)
# Set language.
locale_header = self.request.headers.get('Accept-Language')
locale = self._extract_locale_from_header(locale_header)
i18n.get_i18n().set_locale(locale)
self.LANGUAGE = i18n.get_i18n().locale[0:2]
def dispatch(self):
"""Override the dispatcher in order to set session."""
# Get a session store for this request.
self.session_store = sessions.get_store()
try:
super(BaseRequestHandler, self).dispatch()
finally:
self.session_store.save_sessions(self.response)
def get_geodata(self):
"""Return `Request` geo data dict."""
geopt = ndb.GeoPt(self.request.headers.get('X-AppEngine-CityLatLong'))\
if self.request.headers.get('X-AppEngine-CityLatLong') else None
return {'language': self.request_language(),
'country': self.request.headers.get('X-AppEngine-Country'),
'region': self.request.headers.get('X-AppEngine-Region'),
'city': self.request.headers.get('X-AppEngine-City'),
'geopt': geopt}
class RedirectHandler(BaseRequestHandler):
"""Redirect handler."""
redirect_url = None
def get(self, *args, **kwargs):
"""Return redirection."""
self.redirect(self.get_redirect_url())
def get_redirect_url(self):
"""Return success URL."""
return self.uri_for(self.redirect_url)
class ImproperlyConfigured(Exception):
"""App is somehow improperly configured."""
class TemplateRequestHandler(BaseRequestHandler):
"""Generic handler for template view."""
template_name = None
def get(self, *args, **kwargs):
"""Render template."""
context = self.get_context_data(**kwargs)
template = self.get_template_name()
self.render_html(template, **context)
def get_template_name(self):
"""Return a template name to be used for the request."""
if self.template_name is None:
raise ImproperlyConfigured(
"TemplateRequestHandler requires either a definition of "
"'template_name' or an implementation of "
"'get_template_names()'")
else:
return self.template_name
def get_context_data(self, **kwargs):
"""Return kwargs as template context."""
if 'view' not in kwargs:
kwargs['view'] = self
return kwargs
class ModelRequestHandler(TemplateRequestHandler):
"""To render an entity."""
model = None
_entity = None
def get(self, *args, **kwargs):
"""Render template with entity in context."""
if kwargs.get('object_id'):
self._set_entity(kwargs.get('object_id'))
if not self._entity:
return self.response.set_status(404)
super(ModelRequestHandler, self).get(*args, **kwargs)
def get_context_data(self, **kwargs):
"""Add model entity to context."""
kwargs = super(ModelRequestHandler, self).get_context_data(**kwargs)
if self._entity:
kwargs['object'] = self._entity
return kwargs
def _set_entity(self, object_id):
"""Set model entity."""
try:
object_id = int(object_id)
except ValueError:
pass
if self.request.route_kwargs.get('parent_id'):
parent_id = self.request.route_kwargs.get('parent_id')
try:
parent_id = int(parent_id)
except ValueError:
pass
key = ndb.Key(self.model.parent_class, parent_id,
self.model, object_id)
self._entity = key.get()
else:
self._entity = self.model.get_by_id(object_id)
class ChildsRequestHandler(TemplateRequestHandler):
"""To render list of an entity childrens."""
child_model = None
parent_id_key = 'parent_id'
limit = 20
def get_context_data(self, **kwargs):
"""Add list items to context."""
kwargs = super(ChildsRequestHandler, self).get_context_data(**kwargs)
ancestor = ndb.Key(self.child_model.parent_class,
kwargs[self.parent_id_key])
items = self.queryset(ancestor).fetch_async(self.limit)
parent = ancestor.get_async()
kwargs['parent'] = parent.get_result()
kwargs['items'] = items.get_result()
return kwargs
def queryset(self, ancestor):
"""Return child query."""
return self.child_model.query(ancestor=ancestor)
class FormRequestHandler(TemplateRequestHandler):
"""Render a form on GET and processes it on POST."""
form_class = None
form_prefix = ''
success_url = None
template_name = None
def get(self, *args, **kwargs):
"""Instantiate a blank version of the form."""
form_class = self.get_form_class()
form = self.get_form(form_class)
context = self.get_context_data(form=form)
template = self.get_template_name()
self.render_html(template, **context)
def post(self, *args, **kwargs):
"""Check POT variables for validity and call response method."""
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.validate():
self.form_valid(form)
else:
self.form_invalid(form)
def form_valid(self, form):
"""If the form is valid, redirect to the supplied URL."""
self.redirect(self.get_succes_url())
def get_succes_url(self):
"""Return success URL."""
return self.uri_for(self.success_url)
def form_invalid(self, form):
"""If the form is invalid.
Re-render the context data with the data-filled form and errors.
"""
context = self.get_context_data(form=form)
template = self.get_template_name()
self.render_html(template, **context)
def get_form_class(self):
"""Return the form class to use in this view."""
return self.form_class
def get_form(self, form_class):
"""Return an instance of the form to be used in this view."""
return form_class(self.get_form_kwargs(), prefix=self.form_prefix)
def get_form_kwargs(self):
"""Return the keyword arguments for instantiating the form."""
method_name = self.request.method.upper().replace('-', '_')
if method_name == 'POST':
return self.request.POST
else:
return self.request.GET
class ModelFormRequestHandler(FormRequestHandler):
"""Render a model form on GET and processes it on POST."""
model = None
form_prefix = ''
_entity = None
def get(self, *args, **kwargs):
"""Instantiate a blank version of the form."""
if kwargs.get('object_id'):
self._set_entity(kwargs.get('object_id'))
if not self._entity:
return self.response.set_status(404)
super(ModelFormRequestHandler, self).get(*args, **kwargs)
def post(self, *args, **kwargs): # pylint: disable=W0613
"""Check POT variables for validity and call response method."""
if kwargs.get('object_id'):
self._set_entity(kwargs.get('object_id'))
if not self._entity:
return self.response.set_status(404)
super(ModelFormRequestHandler, self).post(*args, **kwargs)
def _set_entity(self, object_id):
"""Set model entity."""
try:
object_id = int(object_id)
except ValueError:
pass
if self.request.route_kwargs.get('parent_id'):
parent_id = self.request.route_kwargs.get('parent_id')
try:
parent_id = int(parent_id)
except ValueError:
pass
key = ndb.Key(self.model.parent_class, parent_id,
self.model, object_id)
self._entity = key.get()
else:
self._entity = self.model.get_by_id(object_id)
def form_valid(self, form):
"""Create entity and redirect."""
# Create entity
entity = self._entity or self.model()
form.populate_obj(entity)
entity.put()
# Redirect
super(ModelFormRequestHandler, self).form_valid(form)
def get_form(self, form_class):
"""Return an instance of the form to be used in this view."""
return form_class(self.get_form_kwargs(),
obj=self._entity,
data=self.request.route_kwargs,
prefix=self.form_prefix)
def get_context_data(self, **kwargs):
"""Return kwargs as template context."""
kwargs = super(ModelFormRequestHandler,
self).get_context_data(**kwargs)
if self._entity:
kwargs['object'] = self._entity
return kwargs
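# A minimal usage sketch with hypothetical names (AboutHandler, about.html and the
# route below are illustrative, not part of this module; it also assumes
# `import webapp2`): a concrete handler only needs a template_name.
# class AboutHandler(TemplateRequestHandler):
#     template_name = 'about.html'
#
# app = webapp2.WSGIApplication([webapp2.Route('/about', AboutHandler, name='about')])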
|
StarcoderdataPython
|
3390285
|
import sys
from collections import defaultdict
MAX_SIZE = 400
INF = 999999
SAFE_DIST = 10000
def manhattan(p1, p2):
return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])
def get_manhattans(points, dest_point):
return sum(map(lambda point: manhattan(point, dest_point), points))
def get_safe_points(points):
for x in range(MAX_SIZE):
for y in range(MAX_SIZE):
if get_manhattans(points, (x, y)) < SAFE_DIST:
yield x, y
def get_neighbours(point):
x, y = point
return [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
def solve(lines):
points = list(map(lambda line: tuple(map(int, line.split(', '))), lines))
safe_points = set(get_safe_points(points))
return len(safe_points)
print(solve(sys.stdin))
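# Example invocation: input lines are "x, y" coordinate pairs read from stdin
# (the script name below is only illustrative):
#   printf '1, 1\n3, 4\n5, 5\n' | python solve.py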
|
StarcoderdataPython
|
1784764
|
<reponame>rainmanwy/robotframework-DatabaseLib
# -*- coding: utf-8 -*-
"""
Created by <EMAIL> at 7/3/19
"""
import os
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker, scoped_session
import sqlparse
from robot.utils import ConnectionCache
from robot.api import logger
from .base import HybridCore
from .base import keyword
from .version import VERSION
os.environ["NLS_LANG"] = ".UTF8"
class DatabaseLib(HybridCore):
"""
DatabaseLib is created based on [https://www.sqlalchemy.org/|sqlalchemy].
It support below features:
- Database operations(select/insert/update/delete...)
- Multi database connections, user could use "Switch Connection" to change current connection
- ORM extension support
- Extension this libraries easily
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = VERSION
def __init__(self, libraryComponents=[]):
"""
        DatabaseLib could be extended through the parameter libraryComponents
"""
self._connections = ConnectionCache()
self._sessions = {}
super(DatabaseLib, self).__init__(libraryComponents)
@property
def current(self):
return self._connections.current
@property
def session(self):
if self.current not in self._sessions:
raise RuntimeError('Session is not created!')
return self._sessions[self.current]
@keyword
def connect_to_db(self, hostOrUrl, port=None, database=None, user=None, password=None, dbPrefix=None, alias=None,
**kwargs):
"""
Connect to database [http://docs.sqlalchemy.org/en/latest/core/engines.html|sqlalchemy]
:param hostOrUrl: database hostname or database connection string
:param port: database port
:param database: database name
:param user: database user name
        :param password: database password
        :param dbPrefix: format is dialect+driver; the driver part is optional
:param alias: connection alias, could be used to switch connection
:param kwargs: please check [http://docs.sqlalchemy.org/en/latest/core/engines.html|create_engine] to get more details
:return: connection index
Example:
| Connect To Db | 127.0.0.1 | 3306 | test | user | password | mysql |
| Connect To Db | mysql://user:[email protected]:3306/test?charset=utf8 |
"""
if '://' in hostOrUrl:
connectStr = hostOrUrl
elif 'mysql' in dbPrefix.lower():
connectStr = '%s://%s:%s@%s:%s/%s?charset=utf8' % (dbPrefix, user, password, hostOrUrl, port, database)
else:
connectStr = '%s://%s:%s@%s:%s/%s' % (dbPrefix, user, password, hostOrUrl, port, database)
logger.debug('Connection String: {0}'.format(connectStr))
engine = create_engine(connectStr, **kwargs)
connectionIndex = self._connections.register(engine, alias)
return connectionIndex
@keyword
def switch_connection(self, indexOrAlias):
"""
Switch database connection
:param indexOrAlias: connection alias or index
:return: previous index
Example:
| Connect To Db | 127.0.0.1 | 3306 | test1 | user | password | mysql | connection_1 |
| Connect To Db | 127.0.0.1 | 3306 | test2 | user | password | oracle | connection_2 |
| Switch Connection | connection_1 |
"""
oldIndex = self._connections.current_index
self._connections.switch(indexOrAlias)
return oldIndex
@keyword
def create_session(self, autoflush=True, autocommit=False, expireOnCommit=True, info=None, **kwargs):
"""
Create session based on current connection(engine)
        If a session already exists for the current connection, the keyword returns it directly.
        This keyword could be used to extend the library with ORM support.
:param autoflush: default value is True
:param autocommit: default value is False
:param expireOnCommit: default value is True
:param info: default value is None
:param kwargs: Please check Session in sqlalchemy
:return: session
"""
if self.current in self._sessions:
return self._sessions[self.current]
elif self.current is not None:
self.current.echo = 'debug'
session = scoped_session(sessionmaker(bind=self.current, autoflush=autoflush, autocommit=autocommit,
expire_on_commit=expireOnCommit, info=info, **kwargs))
self._sessions[self.current] = session
return session
        raise RuntimeError('Current connection may be closed, or no connection has been created yet!')
@keyword
def close_connection(self):
"""
Close current database connection
:return: None
"""
if self.current in self._sessions:
self._sessions.pop(self.current)
self.current.dispose()
self._connections.current = self._connections._no_current
@keyword
def close_all_connections(self):
"""
Close all database connections
:return: None
"""
self._sessions.clear()
self._connections.close_all('dispose')
@keyword
def execute(self, sql):
"""
Execute sql
:param sql: sql
:return: sqlalchemy ResultProxy
"""
return self.current.execute(sql)
@keyword
def query(self, sql, *args, **kwargs):
"""
Execute query
:param sql: sql string
        :param args: use args when placeholders in the sql are referenced by index
        :param kwargs: use kwargs when placeholders in the sql are referenced by key
:return: List of ResultProxy
Examples:
| ${results}= | Query | SELECT {0}, {1} FROM MY_TABLE | c1 | c2 |
| ${results}= | Query | SELECT {col1}, {col2} FROM MY_TABLE | col1=c1 | col2=c2 |
| ${results}= | Query | SELECT c1, c2 FROM MY_TABLE | | |
"""
if not args:
args = []
if not kwargs:
kwargs = {}
sql = sql.format(*args, **kwargs)
logger.debug('Execute: %s' % sql)
resultProxy = self.execute(sql)
results = [result for result in resultProxy]
logger.debug('Results: %s' % results)
return results
@keyword
def execute_sql_script(self, sqlFile):
"""
Execute sql script file
        :param sqlFile: path to the sql script file; note that the file encoding should be UTF-8
:return: None
"""
with open(sqlFile, 'r', encoding='utf-8') as f:
content = f.read()
content = content.replace('\ufeff', '')
sqlStrList = sqlparse.split(sqlparse.format(content, strip_comments=True))
sqlStrList = [sqlparse.format(sqlStr.strip(';'), keyword_case='upper', reindent=True)
for sqlStr in sqlStrList if sqlStr.strip()]
for sqlStr in sqlStrList:
self.execute(sqlStr)
@keyword
def call_stored_procedure(self, name, *params):
"""
Call stored procedure
:param name: name of stored procedure
:param params: parameters of stored procedure
:return: results
"""
connection = self.current.raw_connection()
try:
cursor = connection.cursor()
results = cursor.callproc(name, params)
cursor.close()
connection.commit()
finally:
connection.close()
return results
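# A minimal usage sketch outside Robot Framework (the connection details below are
# placeholders, not a real database):
# db = DatabaseLib()
# db.connect_to_db('127.0.0.1', 3306, 'test', 'user', 'password', 'mysql', alias='default')
# rows = db.query('SELECT c1, c2 FROM MY_TABLE')
# db.close_all_connections()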
|
StarcoderdataPython
|
1731531
|
import datetime
import json
import logging
import shutil
import subprocess
logger = logging.getLogger(__name__)
class ContCommandResult:
"""A representation engine command results."""
def __init__(self, exit_status=None, stdout=None, stderr=None, command=None):
self.exit_status = exit_status
self.stdout = stdout
self.stderr = stderr
self.command = command
@classmethod
def from_subprocess_out(cls, sub_out):
stdout = sub_out.stdout.decode().strip() if sub_out.stdout else ""
stderr = sub_out.stderr.decode().strip() if sub_out.stderr else ""
command = " ".join(sub_out.args)
if sub_out.returncode != 0 and stderr:
logger.warning(f"Error: {command} >> {stdout} >> {stderr}")
return cls(exit_status=sub_out.returncode, stdout=stdout, stderr=stderr, command=command,)
def __repr__(self):
return f"ContCommandResult(exit_status={self.exit_status})"
class PodmanEngine:
"""Podman/Docker engine wrapper."""
def __init__(self, name=None, engine="auto", *args, **kwargs):
if engine == "auto":
self.engine = "podman" if shutil.which("podman") else "docker"
else:
self.engine = engine
# check engine exist or not
if shutil.which(self.engine) is None:
raise ValueError(
f"'{engine}' engine not found. Make sure it should installed on your system."
)
self.name = name or f"rhel-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}"
def _exec(self, command):
"""Internal use to execute subprocess cmd."""
out = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return ContCommandResult.from_subprocess_out(out)
def run(self, image, hostname=None, envs=None, *args, **kwargs):
"""run container.
Args:
image: Image of rhel container
hostname: Set container hostname
            envs: list of environment variables ("KEY=value") to set in the container
"""
cmd = [self.engine, "run", "--name", self.name, "--rm", "-d"]
if hostname:
cmd.extend(["--hostname", hostname])
        if envs:
            # podman/docker expect one --env flag per variable
            for env in envs:
                cmd.extend(["--env", env])
cmd.extend([image])
return self._exec(cmd)
def kill(self):
"""Kill running container."""
return self._exec([self.engine, "kill", self.name])
def rm(self):
"""Remove container."""
return self._exec([self.engine, "rm", self.name])
def stop(self):
"""Stop container."""
return self._exec([self.engine, "stop", self.name])
def exec(self, cmd):
"""Execute command on contaienr.
Args:
cmd: command string
"""
command = [self.engine, "exec", self.name, "bash", "-c", cmd]
logger.info(f"Executing '{cmd}'")
return self._exec(command)
def cp(self, source, dest):
"""Copy file from sorce to destination.
Args:
source: sorce path
dest: destination path
"""
command = [self.engine, "cp", source, dest]
return self._exec(command)
def add_file(self, filename, content, overwrite=False):
if overwrite:
if self.exec(f"[ -f {filename} ]").exit_status == 0:
self.exec(f"rm {filename}")
return self.exec(f"cat >>{filename} <<EOF\n{content}\nEOF")
@property
def status(self):
"""Return status of container."""
command = [self.engine, "inspect", "--format", "{{.State.Status}}", self.name]
out = self._exec(command=command)
if out.exit_status != 0:
return f"{self.name} unavailable."
return out.stdout.title()
class OpenshiftEngine:
"""Openshift/k8s engine wrapper."""
def __init__(self, name=None, engine="auto", *args, **kwargs):
if engine == "auto":
self.engine = "oc" if shutil.which("oc") else "kubectl"
else:
self.engine = engine
# check engine exist or not
if shutil.which(self.engine) is None:
raise ValueError(
f"'{engine}' engine not found. Make sure it should installed on your system."
)
self.name = name or f"rhel-{datetime.datetime.now().strftime('%y%m%d-%H%M%S')}"
def _exec(self, command):
"""Internal use to execute subprocess cmd."""
out = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return ContCommandResult.from_subprocess_out(out)
def run(self, image, hostname=None, envs=None, *args, **kwargs):
"""run container.
Args:
            image: image to run in the pod
            hostname: Set container hostname
            envs: dict of environment variables to set in container
"""
cmd = [self.engine, "run", self.name]
if envs:
cmd.extend([f"--env='{k}={v}'" for k, v in envs.items()])
cmd.extend([f"--image={image}"])
if hostname:
cmd.extend([f"--overrides={{'spec': {{'hostname': '{hostname}'}}"])
return self._exec(cmd)
def stop(self):
"""Delete container than stopping."""
return self._exec([self.engine, "delete", "pod", self.name])
def exec(self, cmd):
"""Execute command on contaienr.
Args:
cmd: command string
"""
command = [self.engine, "exec", self.name, "--", "bash", "-c", cmd]
logger.info(f"Executing: '{cmd}'")
return self._exec(command)
def cp(self, source, dest):
"""Copy file from sorce to destination.
Args:
source: sorce path
dest: destination path
"""
command = [self.engine, "cp", source, dest]
return self._exec(command)
def add_file(self, filename, content, overwrite=False):
if overwrite:
if self.exec(f"[ -f {filename} ]").exit_status == 0:
self.exec(f"rm {filename}")
return self.exec(f"cat >>{filename} <<EOF\n{content}\nEOF")
def get_json(self, restype, name=None, label=None, namespace=None):
"""
Get json for resource type/name/label.
If name is None all resources of this type are returned
If label is not provided, then "oc get" will not be filtered on label
"""
command = [self.engine, "get", restype]
if name:
command.append(name)
if label:
command.extend(["-l", label])
if namespace:
command.extend(["-n", namespace])
command.extend(["-o", "json"])
out = self._exec(command=command)
if out.exit_status != 0:
return {}
try:
return json.loads(out.stdout)
except ValueError:
return {}
@property
def status(self):
"""Return status of pod."""
out = self.get_json(restype="pods", name=self.name)
if not out:
return f"{self.name} unavailable."
return out["status"]["phase"]
|
StarcoderdataPython
|
3386454
|
import logging
import configparser
from marshmallow import ValidationError
from mongoengine import DoesNotExist, ValidationError as ValidationErr, NotUniqueError
from pymongo.errors import DuplicateKeyError
from utils.responses import bad_request, not_found
def create_error_handlers(app):
@app.errorhandler(ValidationError)
def handle_marshmallow_validation_error(error):
logging.error(error.messages)
return bad_request(error.messages)
@app.errorhandler(ValidationErr)
def handle_mongoengine_validation_error(error):
logging.error(error.message)
return bad_request(error.message)
@app.errorhandler(DoesNotExist)
def handle_does_not_exist_error(error):
logging.error(error.args[0])
return not_found(error.args[0])
@app.errorhandler(NotUniqueError)
    def handle_not_unique_error(error):
logging.error(error.args[0])
return bad_request(error.args[0])
@app.errorhandler(DuplicateKeyError)
def handle_duplicate_key_error(error):
logging.error(error.args[0])
return bad_request(error.args[0])
@app.errorhandler(KeyError)
def handle_key_error(error):
        logging.error(error.args[0])
        return not_found(error.args[0])
@app.errorhandler(configparser.NoSectionError)
def handle_no_section_error(error):
        logging.error(error.message)
        return not_found(error.message)
@app.errorhandler(configparser.NoOptionError)
def handle_no_option_error(error):
        logging.error(error.message)
        return not_found(error.message)
@app.errorhandler(AttributeError)
def handle_attribute_error(error):
logging.error(error.args[0])
return bad_request(error.args[0])
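# A minimal wiring sketch (the Flask app below is illustrative; the real application
# and the utils.responses helpers live elsewhere in the project):
# from flask import Flask
# app = Flask(__name__)
# create_error_handlers(app)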
|
StarcoderdataPython
|
8920
|
<reponame>noshluk2/Wifi-Signal-Robot-localization<filename>scripts/Interfacing/encoder_class.py
import RPi.GPIO as GPIO
import threading
class Encoder(object):
def __init__(self, r_en_a,r_en_b,l_en_a,l_en_b):
GPIO.setmode(GPIO.BCM)
GPIO.setup(r_en_a, GPIO.IN)
GPIO.setup(r_en_b, GPIO.IN)
GPIO.setup(l_en_a, GPIO.IN)
GPIO.setup(l_en_b, GPIO.IN)
        self.l_en_a = l_en_a
        self.l_en_b = l_en_b
        self.r_en_a = r_en_a
        self.r_en_b = r_en_b
GPIO.add_event_detect(r_en_a, GPIO.BOTH, callback=self.Update_encR)
GPIO.add_event_detect(l_en_a, GPIO.BOTH, callback=self.Update_encL)
self.count_R =0
self.count_L=0
def Update_encR(self,channel):
if GPIO.input(self.r_en_a) == GPIO.input(self.r_en_b):
self.count_R=self.count_R + 1
else :
self.count_R = self.count_R - 1
def Update_encL(self,channel):
if GPIO.input(self.l_en_a) == GPIO.input(self.l_en_b):
self.count_L=self.count_L + 1
else :
self.count_L = self.count_L - 1
return (self.count_L)
def get_r_enc(self):
return self.count_R
def get_l_enc(self):
return self.count_L
def clear_encoders(self):
self.count_R=0
self.count_L=0
# r_en_a = 27
# r_en_b = 10
# l_en_a = 5
# l_en_b = 6
# enc_obj = Encoder(27,10,5,6)
# def update_encoders():
# threading.Timer(1,update_encoders).start()
# print(" looping ")
# update_encoders()
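# A polling sketch that actually reads the counters (pin numbers reuse the values
# from the commented example above; the 1-second period is an assumption):
# enc_obj = Encoder(27, 10, 5, 6)
# def report_counts():
#     threading.Timer(1, report_counts).start()
#     print("R: {} L: {}".format(enc_obj.get_r_enc(), enc_obj.get_l_enc()))
# report_counts()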
|
StarcoderdataPython
|
1614669
|
import sys
import os.path
import pprint
sys.path.append(os.path.abspath(__file__ + "\..\.."))
import windows
from windows.generated_def.winstructs import *
import windows.native_exec.simple_x86 as x86
class SingleSteppingDebugger(windows.debug.LocalDebugger):
SINGLE_STEP_COUNT = 4
def on_exception(self, exc):
code = self.get_exception_code()
context = self.get_exception_context()
print("EXCEPTION !!!! Got a {0} at 0x{1:x}".format(code, context.pc))
self.SINGLE_STEP_COUNT -= 1
if self.SINGLE_STEP_COUNT:
return self.single_step()
return EXCEPTION_CONTINUE_EXECUTION
class RewriteBreakpoint(windows.debug.HXBreakpoint):
def trigger(self, dbg, exc):
context = dbg.get_exception_context()
print("GOT AN HXBP at 0x{0:x}".format(context.pc))
# Rewrite the infinite loop with 2 nop
windows.current_process.write_memory(self.addr, "\x90\x90")
# Ask for a single stepping
return dbg.single_step()
d = SingleSteppingDebugger()
# Infinite loop + nop + ret
code = x86.assemble("label :begin; jmp :begin; nop; ret")
func = windows.native_exec.create_function(code, [PVOID])
print("Code addr = 0x{0:x}".format(func.code_addr))
# Create a thread that will infinite loop
t = windows.current_process.create_thread(func.code_addr, 0)
# Add a breakpoint on the infinite loop
d.add_bp(RewriteBreakpoint(func.code_addr))
t.wait()
print("Done!")
|
StarcoderdataPython
|
3234757
|
<reponame>facebookresearch/worldsheet
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import skimage.io
import numpy as np
import torch
from torch import nn
import timm.models as models
from mmf.neural_rendering.novel_view_projector import NovelViewProjector
from mmf.neural_rendering.inpainting.models import MeshRGBGenerator
from mmf.neural_rendering.inpainting.gan_loss import MeshGANLosses
from mmf.neural_rendering.losses import (
ImageL1Loss, DepthL1Loss, MeshLaplacianLoss, GridOffsetLoss, ZGridL1Loss,
VGG19PerceptualLoss
)
from mmf.neural_rendering.metrics.metrics import Metrics
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.utils.distributed import get_world_size, byte_tensor_to_object
logger = logging.getLogger(__name__)
@registry.register_model("mesh_renderer")
class MeshRenderer(BaseModel):
def __init__(self, config):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/mesh_renderer/defaults.yaml"
def build(self):
self.batch_size = self.config.batch_size // get_world_size()
self.image_size_H = self.config.image_size_H
self.image_size_W = self.config.image_size_W
if self.image_size_H != self.image_size_W:
raise NotImplementedError()
self.grid_stride = self.config.grid_stride
if (self.image_size_H % self.grid_stride != 0
or self.image_size_W % self.grid_stride != 0):
raise Exception("image sizes must be divisible by grid_stride")
self.grid_H = self.image_size_H // self.grid_stride + 1
self.grid_W = self.image_size_W // self.grid_stride + 1
# Offset and depth prediction
self.offset_and_depth_predictor = OffsetAndZGridPredictor(
grid_stride=self.grid_stride,
grid_H=self.grid_H,
grid_W=self.grid_W,
z_min=self.config.z_min,
z_max=self.config.z_max,
pred_inv_z=self.config.pred_inv_z,
pred_inv_z_synsin=self.config.pred_inv_z_synsin,
z_pred_scaling=self.config.z_pred_scaling,
z_pred_offset=self.config.z_pred_offset,
backbone_name=self.config.backbone_name,
backbone_dim=self.config.backbone_dim
)
if self.config.freeze_offset_and_depth_predictor:
assert self.config.use_inpainting, \
"freeze_offset_and_depth_predictor is intended for inpainter " \
"training alone."
for p in self.offset_and_depth_predictor.parameters():
p.requires_grad = False
self.novel_view_projector = NovelViewProjector(
batch_size=self.batch_size,
image_size_H=self.image_size_H,
image_size_W=self.image_size_W,
grid_H=self.grid_H,
grid_W=self.grid_W,
blur_radius=self.config.rendering.blur_radius,
faces_per_pixel=self.config.rendering.faces_per_pixel,
clip_barycentric_coords=self.config.rendering.clip_barycentric_coords,
sigma=self.config.rendering.sigma,
gamma=self.config.rendering.gamma,
background_color=self.config.rendering.background_color,
hfov=self.config.rendering.hfov,
z_background=self.config.rendering.z_background,
gblur_kernel_size=self.config.rendering.gblur_kernel_size,
gblur_sigma=self.config.rendering.gblur_sigma,
gblur_weight_thresh=self.config.rendering.gblur_weight_thresh,
directly_use_img_as_texture=self.config.directly_use_img_as_texture
)
self.use_discriminator = False
if self.config.use_inpainting:
self.inpainting_net_G = MeshRGBGenerator(self.config.inpainting.net_G)
if self.config.inpainting.use_discriminator:
self.mesh_gan_losses = MeshGANLosses(self.config.inpainting.net_D)
self.use_discriminator = True
self.build_losses()
self.build_metrics()
if self.config.save_forward_results:
os.makedirs(self.config.forward_results_dir, exist_ok=True)
if self.config.save_for_external_inpainting:
# save PNG image files for external inpainting training and eval
self.inpainting_src_dir = os.path.join(
self.config.forward_results_dir, 'inpainting_src'
)
self.inpainting_tgt_dir = os.path.join(
self.config.forward_results_dir, 'inpainting_tgt'
)
os.makedirs(self.inpainting_src_dir, exist_ok=True)
os.makedirs(self.inpainting_tgt_dir, exist_ok=True)
def build_losses(self):
self.loss_image_l1 = ImageL1Loss()
self.loss_depth_l1 = DepthL1Loss()
self.loss_mesh_laplacian = MeshLaplacianLoss(
self.grid_H * self.grid_W, self.novel_view_projector.faces,
use_l2_loss=self.config.mesh_laplacian_use_l2_loss
)
self.loss_grid_offset = GridOffsetLoss(self.grid_H, self.grid_W)
self.loss_z_grid_l1 = ZGridL1Loss(self.grid_H, self.grid_W)
self.loss_vgg19_perceptual = VGG19PerceptualLoss()
self.loss_weights = self.config.loss_weights
def build_metrics(self):
self.metrics = Metrics(self.config.metrics)
def get_optimizer_parameters(self, config):
# named_parameters contains ALL parameters, including those in discriminator
named_parameters = [(n, p) for n, p in self.named_parameters()]
param_groups = []
registered = set()
# 1. backbone for ResNet-50
backbone_params = list(self.offset_and_depth_predictor.backbone.parameters())
param_groups.append({"params": backbone_params, "lr": self.config.backbone_lr})
registered.update(backbone_params)
# 2. inpainting generator
if self.config.use_inpainting:
generator_params = list(self.inpainting_net_G.parameters())
param_groups.append({
"params": generator_params,
"lr": self.config.inpainting.net_G.optimizer.lr,
"betas": (
self.config.inpainting.net_G.optimizer.beta1,
self.config.inpainting.net_G.optimizer.beta2
),
"weight_decay": self.config.inpainting.net_G.optimizer.weight_decay,
})
registered.update(generator_params)
# 3. inpainting discriminator
if self.use_discriminator:
discriminator_params = list(self.mesh_gan_losses.parameters())
param_groups.append({
"params": discriminator_params,
"lr": self.config.inpainting.net_D.optimizer.lr,
"betas": (
self.config.inpainting.net_D.optimizer.beta1,
self.config.inpainting.net_D.optimizer.beta2
),
"weight_decay": self.config.inpainting.net_D.optimizer.weight_decay,
})
registered.update(discriminator_params)
# All remaining parameters
remaining_params = [
p for _, p in named_parameters if p not in registered
]
param_groups.insert(0, {"params": remaining_params})
return param_groups
def get_offset_and_depth_from_gt(self, sample_list):
batch_size = sample_list.trans_img_0.size(0)
device = sample_list.trans_img_0.device
xy_offset = torch.zeros(batch_size, self.grid_H * self.grid_W, 2, device=device)
sampling_grid = torch.cat(
[torch.linspace(1, -1, self.grid_W).view(1, self.grid_W, 1).expand(self.grid_H, -1, 1), # NoQA
torch.linspace(1, -1, self.grid_H).view(self.grid_H, 1, 1).expand(-1, self.grid_W, 1)], # NoQA
dim=-1
).unsqueeze(0).expand(batch_size, -1, -1, -1).to(device)
# sample ground-truth z-grid from ground-truth depth
z_grid = nn.functional.grid_sample(
sample_list.depth_0.unsqueeze(1), sampling_grid, padding_mode="border",
align_corners=True
).view(batch_size, self.grid_H * self.grid_W, 1)
return xy_offset, z_grid
def forward(self, sample_list):
if not self.config.fill_z_with_gt:
# use the transformed image (after mean subtraction and normalization) as
# network input
xy_offset, z_grid = self.offset_and_depth_predictor(sample_list.trans_img_0)
else:
xy_offset, z_grid = self.get_offset_and_depth_from_gt(sample_list)
if self.config.force_zero_xy_offset:
xy_offset = torch.zeros_like(xy_offset)
rendering_results = {}
if not self.config.train_z_grid_only:
# use the original image (RGB value in 0~1) as rendering input
rendering_results = self.novel_view_projector(
xy_offset=xy_offset,
z_grid=z_grid,
rgb_in=sample_list.orig_img_0,
R_in=sample_list.R_0,
T_in=sample_list.T_0,
R_out_list=[sample_list.R_0, sample_list.R_1],
T_out_list=[sample_list.T_0, sample_list.T_1],
render_mesh_shape=self.config.render_mesh_shape_for_vis,
)
if self.config.use_inpainting:
_, rgba_1_rec = rendering_results["rgba_out_rec_list"]
if self.config.sanity_check_inpaint_with_gt:
# as a sanity check, use the ground-truth image as input to make sure
# the generator has enough capacity to perfectly reconstruct it.
rgba_1_rec = torch.ones_like(rgba_1_rec)
rgba_1_rec[..., :3] = sample_list.orig_img_1
rgb_1_inpaint = self.inpainting_net_G(rgba_1_rec)
if self.config.inpainting.inpaint_missing_regions_only:
alpha_mask = rgba_1_rec[..., -1].unsqueeze(-1).ge(1e-4).float()
rgb_1_inpaint = rgb_1_inpaint * (1 - alpha_mask)
rgb_1_inpaint = rgb_1_inpaint + rgba_1_rec[..., :3] * alpha_mask
rendering_results["rgb_1_inpaint"] = rgb_1_inpaint
rendering_results["rgb_1_out"] = rendering_results["rgb_1_inpaint"]
else:
_, rgba_1_rec = rendering_results["rgba_out_rec_list"]
rendering_results["rgb_1_out"] = rgba_1_rec[..., :3]
# return only the rendering results and skip loss computation, usually for
# visualization on-the-fly by calling this model separately (instead of running
# it within the MMF trainer on MMF datasets)
if self.config.return_rendering_results_only:
return rendering_results
losses = self.forward_losses(sample_list, xy_offset, z_grid, rendering_results)
# compute metrics
if not self.training or not self.config.metrics.only_on_eval:
metrics_dict = self.forward_metrics(sample_list, rendering_results)
rendering_results.update(metrics_dict)
# average over batch, and do not compute gradient over metrics
losses.update({
f"{sample_list.dataset_type}/{sample_list.dataset_name}/no_grad_{k}":
v.detach().mean()
for k, v in metrics_dict.items()
})
if self.config.save_forward_results:
self.save_forward_results(sample_list, xy_offset, z_grid, rendering_results)
return {"losses": losses}
def save_forward_results(self, sample_list, xy_offset, z_grid, rendering_results):
texture_image_rec = rendering_results["texture_image_rec"]
rgba_0_rec, rgba_1_rec = rendering_results["rgba_out_rec_list"]
rgba_0_rec = rgba_0_rec.clamp(min=0, max=1)
rgba_1_rec = rgba_1_rec.clamp(min=0, max=1)
depth_0_rec, depth_1_rec = rendering_results["depth_out_rec_list"]
if self.config.render_mesh_shape_for_vis:
mesh_shape_0, mesh_shape_1 = rendering_results["mesh_shape_out_list"]
mesh_verts_world_coords = rendering_results["mesh_verts_world_coords"]
for n_im in range(xy_offset.size(0)):
image_id = byte_tensor_to_object(sample_list.image_id[n_im])
if self.config.save_for_realestate10k_eval:
# save for RealEstate10K evaluation, to be used by
# https://github.com/facebookresearch/synsin/blob/master/evaluation/evaluate_perceptualsim.py
# see https://github.com/facebookresearch/synsin/blob/master/REALESTATE.md for details
save_sub_dir = os.path.join(
self.config.forward_results_dir, image_id.split("_")[0]
)
os.makedirs(save_sub_dir, exist_ok=True)
im_output = rendering_results["rgb_1_out"][n_im].clamp(min=0, max=1)
im_input = sample_list.orig_img_0[n_im].clamp(min=0, max=1)
im_tgt = sample_list.orig_img_1[n_im].clamp(min=0, max=1)
im_output = skimage.img_as_ubyte(im_output.detach().cpu().numpy())
im_input = skimage.img_as_ubyte(im_input.detach().cpu().numpy())
im_tgt = skimage.img_as_ubyte(im_tgt.detach().cpu().numpy())
skimage.io.imsave(save_sub_dir + "/output_image_.png", im_output)
skimage.io.imsave(save_sub_dir + "/input_image_.png", im_input)
skimage.io.imsave(save_sub_dir + "/tgt_image_.png", im_tgt)
continue
if self.config.save_for_external_inpainting:
# save PNG image files for external inpainting training and eval
im_src = rgba_1_rec[n_im].clamp(min=0, max=1)
im_src_alpha_mask = im_src[..., 3:4].ge(1e-4).float()
# save source image into RGBA PNG file, where the last channel is
# the visibility alpha_mask
im_src = torch.cat([im_src[..., :3], im_src_alpha_mask], dim=-1)
im_tgt = sample_list.orig_img_1[n_im].clamp(min=0, max=1)
base_id = image_id.split("_")[0]
im_src = skimage.img_as_ubyte(im_src.detach().cpu().numpy())
im_tgt = skimage.img_as_ubyte(im_tgt.detach().cpu().numpy())
skimage.io.imsave(self.inpainting_src_dir + f"/{base_id}.png", im_src)
skimage.io.imsave(self.inpainting_tgt_dir + f"/{base_id}.png", im_tgt)
continue
save_file = os.path.join(
self.config.forward_results_dir,
'{}_outputs.npz'.format(image_id.replace("/", "-"))
)
save_dict = {
"orig_img_0": sample_list.orig_img_0[n_im],
"orig_img_1": sample_list.orig_img_1[n_im],
"xy_offset": xy_offset[n_im],
"z_grid": z_grid[n_im],
"texture_image_rec": texture_image_rec[n_im],
"rgba_0_rec": rgba_0_rec[n_im],
"rgba_1_rec": rgba_1_rec[n_im],
"depth_0_rec": depth_0_rec[n_im],
"depth_1_rec": depth_1_rec[n_im],
}
if self.config.render_mesh_shape_for_vis:
save_dict.update({
"mesh_shape_0": mesh_shape_0[n_im],
"mesh_shape_1": mesh_shape_1[n_im],
"mesh_verts_world_coords": mesh_verts_world_coords[n_im],
})
if sample_list.dataset_name in ["synsin_habitat", "replica"]:
save_dict.update({
"depth_0": sample_list.depth_0[n_im],
"depth_1": sample_list.depth_1[n_im],
"depth_mask_0": sample_list.depth_mask_0[n_im],
"depth_mask_1": sample_list.depth_mask_1[n_im],
})
if self.config.use_inpainting:
rgb_1_inpaint = rendering_results["rgb_1_inpaint"]
rgb_1_inpaint = rgb_1_inpaint.clamp(min=0, max=1)
save_dict.update({"rgb_1_inpaint": rgb_1_inpaint[n_im]})
save_dict = {k: v.detach().cpu().numpy() for k, v in save_dict.items()}
np.savez(save_file, **save_dict)
def forward_losses(self, sample_list, xy_offset, z_grid, rendering_results):
z_grid_l1_0 = None
if self.loss_weights["z_grid_l1_0"] != 0:
z_grid_l1_0 = self.loss_z_grid_l1(
z_grid_pred=z_grid, depth_gt=sample_list.depth_0,
depth_loss_mask=sample_list.depth_mask_0.float()
)
losses_unscaled = {
"z_grid_l1_0": z_grid_l1_0,
"grid_offset": self.loss_grid_offset(xy_offset),
}
use_vgg19_loss = self.training or not self.config.vgg19_loss_only_on_train
if not self.config.train_z_grid_only:
rgba_0_rec, rgba_1_rec = rendering_results["rgba_out_rec_list"]
depth_0_rec, depth_1_rec = rendering_results["depth_out_rec_list"]
scaled_verts = rendering_results["scaled_verts"]
rgb_1_rec = rgba_1_rec[..., :3]
depth_l1_0 = None
if self.loss_weights["depth_l1_0"] != 0:
depth_l1_0 = self.loss_depth_l1(
depth_pred=depth_0_rec, depth_gt=sample_list.depth_0,
loss_mask=sample_list.depth_mask_0.float()
)
depth_l1_1 = None
if self.loss_weights["depth_l1_1"] != 0:
depth_l1_1 = self.loss_depth_l1(
depth_pred=depth_1_rec, depth_gt=sample_list.depth_1,
loss_mask=sample_list.depth_mask_1.float()
)
image_l1_1 = self.loss_image_l1(
rgb_pred=rgb_1_rec, rgb_gt=sample_list.orig_img_1,
loss_mask=sample_list.depth_mask_1.float()
)
if use_vgg19_loss and self.loss_weights["vgg19_perceptual_1"] != 0:
vgg19_perceptual_1 = self.loss_vgg19_perceptual(
rgb_pred=rgb_1_rec, rgb_gt=sample_list.orig_img_1,
loss_mask=sample_list.depth_mask_1.float()
)
else:
vgg19_perceptual_1 = torch.tensor(0., device=rgb_1_rec.device)
losses_unscaled.update({
"depth_l1_0": depth_l1_0,
"depth_l1_1": depth_l1_1,
"image_l1_1": image_l1_1,
"vgg19_perceptual_1": vgg19_perceptual_1,
"mesh_laplacian": self.loss_mesh_laplacian(scaled_verts),
})
if self.config.use_inpainting:
rgb_1_inpaint = rendering_results["rgb_1_inpaint"]
image_l1_1_inpaint = self.loss_image_l1(
rgb_pred=rgb_1_inpaint, rgb_gt=sample_list.orig_img_1,
)
if use_vgg19_loss and self.loss_weights["vgg19_perceptual_1_inpaint"] != 0:
vgg19_perceptual_1_inpaint = self.loss_vgg19_perceptual(
rgb_pred=rgb_1_inpaint, rgb_gt=sample_list.orig_img_1,
)
else:
vgg19_perceptual_1_inpaint = torch.tensor(0., device=rgb_1_rec.device)
losses_unscaled.update({
"image_l1_1_inpaint": image_l1_1_inpaint,
"vgg19_perceptual_1_inpaint": vgg19_perceptual_1_inpaint,
})
if self.use_discriminator:
g_losses = self.mesh_gan_losses(
fake_img=rgb_1_inpaint, real_img=sample_list.orig_img_1,
alpha_mask=rgba_1_rec[..., 3:4].ge(1e-4).float(),
update_discriminator=self.training
)
losses_unscaled.update(g_losses)
for k, v in losses_unscaled.items():
if (v is not None) and (not torch.all(torch.isfinite(v)).item()):
raise Exception("loss {} becomes {}".format(k, v.mean().item()))
losses = {
f"{sample_list.dataset_type}/{sample_list.dataset_name}/{k}":
(v * self.loss_weights[k])
for k, v in losses_unscaled.items() if self.loss_weights[k] != 0
}
return losses
def forward_metrics(self, sample_list, rendering_results):
rgb_1_out = rendering_results["rgb_1_out"]
rgb_1_gt = sample_list.orig_img_1
vis_mask = sample_list.vis_mask if hasattr(sample_list, "vis_mask") else None
metrics_dict = self.metrics(rgb_1_out, rgb_1_gt, vis_mask)
return metrics_dict
class OffsetAndZGridPredictor(nn.Module):
def __init__(
self, grid_stride, grid_H, grid_W, z_min, z_max, pred_inv_z, pred_inv_z_synsin,
z_pred_scaling, z_pred_offset, backbone_name, backbone_dim
):
super().__init__()
assert grid_stride % 2 == 0
self.grid_stride = grid_stride
self.pad = grid_stride // 2
self.backbone_dim = backbone_dim
self.grid_H = grid_H
self.grid_W = grid_W
self.z_min = z_min
self.z_max = z_max
self.pred_inv_z = pred_inv_z
self.pred_inv_z_synsin = pred_inv_z_synsin
self.z_pred_scaling = z_pred_scaling
self.z_pred_offset = z_pred_offset
network = getattr(models, backbone_name)
# 1) the minimum output stride for resnet is 8 pixels
# if we want lower output stride, add ConvTranspose2d at the end
# 2) the maximum output stride for resnet is 32 pixels
# if we want higher output stride, use downsample_stride in output conv
resnet_grid_stride = min(max(grid_stride, 8), 32)
self.backbone = network(pretrained=True, output_stride=resnet_grid_stride)
assert (
resnet_grid_stride % grid_stride == 0 or
grid_stride % resnet_grid_stride == 0
)
upsample_stride = resnet_grid_stride // grid_stride
downsample_stride = grid_stride // resnet_grid_stride
if upsample_stride <= 1:
assert downsample_stride >= 1
self.slice_b = 0
self.slice_e = 0
# downsample by downsample_stride (usually 1) in the final prediction layer
self.xy_offset_predictor = nn.Conv2d(
backbone_dim, 2, kernel_size=1, stride=downsample_stride
)
self.z_grid_predictor = nn.Conv2d(
backbone_dim, 1, kernel_size=1, stride=downsample_stride
)
else:
self.slice_b = upsample_stride // 2
self.slice_e = upsample_stride - 1 - self.slice_b
# upsample in the final prediction layer
self.xy_offset_predictor = nn.ConvTranspose2d(
backbone_dim, 2, kernel_size=upsample_stride, stride=upsample_stride
)
self.z_grid_predictor = nn.ConvTranspose2d(
backbone_dim, 1, kernel_size=upsample_stride, stride=upsample_stride
)
# allow the vertices to move at most half grid length
# the relative image width and height are 2 (i.e. -1 to 1)
        # so the grid length x is 2. / (self.grid_W - 1), and similarly for y
xy_offset_scale = torch.tensor(
[1. / (self.grid_W - 1), 1. / (self.grid_H - 1)], dtype=torch.float32
)
self.register_buffer("xy_offset_scale", xy_offset_scale)
def forward(self, img):
assert img.size(1) == 3, 'The input image must be in NCHW format.'
batch_size = img.size(0)
img_pad = nn.functional.pad(
img, (self.pad, self.pad, self.pad, self.pad), mode='replicate'
)
features = self.backbone.forward_features(img_pad)
# predict in NCHW and permute NCHW -> NHWC
xy_offset = self.xy_offset_predictor(features).permute(0, 2, 3, 1)
z_grid = self.z_grid_predictor(features).permute(0, 2, 3, 1)
# strip boundaries
xy_offset = slice_output(xy_offset, self.slice_b, self.slice_e)
z_grid = slice_output(z_grid, self.slice_b, self.slice_e)
# flip the coordinate axis directions from input image to PyTorch3D screen
# - input image: x - right, y: down
# - PyTorch3D screen: x - left, y: up, see https://pytorch3d.org/docs/renderer_getting_started
# so we flip both horizontally and vertically
xy_offset = xy_offset.flip([1, 2])
z_grid = z_grid.flip([1, 2])
xy_offset = torch.tanh(xy_offset)
xy_offset = xy_offset * self.xy_offset_scale
# convert z prediction to the range of (z_min, z_max)
assert z_grid.size(-1) == 1
if self.pred_inv_z_synsin:
z_grid = torch.sigmoid(z_grid - 2.8)
z_grid = 1. / (z_grid * 10 + 0.01) - 0.1
z_grid = z_grid * self.z_pred_scaling + self.z_pred_offset
z_grid = torch.clamp(z_grid, min=self.z_min, max=self.z_max)
elif self.pred_inv_z:
z_grid = torch.sigmoid(z_grid)
z_grid = 1. / (z_grid * 0.75 + 0.01) - 1
z_grid = z_grid * self.z_pred_scaling + self.z_pred_offset
z_grid = torch.clamp(z_grid, min=self.z_min, max=self.z_max)
else:
z_grid = torch.sigmoid(z_grid)
z_grid = self.z_min + z_grid * (self.z_max - self.z_min)
# flatten the prediction to match the mesh vertices
xy_offset = xy_offset.view(batch_size, self.grid_H * self.grid_W, 2)
z_grid = z_grid.view(batch_size, self.grid_H * self.grid_W, 1)
assert torch.all(torch.isfinite(xy_offset)).item()
assert torch.all(torch.isfinite(z_grid)).item()
return xy_offset, z_grid
def slice_output(nhwc_tensor, slice_b, slice_e):
if slice_b == 0 and slice_e == 0:
return nhwc_tensor
b = slice_b
e = -slice_e if slice_e > 0 else None
return nhwc_tensor[:, b:e, b:e]
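# A small illustration of slice_output on a dummy NHWC tensor (the shapes are
# arbitrary, chosen only to show the boundary stripping):
# x = torch.zeros(2, 8, 8, 3)
# y = slice_output(x, slice_b=1, slice_e=1)  # -> shape (2, 6, 6, 3)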
|
StarcoderdataPython
|
4828272
|
# -*- coding: utf-8 -*-
"""
==============================================================================
@author: <NAME>
@date: Thu May 13 09:50:26 2021
@reference: Ojala, A Comparative Study of Texture Measures with Classification on Feature Distributions
            Ojala, Gray Scale and Rotation Invariant Texture Classification with Local Binary Patterns
==============================================================================
"""
import numpy as np
from skimage import feature
def _energy(x):
    # Sum of squared values
    return np.multiply(x, x).sum()
def _entropy(x):
    # Shannon-style entropy with a small epsilon to avoid log(0)
    return -np.multiply(x, np.log(x + 1e-16)).sum()
def lbp_features(f, mask, P=[8,16,24], R=[1,2,3]):
'''
Parameters
----------
f : numpy ndarray
Image of dimensions N1 x N2.
mask : numpy ndarray
        Mask image N1 x N2 with 1 where pixels belong to the ROI, 0 elsewhere.
        Pass None to treat the whole image as the ROI.
P : list, optional
Number of points in neighborhood. The default is [8,16,24].
R : list, optional
Radius/Radii. The default is [1,2,3].
Returns
-------
features : numpy ndarray
Energy and entropy of LBP image (2 x 1).
labels : list
Labels of features.
'''
if mask is None:
mask = np.ones(f.shape)
P = np.array(P)
R = np.array(R)
n = P.shape[0]
mask_ravel = mask.ravel()
features = []
labels = []
for i in range(n):
lbp = feature.local_binary_pattern(f, P[i], R[i], 'uniform')
lbp_ravel = lbp.ravel()
roi = lbp_ravel[mask_ravel.astype(bool)]
feats = np.zeros(2, np.double)
feats[0] = _energy(roi) / roi.sum()
feats[1] = _entropy(roi) / roi.sum()
features.append(feats)
labels.append('LBP_R_'+str(R[i])+'_P_'+str(P[i])+'_energy')
labels.append('LBP_R_'+str(R[i])+'_P_'+str(P[i])+'_entropy')
features = np.array(features, np.double).ravel()
return features, labels
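if __name__ == '__main__':
    # Minimal self-check on a random grayscale image; the image size and the default
    # P/R values are arbitrary choices for illustration.
    rng = np.random.default_rng(0)
    img = rng.integers(0, 256, size=(64, 64)).astype(np.uint8)
    feats, names = lbp_features(img, mask=None)
    for name, value in zip(names, feats):
        print(f'{name}: {value:.4f}')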
|
StarcoderdataPython
|
1714969
|
<gh_stars>1-10
"""
Import as:
import helpers.hgit as hgit
"""
import collections
import functools
import logging
import os
import pprint
import re
from typing import Any, Dict, List, Match, Optional, Tuple
import helpers.hdbg as hdbg
import helpers.hio as hio
import helpers.hprint as hprint
import helpers.hsystem as hsysinte
_LOG = logging.getLogger(__name__)
# We refer to "Git" when we talk about the control system (e.g., "in a Git
# repository") and `git` when we refer to implementation of Git as a program
# installed in a computer.
# TODO(gp): Check
# https://git-scm.com/book/en/v2/Appendix-B%3A-Embedding-Git-in-your-Applications-Dulwich
# TODO(gp): Avoid "stuttering": the module is already called "git", so no need
# to make reference to git again.
# TODO(gp): Add mem caching to some functions below. We assume that one doesn't
# change dir (which is a horrible idea) and thus we can memoize.
# TODO(gp): Spell super_module and sub_module always in the same way in both
# comments and code. For simplicity (e.g., instead of `super_module` in code and
# `super-module` in comment) we might want to spell `supermodule` everywhere.
# #############################################################################
# Git branch functions
# #############################################################################
@functools.lru_cache()
def get_branch_name(dir_name: str = ".") -> str:
"""
Return the name of the Git branch including a certain dir.
E.g., `master` or `AmpTask672_Add_script_to_check_and_merge_PR`
"""
hdbg.dassert_exists(dir_name)
# > git rev-parse --abbrev-ref HEAD
# master
cmd = "cd %s && git rev-parse --abbrev-ref HEAD" % dir_name
data: Tuple[int, str] = hsysinte.system_to_one_line(cmd)
_, output = data
return output
def get_branch_next_name(dir_name: str = ".") -> str:
"""
    Return a branch name derived from the current one that does not exist yet.
E.g., `AmpTask1903_Implemented_system_Portfolio` ->
`AmpTask1903_Implemented_system_Portfolio_3`
"""
curr_branch_name = get_branch_name(dir_name=dir_name)
hdbg.dassert_ne(curr_branch_name, "master")
_LOG.debug("curr_branch_name=%s", curr_branch_name)
#
for i in range(1, 10):
new_branch_name = f"{curr_branch_name}_{i}"
exists = does_branch_exist(new_branch_name, dir_name=dir_name)
_LOG.debug("'%s' -> exists=%s", new_branch_name, exists)
if not exists:
return new_branch_name
raise ValueError(f"Can't find the next branch name for '{curr_branch_name}'")
def get_branch_hash(dir_name: str = ".") -> str:
"""
Return the hash of the commit right before the branch in `dir_name` was
created.
"""
curr_branch_name = get_branch_name(dir_name=dir_name)
hdbg.dassert_ne(curr_branch_name, "master")
_LOG.debug("curr_branch_name=%s", curr_branch_name)
#
cmd = f"cd {dir_name} && git merge-base master {curr_branch_name}"
_, hash_ = hsysinte.system_to_string(cmd)
hash_ = hash_.rstrip("\n").lstrip("\n")
hdbg.dassert_eq(len(hash_.split("\n")), 1)
return hash_
# #############################################################################
# Git submodule functions
# #############################################################################
@functools.lru_cache()
def get_client_root(super_module: bool) -> str:
"""
Return the full path of the root of the Git client.
E.g., `/Users/saggese/src/.../amp`.
:param super_module: if True use the root of the Git super_module,
if we are in a submodule. Otherwise use the Git sub_module root
"""
if super_module and is_inside_submodule():
# https://stackoverflow.com/questions/957928
# > cd /Users/saggese/src/.../amp
# > git rev-parse --show-superproject-working-tree
# /Users/saggese/src/...
cmd = "git rev-parse --show-superproject-working-tree"
else:
# > git rev-parse --show-toplevel
# /Users/saggese/src/.../amp
cmd = "git rev-parse --show-toplevel"
# TODO(gp): Use system_to_one_line().
_, out = hsysinte.system_to_string(cmd)
out = out.rstrip("\n")
hdbg.dassert_eq(len(out.split("\n")), 1, msg="Invalid out='%s'" % out)
client_root: str = os.path.realpath(out)
return client_root
def get_project_dirname(only_index: bool = False) -> str:
"""
    Return the name of the project (e.g., `/Users/saggese/src/amp1` ->
    `amp1`).
NOTE: this works properly only outside Docker, e.g., when calling from `invoke`.
Inside Docker the result might be incorrect since the Git client is mapped on
`/app`.
    :param only_index: return only the index of the client if possible,
        e.g., for `/Users/saggese/src/amp1` it returns the string `1`
"""
git_dir = get_client_root(super_module=True)
_LOG.debug("git_dir=%s", git_dir)
ret = os.path.basename(git_dir)
if only_index:
last_char = ret[-1]
hdbg.dassert(
last_char.isdigit(),
"The last char `%s` of the git dir `%s` is not a digit",
last_char,
git_dir,
)
ret = last_char
_LOG.debug("ret=%s", ret)
return ret
@functools.lru_cache()
def is_inside_submodule(git_dir: str = ".") -> bool:
"""
Return whether a dir is inside a Git submodule or a Git supermodule.
    We determine this by checking whether the current Git repo is included
inside another Git repo.
"""
cmd = []
# - Find the git root of the current directory
# - Check if the dir one level up is a valid Git repo
# Go to the dir.
cmd.append("cd %s" % git_dir)
# > cd im/
# > git rev-parse --show-toplevel
# /Users/saggese/src/.../amp
cmd.append('cd "$(git rev-parse --show-toplevel)/.."')
# > git rev-parse --is-inside-work-tree
# true
cmd.append("(git rev-parse --is-inside-work-tree | grep -q true)")
cmd_as_str = " && ".join(cmd)
rc = hsysinte.system(cmd_as_str, abort_on_error=False)
ret: bool = rc == 0
return ret
def _is_repo(repo_short_name: str) -> bool:
"""
    Return whether the short name of the current repo matches `repo_short_name`.
"""
repo_full_name = get_repo_full_name_from_dirname(".", include_host_name=False)
return get_repo_name(repo_full_name, in_mode="full_name") == repo_short_name
def is_amp() -> bool:
"""
Return whether we are inside `amp` repo.
    Either as a supermodule or a submodule, depending on the current
    working directory.
"""
return _is_repo("amp") or _is_repo("cmamp")
# TODO(gp): Be consistent with submodule and sub-module in the code. Same for
# supermodule.
def is_in_amp_as_submodule() -> bool:
"""
Return whether we are in the `amp` repo and it's a sub-module, e.g., of
`lm`.
"""
return is_amp() and is_inside_submodule(".")
def is_in_amp_as_supermodule() -> bool:
"""
Return whether we are in the `amp` repo and it's a super-module, i.e.,
`amp` by itself.
"""
return is_amp() and not is_inside_submodule(".")
# Using these functions is a last resort to skip / change the tests depending
# on the repo. We should gate the tests on the functionality that the repo
# supports, e.g.,
# ```
# hgit.execute_repo_config_code("has_dind_support()"),
# ```
#
# rather than their name.
def is_dev_tools() -> bool:
"""
Return whether we are inside `dev_tools` repo.
"""
return _is_repo("dev_tools")
def is_cmamp() -> bool:
"""
Return whether we are inside `cmamp` repo.
"""
return _is_repo("cmamp")
def is_lem() -> bool:
"""
Return whether we are inside `lem` repo.
"""
return _is_repo("lem")
def is_lime() -> bool:
"""
Return whether we are inside `lime` repo.
"""
return _is_repo("lime")
# #############################################################################
def _get_submodule_hash(dir_name: str) -> str:
"""
Report the Git hash that a submodule (e.g., amp) is at from the point of
view of a supermodule.
> git ls-tree master | grep <dir_name>
"""
hdbg.dassert_exists(dir_name)
cmd = "git ls-tree master | grep %s" % dir_name
data: Tuple[int, str] = hsysinte.system_to_one_line(cmd)
_, output = data
# 160000 commit 0011776388b4c0582161eb2749b665fc45b87e7e amp
_LOG.debug("output=%s", output)
data: List[str] = output.split()
_LOG.debug("data=%s", data)
git_hash = data[2]
return git_hash
@functools.lru_cache()
def get_path_from_supermodule() -> Tuple[str, str]:
"""
    Return the path of the supermodule including this Git repo and the path of
    the submodule inside it; both are empty for a standalone supermodule. See AmpTask1017.
E.g.,
- for amp included in another repo returns 'amp'
- for amp without supermodule returns ''
"""
cmd = "git rev-parse --show-superproject-working-tree"
# > cd /Users/saggese/src/.../lm/amp
# > git rev-parse --show-superproject-working-tree
# /Users/saggese/src/.../lm
#
# > cd /Users/saggese/src/.../lm
# > git rev-parse --show-superproject-working-tree
# (No result)
superproject_path: str = hsysinte.system_to_one_line(cmd)[1]
_LOG.debug("superproject_path='%s'", superproject_path)
#
cmd = (
f"git config --file {superproject_path}/.gitmodules --get-regexp path"
'| grep $(basename "$(pwd)")'
"| awk '{ print $2 }'"
)
# > git config --file /Users/saggese/src/.../.gitmodules --get-regexp path
# submodule.amp.path amp
submodule_path: str = hsysinte.system_to_one_line(cmd)[1]
_LOG.debug("submodule_path='%s'", submodule_path)
return superproject_path, submodule_path
@functools.lru_cache()
def get_submodule_paths() -> List[str]:
"""
Return the path of the submodules in this repo, e.g., `["amp"]` or `[]`.
"""
# > git config --file .gitmodules --get-regexp path
# submodule.amp.path amp
cmd = "git config --file .gitmodules --get-regexp path | awk '{ print $2 }'"
_, txt = hsysinte.system_to_string(cmd)
_LOG.debug("txt=%s", txt)
files: List[str] = hsysinte.text_to_list(txt)
_LOG.debug("files=%s", files)
return files
def has_submodules() -> bool:
return len(get_submodule_paths()) > 0
# #############################################################################
def _get_hash(git_hash: str, short_hash: bool, num_digits: int = 8) -> str:
hdbg.dassert_lte(1, num_digits)
if short_hash:
ret = git_hash[:num_digits]
else:
ret = git_hash
return ret
def _group_hashes(head_hash: str, remh_hash: str, subm_hash: str) -> str:
"""
head_hash: a
remh_hash: b
subm_hash: c
"""
map_ = collections.OrderedDict()
map_["head_hash"] = head_hash
map_["remh_hash"] = remh_hash
if subm_hash:
map_["subm_hash"] = subm_hash
#
inv_map = collections.OrderedDict()
for k, v in map_.items():
if v not in inv_map:
inv_map[v] = [k]
else:
inv_map[v].append(k)
#
txt = []
for k, v in inv_map.items():
# Transform:
# ('a2bfc704', ['head_hash', 'remh_hash'])
# into
# 'head_hash = remh_hash = a2bfc704'
txt.append("%s = %s" % (" = ".join(v), k))
txt = "\n".join(txt)
return txt
def report_submodule_status(dir_names: List[str], short_hash: bool) -> str:
"""
Return a string representing the status of the repos in `dir_names`.
"""
txt = []
for dir_name in dir_names:
txt.append("dir_name='%s'" % dir_name)
txt.append(" is_inside_submodule: %s" % is_inside_submodule(dir_name))
#
branch_name = get_branch_name(dir_name)
if branch_name != "master":
branch_name = "!!! %s !!!" % branch_name
txt.append(" branch: %s" % branch_name)
#
head_hash = get_head_hash(dir_name)
head_hash = _get_hash(head_hash, short_hash)
txt.append(" head_hash: %s" % head_hash)
#
remh_hash = get_remote_head_hash(dir_name)
remh_hash = _get_hash(remh_hash, short_hash)
txt.append(" remh_hash: %s" % remh_hash)
#
if dir_name != ".":
subm_hash = _get_submodule_hash(dir_name)
subm_hash = _get_hash(subm_hash, short_hash)
txt.append(" subm_hash: %s" % subm_hash)
txt_as_str = "\n".join(txt)
return txt_as_str
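# A minimal usage sketch (not part of the original module): print the status of the
# current repo and of the `amp` submodule, if present.
def _example_report_status() -> None:
    dir_names = get_repo_dirs()
    print(report_submodule_status(dir_names, short_hash=True))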
# #############################################################################
# GitHub repository name
# #############################################################################
# All functions should take as input `repo_short_name` and have a switch `mode`
# to distinguish full vs short repo name.
# TODO(gp): Maybe rename full -> long to keep it more symmetric "short vs long".
def _parse_github_repo_name(repo_name: str) -> Tuple[str, str]:
"""
Parse a repo name from `git remote`.
The supported formats are both SSH and HTTPS, e.g.,
    - `git@github.com:alphamatic/amp`
- `https://github.com/alphamatic/amp`
For both of these strings the function returns ("github.com", "alphamatic/amp").
"""
    # Try to parse the SSH format, e.g., `git@github.com:alphamatic/amp`
    m = re.match(r"^git@(\S+\.com):(\S+)$", repo_name)
    if not m:
        # Try to parse the HTTPS format, e.g., `https://github.com/alphamatic/amp`
        m = re.match(r"^https://(\S+\.com)/(\S+)$", repo_name)
hdbg.dassert(m, "Can't parse '%s'", repo_name)
m: Match[str]
host_name = m.group(1)
repo_name = m.group(2)
_LOG.debug("host_name=%s repo_name=%s", host_name, repo_name)
# We expect something like "alphamatic/amp".
m = re.match(r"^\S+/\S+$", repo_name)
hdbg.dassert(m, "repo_name='%s'", repo_name)
# origin [email protected]:.../ORG_....git (fetch)
suffix_to_remove = ".git"
if repo_name.endswith(suffix_to_remove):
repo_name = repo_name[: -len(suffix_to_remove)]
return host_name, repo_name
def get_repo_full_name_from_dirname(
dir_name: str, include_host_name: bool
) -> str:
"""
    Return the full name of the repo in `dir_name`, e.g., "alphamatic/amp".
    This function relies on `git remote` to gather the required information.
    :param include_host_name: also prepend the GitHub host name, e.g., returning
        "github.com/alphamatic/amp"
    :return: the full name of the repo in `dir_name`, e.g., "alphamatic/amp".
"""
hdbg.dassert_exists(dir_name)
#
cmd = "cd %s; (git remote -v | grep origin | grep fetch)" % dir_name
_, output = hsysinte.system_to_string(cmd)
# > git remote -v
    # origin git@github.com:alphamatic/amp (fetch)
    # origin git@github.com:alphamatic/amp (push)
    # TODO(gp): Make it more robust, by checking both fetch and push.
    # "origin git@github.com:alphamatic/amp (fetch)"
data: List[str] = output.split()
_LOG.debug("data=%s", data)
hdbg.dassert_eq(len(data), 3, "data='%s'", str(data))
    # Extract the middle string, e.g., "git@github.com:alphamatic/amp"
repo_name = data[1]
# Parse the string.
host_name, repo_name = _parse_github_repo_name(repo_name)
if include_host_name:
res = f"{host_name}/{repo_name}"
else:
res = repo_name
return res
def get_repo_full_name_from_client(super_module: bool) -> str:
"""
Return the full name of the repo (e.g., "alphamatic/amp") from a Git
client.
:param super_module: like in get_client_root()
"""
# Get the Git remote in the dir containing the Git repo.
git_dir = get_client_root(super_module)
repo_name = get_repo_full_name_from_dirname(git_dir, include_host_name=False)
return repo_name
# /////////////////////////////////////////////////////////////////////////
# Execute code from the `repo_config.py` in the super module.
def _get_repo_config_code(super_module: bool = True) -> str:
"""
Return the text of the code stored in `repo_config.py`.
"""
# TODO(gp): We should actually ask Git where the super-module is.
client_root = get_client_root(super_module)
file_name = os.path.join(client_root, "repo_config.py")
hdbg.dassert_file_exists(file_name)
code: str = hio.from_file(file_name)
return code
def execute_repo_config_code(code_to_execute: str) -> Any:
"""
Execute code in `repo_config.py`.
E.g.,
```
hgit.execute_repo_config_code("has_dind_support()")
```
"""
# Read the info from the current repo.
code = _get_repo_config_code()
# TODO(gp): make the linter happy creating this symbol that comes from the
# `exec()`.
exec(code, globals()) # pylint: disable=exec-used
ret = eval(code_to_execute)
return ret
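# A minimal usage sketch (not part of the original module); it assumes that the
# enclosing repo ships a `repo_config.py` defining `get_repo_map()`.
def _example_query_repo_config() -> None:
    repo_map = execute_repo_config_code("get_repo_map()")
    print("repo_map=%s" % str(repo_map))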
# /////////////////////////////////////////////////////////////////////////
def _decorate_with_host_name(
dict_: Dict[str, str], host_name: str
) -> Dict[str, str]:
"""
Prepend the host name to all the values of the passed dictionary.
"""
res = {k: f"{host_name}/{v}" for k, v in dict_.items()}
return res
@functools.lru_cache()
def _get_repo_short_to_full_name(include_host_name: bool) -> Dict[str, str]:
"""
Return the map from short name (e.g., "amp") to full name (e.g.,
"alphamatic/amp") using the information in `repo_config.py`
"""
# From short name to long name.
repo_map = {
"amp": "alphamatic/amp",
"dev_tools": "alphamatic/dev_tools",
}
if include_host_name:
host_name = "github.com"
repo_map = _decorate_with_host_name(repo_map, host_name)
_LOG.debug(
"include_host_name=%s, repo_map=\n%s",
include_host_name,
pprint.pformat(repo_map),
)
# Read the info from the current repo.
code = _get_repo_config_code()
# TODO(gp): make the linter happy creating this symbol that comes from the
# `exec()`.
exec(code, globals()) # pylint: disable=exec-used
current_repo_map = (
get_repo_map() # type: ignore[name-defined] # noqa: F821 # pylint: disable=undefined-variable
)
if include_host_name:
host_name = (
get_host_name() # type: ignore[name-defined] # noqa: F821 # pylint: disable=undefined-variable
)
current_repo_map = _decorate_with_host_name(current_repo_map, host_name)
_LOG.debug(
"include_host_name=%s, current_repo_map=\n%s",
include_host_name,
pprint.pformat(current_repo_map),
)
# Update the map.
hdbg.dassert_not_intersection(repo_map.keys(), current_repo_map.keys())
    repo_map.update(current_repo_map)
hdbg.dassert_no_duplicates(repo_map.values())
_LOG.debug(
"include_host_name=%s, repo_map=\n%s",
include_host_name,
pprint.pformat(repo_map),
)
return repo_map
# /////////////////////////////////////////////////////////////////////////
def get_complete_repo_map(
in_mode: str, include_host_name: bool = False
) -> Dict[str, str]:
"""
Return the full / short name of a Git repo based on the alternative name.
:param in_mode: the values `full_name` or `short_name` determine how to interpret
`name`
"""
repo_map = _get_repo_short_to_full_name(include_host_name)
if in_mode == "full_name":
# Compute the reverse map.
repo_map = {v: k for (k, v) in repo_map.items()}
elif in_mode == "short_name":
pass
else:
raise ValueError("Invalid in_mode='%s'" % in_mode)
_LOG.debug(
"For in_mode=%s, include_host_name=%s, repo_map=\n%s",
in_mode,
include_host_name,
pprint.pformat(repo_map),
)
return repo_map
def get_repo_name(
name: str, in_mode: str, include_host_name: bool = False
) -> str:
"""
Return the full/short name of a Git repo based on the other name.
:param in_mode: the values `full_name` or `short_name` determine how to interpret
`name`
"""
repo_map = get_complete_repo_map(in_mode, include_host_name)
hdbg.dassert_in(
name, repo_map, "Invalid name='%s' for in_mode='%s'", name, in_mode
)
ret = repo_map[name]
return ret
def get_all_repo_names(
in_mode: str, include_host_name: bool = False
) -> List[str]:
"""
Return the names (full or short depending on `mode`) of all the Git repos.
:param in_mode: if "full_name" return the full names (e.g., "alphamatic/amp")
if "short_name" return the short names (e.g., "amp")
"""
repo_map = get_complete_repo_map(in_mode, include_host_name)
return sorted(list(repo_map.keys()))
def get_task_prefix_from_repo_short_name(short_name: str) -> str:
"""
Return the task prefix for a repo (e.g., "amp" -> "AmpTask").
"""
if short_name == "amp":
prefix = "AmpTask"
elif short_name == "dev_tools":
prefix = "DevToolsTask"
else:
# We assume that we can build the prefix from the name (e.g., "lm" ->
# "LmTask").
# TODO(gp): A more general approach is to save this information inside
# `repo_config.py`.
prefix = short_name.capitalize() + "Task"
return prefix
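# A minimal usage sketch (not part of the original module) mapping each known short
# repo name to its task prefix; it assumes a valid `repo_config.py` is available.
def _example_print_task_prefixes() -> None:
    for short_name in get_all_repo_names(in_mode="short_name"):
        prefix = get_task_prefix_from_repo_short_name(short_name)
        print("%s -> %s" % (short_name, prefix))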
# #############################################################################
# Git path
# #############################################################################
@functools.lru_cache()
def find_file_in_git_tree(file_name: str, super_module: bool = True) -> str:
"""
Find the path of a file in a Git tree.
We get the Git root and then search for the file from there.
"""
root_dir = get_client_root(super_module=super_module)
# TODO(gp): Use -not -path '*/\.git/*'
cmd = "find %s -name '%s' | grep -v .git" % (root_dir, file_name)
_, file_name = hsysinte.system_to_one_line(cmd)
_LOG.debug("file_name=%s", file_name)
hdbg.dassert_ne(
file_name, "", "Can't find file '%s' in dir '%s'", file_name, root_dir
)
file_name: str = os.path.abspath(file_name)
hdbg.dassert_exists(file_name)
return file_name
def get_path_from_git_root(
file_name: str,
super_module: bool,
*,
git_root: Optional[str] = None,
) -> str:
"""
Get the path of `file_name` from the root of the Git client.
E.g., in Docker:
- `super_module=True` -> git_root=/app
- `super_module=False` -> git_root=/app/amp
:param super_module: like get_client_root()
"""
# Get the root of the Git client.
if git_root is None:
git_root = get_client_root(super_module)
#
git_root = os.path.normpath(git_root)
_LOG.debug("git_root=%s", git_root)
file_name = os.path.normpath(file_name)
_LOG.debug("file_name=%s", file_name)
if file_name.startswith(git_root):
# Remove the `git_root` from file_name.
ret = os.path.relpath(file_name, git_root)
else:
# If the file is not under the root, we can't normalize it.
raise ValueError(
"Can't normalize file_name='%s' for git_root='%s'"
% (file_name, git_root)
)
_LOG.debug(
"file_name=%s, git_root=%s (super_module=%s) -> ret=%s",
file_name,
git_root,
super_module,
ret,
)
return ret
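# A minimal usage sketch (not part of the original module): convert an absolute file
# name into a path relative to the Git root; the file chosen below is this module itself.
def _example_relative_path() -> None:
    file_name = os.path.join(get_client_root(super_module=True), "helpers", "hgit.py")
    print(get_path_from_git_root(file_name, super_module=True))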
@functools.lru_cache()
def get_amp_abs_path() -> str:
"""
Return the absolute path of `amp` dir.
"""
repo_sym_name = get_repo_full_name_from_client(super_module=False)
_LOG.debug("repo_sym_name=%s", repo_sym_name)
repo_sym_names = ["alphamatic/amp"]
code = "get_extra_amp_repo_sym_name()"
try:
repo_sym_names.append(execute_repo_config_code(code))
except NameError:
_LOG.debug("Can't execute the code '%s'", code)
if repo_sym_name in repo_sym_names:
# If we are in the amp repo, then the git client root is the amp
# directory.
git_root = get_client_root(super_module=False)
amp_dir = git_root
else:
# If we are not in the amp repo, then look for the amp dir.
amp_dir = find_file_in_git_tree("amp", super_module=True)
git_root = get_client_root(super_module=True)
amp_dir = os.path.join(git_root, amp_dir)
amp_dir = os.path.abspath(amp_dir)
# Sanity check.
hdbg.dassert_dir_exists(amp_dir)
return amp_dir
# TODO(gp): Is this needed?
def get_repo_dirs() -> List[str]:
"""
Return the list of the repo repositories, e.g., `[".", "amp", "infra"]`.
"""
dir_names = ["."]
dirs = ["amp"]
for dir_name in dirs:
if os.path.exists(dir_name):
dir_names.append(dir_name)
return dir_names
def find_docker_file(
file_name: str,
*,
root_dir: str = ".",
dir_depth: int = -1,
mode: str = "return_all_results",
candidate_files: Optional[List[str]] = None,
) -> List[str]:
"""
Convert a file or dir that was generated inside Docker to a file in the
current Git client.
This operation is best effort since it might not be able to find the
corresponding file in the current repo.
E.g.,
- A file like '/app/amp/core/dataflow_model/utils.py', in a Docker container with
Git root in '/app' becomes 'amp/core/dataflow_model/utils.py'
- For a file like '/app/amp/core/dataflow_model/utils.py' outside Docker, we look
for the file 'dataflow_model/utils.py' in the current client and then normalize
      with respect to the root of the current client.
:param dir_depth: same meaning as in `find_file_with_dir()`
:param mode: same as `system_interaction.select_result_file_from_list()`
:param candidate_files: list of results from the `find` command for unit
test mocking
:return: the best guess for the file name corresponding to `file_name`
"""
_LOG.debug("# Processing file_name='%s'", file_name)
hdbg.dassert_isinstance(file_name, str)
# Clean up file name.
file_name = os.path.normpath(file_name)
_LOG.debug("file_name=%s", file_name)
# Find the file in the dir.
file_names = hsysinte.find_file_with_dir(
file_name,
root_dir=root_dir,
dir_depth=dir_depth,
mode=mode,
candidate_files=candidate_files,
)
# Purify.
_LOG.debug("Purifying file_names=%s", file_names)
file_names = [
os.path.relpath(file_name, root_dir) for file_name in file_names
]
return file_names
# #############################################################################
# Git hash
# #############################################################################
def get_head_hash(dir_name: str = ".", short_hash: bool = False) -> str:
"""
Report the hash that a Git repo is synced at.
```
> git rev-parse HEAD
4759b3685f903e6c669096e960b248ec31c63b69
```
"""
hdbg.dassert_exists(dir_name)
opts = "--short " if short_hash else " "
cmd = f"cd {dir_name} && git rev-parse {opts}HEAD"
data: Tuple[int, str] = hsysinte.system_to_one_line(cmd)
_, output = data
return output
# TODO(gp): Use get_head_hash() and remove this.
def get_current_commit_hash(dir_name: str = ".") -> str:
hdbg.dassert_exists(dir_name)
cmd = f"cd {dir_name} && git rev-parse HEAD"
data: Tuple[int, str] = hsysinte.system_to_one_line(cmd)
_, sha = data
# 0011776388b4c0582161eb2749b665fc45b87e7e
_LOG.debug("sha=%s", sha)
return sha
def get_remote_head_hash(dir_name: str) -> str:
"""
Report the hash that the remote Git repo is at.
"""
hdbg.dassert_exists(dir_name)
sym_name = get_repo_full_name_from_dirname(dir_name, include_host_name=False)
cmd = f"git ls-remote [email protected]:{sym_name} HEAD 2>/dev/null"
data: Tuple[int, str] = hsysinte.system_to_one_line(cmd)
_, output = data
# > git ls-remote [email protected]:alphamatic/amp HEAD 2>/dev/null
# 921676624f6a5f3f36ab507baed1b886227ac2e6 HEAD
return output
# #############################################################################
# Modified files
# #############################################################################
def get_modified_files(
dir_name: str = ".", remove_files_non_present: bool = True
) -> List[str]:
"""
Return the files that are added and modified in the Git client.
In other words the files that will be committed with a `git commit -am ...`.
Equivalent to `dev_scripts/git_files.sh`
:param dir_name: directory with Git client
:param remove_files_non_present: remove the files that are not
currently present in the client
:return: list of files
"""
# If the client status is:
# > git status -s
# AM dev_scripts/infra/ssh_tunnels.py
# M helpers/git.py
# ?? linter_warnings.txt
#
# The result is:
# > git diff --cached --name-only
# dev_scripts/infra/ssh_tunnels.py
#
# > git ls-files -m
# dev_scripts/infra/ssh_tunnels.py
# helpers/git.py
cmd = "(git diff --cached --name-only; git ls-files -m) | sort | uniq"
files: List[str] = hsysinte.system_to_files(
cmd, dir_name, remove_files_non_present
)
return files
# TODO(gp): -> ...previously...
def get_previous_committed_files(
dir_name: str = ".",
num_commits: int = 1,
remove_files_non_present: bool = True,
) -> List[str]:
"""
Return files changed in the Git client in the last `num_commits` commits.
Equivalent to `dev_scripts/git_previous_commit_files.sh`
:param dir_name: directory with Git client
:param num_commits: how many commits in the past to consider
:param remove_files_non_present: remove the files that are not currently present
in the client
:return: list of files
"""
cmd = []
cmd.append('git show --pretty="" --name-only')
cmd.append("$(git log --author $(git config user.name) -%d" % num_commits)
cmd.append(r"""| \grep "^commit " | perl -pe 's/commit (.*)/$1/')""")
cmd_as_str = " ".join(cmd)
files: List[str] = hsysinte.system_to_files(
cmd_as_str, dir_name, remove_files_non_present
)
return files
def get_modified_files_in_branch(
dst_branch: str, dir_name: str = ".", remove_files_non_present: bool = True
) -> List[str]:
"""
Return files modified in the current branch with respect to `dst_branch`.
Equivalent to `git diff --name-only master...`
Please remember that there is a difference between `master` and `origin/master`.
See https://stackoverflow.com/questions/18137175
:param dir_name: directory with Git client
:param dst_branch: branch to compare to, e.g., `master`, `HEAD`
:param remove_files_non_present: remove the files that are not
currently present in the client
:return: list of files
"""
if dst_branch == "HEAD":
target = dst_branch
else:
target = f"{dst_branch}..."
cmd = f"git diff --name-only {target}"
files: List[str] = hsysinte.system_to_files(
cmd, dir_name, remove_files_non_present
)
return files
def get_summary_files_in_branch(
dst_branch: str,
*,
dir_name: str = ".",
) -> str:
"""
    Report a summary of the files in the current branch with respect to `dst_branch`.
Same interface as `get_modified_files_in_branch`.
"""
# File types (from https://git-scm.com/docs/git-diff).
file_types = [
("added", "A"),
("copied", "C"),
("deleted", "D"),
("modified", "M"),
("renamed", "R"),
("type changed", "T"),
("unmerged", "U"),
("unknown", "X"),
("broken pairing", "B"),
]
res = ""
for tag, diff_type in file_types:
cmd = f"git diff --diff-filter={diff_type} --name-only {dst_branch}..."
files = hsysinte.system_to_files(
cmd, dir_name, remove_files_non_present=False
)
_LOG.debug("files=%s", "\n".join(files))
if files:
res += f"# {tag}: {len(files)}\n"
res += hprint.indent("\n".join(files)) + "\n"
res = res.rstrip("\n")
return res
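# A minimal usage sketch (not part of the original module): summarize the files
# changed in the current branch with respect to `master`.
def _example_branch_summary() -> None:
    print(get_summary_files_in_branch("master"))
    print(get_modified_files_in_branch("master"))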
# #############################################################################
# Git commands.
# #############################################################################
# TODO(gp): -> get_user_name()
@functools.lru_cache()
def get_git_name() -> str:
"""
Return the git user name.
"""
cmd = "git config --get user.name"
# For some reason data is annotated as Any by mypy, instead of
# Tuple[int, str] so we need to cast it to the right value.
data: Tuple[int, str] = hsysinte.system_to_one_line(cmd)
_, output = data
return output
def git_log(num_commits: int = 5, my_commits: bool = False) -> str:
"""
Return the output of a pimped version of git log.
:param num_commits: number of commits to report
:param my_commits: True to report only the current user commits
:return: string
"""
cmd = []
cmd.append("git log --date=local --oneline --graph --date-order --decorate")
cmd.append(
"--pretty=format:" "'%h %<(8)%aN% %<(65)%s (%>(14)%ar) %ad %<(10)%d'"
)
cmd.append("-%d" % num_commits)
if my_commits:
# This doesn't work in a container if the user relies on `~/.gitconfig` to
# set the user name.
# TODO(gp): We should use `get_git_name()`.
cmd.append("--author $(git config user.name)")
cmd = " ".join(cmd)
data: Tuple[int, str] = hsysinte.system_to_string(cmd)
_, txt = data
return txt
def git_stash_push(
prefix: str, msg: Optional[str] = None, log_level: int = logging.DEBUG
) -> Tuple[str, bool]:
import helpers.hdatetime as hdateti
user_name = hsysinte.get_user_name()
server_name = hsysinte.get_server_name()
timestamp = hdateti.get_current_timestamp_as_string("naive_ET")
tag = "%s-%s-%s" % (user_name, server_name, timestamp)
tag = prefix + "." + tag
_LOG.debug("tag='%s'", tag)
cmd = "git stash push"
_LOG.debug("msg='%s'", msg)
push_msg = tag[:]
if msg:
push_msg += ": " + msg
cmd += " -m '%s'" % push_msg
hsysinte.system(cmd, suppress_output=False, log_level=log_level)
# Check if we actually stashed anything.
cmd = r"git stash list | \grep '%s' | wc -l" % tag
_, output = hsysinte.system_to_string(cmd)
was_stashed = int(output) > 0
if not was_stashed:
msg = "Nothing was stashed"
_LOG.warning(msg)
# raise RuntimeError(msg)
return tag, was_stashed
def git_stash_apply(mode: str, log_level: int = logging.DEBUG) -> None:
_LOG.debug("# Checking stash head ...")
cmd = "git stash list | head -3"
hsysinte.system(cmd, suppress_output=False, log_level=log_level)
#
_LOG.debug("# Restoring local changes...")
if mode == "pop":
cmd = "git stash pop --quiet"
elif mode == "apply":
cmd = "git stash apply --quiet"
else:
raise ValueError("mode='%s'" % mode)
hsysinte.system(cmd, suppress_output=False, log_level=log_level)
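# A minimal usage sketch (not part of the original module): stash the local changes
# under a recognizable prefix and restore them right away; the prefix is arbitrary.
def _example_stash_roundtrip() -> None:
    tag, was_stashed = git_stash_push("example", msg="temporary stash")
    if was_stashed:
        _LOG.info("Stashed changes as '%s'", tag)
        git_stash_apply("pop")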
# TODO(gp): Consider using this everywhere. Maybe it can simplify handling issues
# stemming from the super-module / sub-module repo.
def _get_git_cmd(super_module: bool) -> str:
"""
Build the first part of a Git command line.
"""
cmd = []
cmd.append("git")
client_root = get_client_root(super_module=super_module)
# Set the path to the repository (".git" directory), avoiding Git to search for
# it (from https://git-scm.com/docs/git)
cmd.append(f"--git-dir='{client_root}/.git'")
# Set the path to the working tree.
cmd.append(f"--work-tree='{client_root}'")
cmd = " ".join(cmd)
return cmd
def git_tag(
tag_name: str, super_module: bool = True, log_level: int = logging.DEBUG
) -> None:
"""
Tag the Git tree with `tag_name` without pushing the tag to the remote.
"""
_LOG.debug("# Tagging current commit ...")
git_cmd = _get_git_cmd(super_module)
cmd = f"{git_cmd} tag -f {tag_name}"
_ = hsysinte.system(cmd, suppress_output=False, log_level=log_level)
def git_push_tag(
tag_name: str,
remote: str = "origin",
super_module: bool = True,
log_level: int = logging.DEBUG,
) -> None:
"""
Push the tag `tag_name` to the given remote.
"""
_LOG.debug("# Pushing current commit ...")
git_cmd = _get_git_cmd(super_module)
cmd = f"{git_cmd} push {remote} {tag_name}"
_ = hsysinte.system(cmd, suppress_output=False, log_level=log_level)
def git_describe(
match: Optional[str] = None, log_level: int = logging.DEBUG
) -> str:
"""
Return the closest tag in the repo, e.g., 1.0.0.
    If there is no tag, this will return the short commit hash.
:param match: e.g., `dev_tools-*`, only consider tags matching the given glob
pattern
"""
_LOG.debug("# Looking for version ...")
cmd = "git describe --tags --always --abbrev=0"
if match is not None:
hdbg.dassert_isinstance(match, str)
hdbg.dassert_ne(match, "")
cmd = f"{cmd} --match '{match}'"
num, tag = hsysinte.system_to_one_line(cmd, log_level=log_level)
_ = num
return tag
def git_add_update(
file_list: Optional[List[str]] = None, log_level: int = logging.DEBUG
) -> None:
"""
    Add a list of files to the Git staging area.
If `file_list` is not specified, it adds all modified files.
:param file_list: list of files to `git add`
"""
_LOG.debug("# Adding all changed files to staging ...")
cmd = "git add %s" % (" ".join(file_list) if file_list is not None else "-u")
hsysinte.system(cmd, suppress_output=False, log_level=log_level)
def fetch_origin_master_if_needed() -> None:
"""
    If inside the CI system, force fetching the `master` branch from the Git repo.
When testing a branch, `master` is not always fetched, but it might
be needed by tests.
"""
if hsysinte.is_inside_ci():
_LOG.warning("Running inside CI so fetching master")
cmd = "git branch -a"
_, txt = hsysinte.system_to_string(cmd)
_LOG.debug("%s=%s", cmd, txt)
cmd = r'git branch -a | egrep "\s+master\s*$" | wc -l'
# * (HEAD detached at pull/1337/merge)
# master
# remotes/origin/master
# remotes/pull/1337/merge
_, num = hsysinte.system_to_one_line(cmd)
num = int(num)
_LOG.debug("num=%s", num)
if num == 0:
# See AmpTask1321 and AmpTask1338 for details.
cmd = "git fetch origin master:refs/remotes/origin/master"
hsysinte.system(cmd)
cmd = "git branch --track master origin/master"
hsysinte.system(cmd)
def is_client_clean(
dir_name: str = ".", abort_if_not_clean: bool = False
) -> bool:
"""
    Return whether the Git client in `dir_name` is clean, i.e., no files are modified, added, or removed.
:param abort_if_not_clean: if True and the client is not clean, abort reporting
the files modified
"""
_LOG.debug(hprint.to_str("abort_if_not_clean"))
files = get_modified_files(dir_name)
# Remove "amp" from files.
if "amp" in files:
_LOG.warning("Skipping 'amp' in modified files")
files = [f for f in files if "amp" != f]
# A Git client is clean iff there are no files in the index.
is_clean = len(files) == 0
if abort_if_not_clean:
hdbg.dassert(
is_clean, "The Git client is not clean:\n%s", "\n".join(files)
)
return is_clean
def does_branch_exist(branch_name: str, dir_name: str = ".") -> bool:
# From https://stackoverflow.com/questions/35941566
cmd = f"cd {dir_name} && git fetch --prune"
hsysinte.system(cmd, abort_on_error=False)
# From https://stackoverflow.com/questions/5167957
# > git rev-parse --verify LimeTask197_Get_familiar_with_CF2
# f03bfa0b4577c2524afd6a1f24d06013f8aa9f1a
# > git rev-parse --verify I_dont_exist
# fatal: Needed a single revision
cmd = f"cd {dir_name} && git rev-parse --verify {branch_name}"
rc = hsysinte.system(cmd, abort_on_error=False)
exists = rc == 0
return exists
def delete_branches(
dir_name: str,
mode: str,
branches: List[str],
confirm_delete: bool,
abort_on_error: bool = True,
) -> None:
"""
Delete local or remote branches.
:param mode: local or remote
:param branches: list of branches to delete
:param confirm_delete: ask the user to confirm before deleting, or just do it
"""
hdbg.dassert_isinstance(branches, list)
delete_cmd = f"cd {dir_name} && "
if mode == "local":
delete_cmd += "git branch -d"
elif mode == "remote":
delete_cmd += "git push origin --delete"
else:
raise ValueError(f"Invalid mode='{mode}'")
# Ask whether to continue.
if confirm_delete:
branches_as_str = " ".join(branches)
msg = (
hdbg.WARNING
+ f": Delete {len(branches)} {mode} branch(es) '{branches_as_str}'?"
)
hsysinte.query_yes_no(msg, abort_on_no=True)
for branch in branches:
if mode == "remote":
prefix = "origin/"
hdbg.dassert(
branch.startswith(prefix),
"Remote branch '%s' needs to start with '%s'",
branch,
prefix,
)
branch = branch[len(prefix) :]
cmd = f"{delete_cmd} {branch}"
hsysinte.system(
cmd,
suppress_output=False,
log_level="echo",
abort_on_error=abort_on_error,
)
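# A minimal usage sketch (not part of the original module): delete a local branch
# only if it exists, asking the user for confirmation; the branch name is made up.
def _example_delete_branch(branch_name: str = "AmpTask0000_Example_branch") -> None:
    if does_branch_exist(branch_name):
        delete_branches(".", "local", [branch_name], confirm_delete=True)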
|
StarcoderdataPython
|
3225366
|
from django.core.mail import send_mail
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView
from users.forms import CreationForm
class SignUp(CreateView):
form_class = CreationForm
success_url = reverse_lazy('login')
template_name = 'reg.html'
def form_valid(self, form):
email = form.cleaned_data['email']
self.send_mail_ls(email)
return super().form_valid(form)
def send_mail_ls(self, email):
send_mail(
'Регистрация',
'Добро пожаловать на сайт!',
'<EMAIL> <<EMAIL>>',
[email],
fail_silently=False
)
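# A minimal wiring sketch (assumed, not part of the original file): the view above is
# typically exposed in a `urls.py` such as the one below; the 'signup/' route, its name,
# and the `users.views` module path are assumptions.
#
# from django.urls import path
# from users.views import SignUp
#
# urlpatterns = [
#     path('signup/', SignUp.as_view(), name='signup'),
# ]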
|
StarcoderdataPython
|
1782008
|
# Copyright (c) 2018 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
"""The signac framework aids in the management of large and
heterogeneous data spaces.
It provides a simple and robust data model to create a
well-defined indexable storage layout for data and metadata.
This makes it easier to operate on large data spaces,
streamlines post-processing and analysis and makes data
collectively accessible."""
from __future__ import absolute_import
from . import contrib
from . import db
from . import cite
from . import errors
from . import warnings
from . import sync
from .contrib import Project
from .contrib import TemporaryProject
from .contrib import get_project
from .contrib import init_project
from .contrib import get_job
from .contrib import fetch
from .contrib import export_one
from .contrib import export
from .contrib import export_to_mirror
from .contrib import export_pymongo
from .contrib import filesystems as fs
from .contrib import Collection
from .contrib import index_files
from .contrib import index
from .contrib import RegexFileCrawler
from .contrib import MasterCrawler
from .contrib import SignacProjectCrawler
from .db import get_database
from .core.jsondict import buffer_reads_writes as buffered
from .core.jsondict import in_buffered_mode as is_buffered
from .core.jsondict import flush_all as flush
from .core.jsondict import get_buffer_size
from .core.jsondict import get_buffer_load
from .core.jsondict import JSONDict
from .core.h5store import H5Store
from .core.h5store import H5StoreManager
__version__ = '1.1.0'
__all__ = ['__version__', 'contrib', 'db', 'errors', 'warnings', 'sync',
'cite',
'Project', 'TemporaryProject', 'get_project', 'init_project', 'get_job',
'get_database', 'fetch',
'export_one', 'export', 'export_to_mirror',
'Collection',
'export_pymongo', 'fs',
'index_files', 'index',
'RegexFileCrawler',
'MasterCrawler',
'SignacProjectCrawler',
'buffered', 'is_buffered', 'flush', 'get_buffer_size', 'get_buffer_load',
'JSONDict',
'H5Store', 'H5StoreManager',
]
|
StarcoderdataPython
|
3337518
|
from .base import *
DEBUG = False
ALLOWED_HOSTS = [
    # Change this to the domain name of your web site
"www.monsiteweb.fr"
]
|
StarcoderdataPython
|
126569
|
import re
from ..exo_classes.exo_classes import BuiltInFunction, Number
from ..exo_classes.exo_context import Context
from ..exo_utils.exo_interpreter import Interpreter, SymbolTable
from ..exo_utils.exo_lexer import Lexer
from ..exo_utils.exo_parser import Parser
global_symbol_table = SymbolTable()
global_symbol_table.set('null', None, Number(0), None)
global_symbol_table.set('false', None, Number(0), None)
global_symbol_table.set('true', None, Number(1), None)
global_symbol_table.set("print", None, BuiltInFunction('print'), None)
global_symbol_table.set("input", None, BuiltInFunction('input'), None)
global_symbol_table.set("input_int", None, BuiltInFunction('input_int'), None)
global_symbol_table.set('mod', None, BuiltInFunction('mod'), None)
def filter_comments(text):
regexpr = r'[ ]*[#]+.*'
text = re.sub(regexpr, '', text)
return text
def run(file_name, text):
text = filter_comments(text)
lexer = Lexer(file_name, text)
tokens, error = lexer.make_tokens()
if error:
return None, error
parser = Parser(tokens)
ast = parser.parse()
if ast[-1].error:
return None, ast[-1].error
interpreter = Interpreter()
context = Context('<program>')
context.symbol_table = global_symbol_table
result = None
for statement in ast:
result = interpreter.visit(statement.node, context)
if result.error:
return result.value, result.error
if result:
return result.value, result.error
    else:
        # No statements were executed: keep the same (value, error) return shape.
        return None, None
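# A minimal driver sketch (not part of the original module); the command-line
# argument is assumed to be the path of an Exo source file.
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        with open(sys.argv[1]) as source:
            value, error = run(sys.argv[1], source.read())
        print(error if error else value)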
|
StarcoderdataPython
|
80152
|
<filename>backend_test/forms.py
"""Forms to upload the menu."""
#Django
from django import forms
#Models
from .utils.models import Ingredients, Menu, User, Orders
class MenuForm(forms.Form):
"""Form based on model Menu"""
dish_name = forms.CharField(
max_length=150,
required=True
)
description = forms.CharField(
required=True
)
new_ingredients = forms.CharField()
ingredients = forms.ModelMultipleChoiceField(
queryset=Ingredients.objects.all(),
widget=forms.CheckboxSelectMultiple(),
required=False
)
image = forms.ImageField()
class OrderForm(forms.Form):
"""Form based on model Order"""
menu = forms.ModelChoiceField(
queryset=Menu.objects.all(),
required=False)
ingredients = forms.ModelMultipleChoiceField(
queryset=Ingredients.objects.all(),
widget=forms.CheckboxSelectMultiple(),
required=False
)
quantity = forms.IntegerField(
max_value=5,
)
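# A minimal usage sketch (not part of the original module) of how a view might bind
# and validate MenuForm; `request` is assumed to be a Django request object.
def _example_handle_menu_submission(request):
    """Illustrative only: validate MenuForm from POST data and uploaded files."""
    form = MenuForm(request.POST, request.FILES)
    if form.is_valid():
        return form.cleaned_data['dish_name'], form.cleaned_data['ingredients']
    return None, form.errors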
|
StarcoderdataPython
|
184020
|
from key_events import *
import os
os.nice(40)
pos = position()
for x in range(1000):
#time.sleep(.25)
mouseclick(*pos)
|
StarcoderdataPython
|
3244027
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions
from django.contrib.auth import get_user_model
from core.models import Thread, Message
def home(self, room_name):
    return {'room_name': room_name}
class ReturnThreads(APIView):
authentication_classes = [authentication.TokenAuthentication]
permission_classes = [permissions.IsAuthenticated]
def get(self, request, format=None):
threads = Thread.objects.filter(users=request.user)
ths = []
users = []
avatar = ''
for i in threads:
for l in i.users.all():
users.append(l.username)
if len(i.users.all()) < 3 and l.id != request.user.id:
avatar = str(l.avatar)
ths.append({
'id': i.id,
'users': users,
'avatar': avatar
})
users=[]
return Response(ths)
class CreateThreadAPIView(APIView):
authentication_classes = [authentication.TokenAuthentication]
permission_classes = [permissions.IsAuthenticated]
def post(self, request, format=None):
other_user = get_user_model().objects.get(id=request.data.get('id'))
print('OT:', other_user)
thread = Thread.objects.create()
thread.users.add(other_user)
thread.users.add(request.user)
return Response({'id': thread.id})
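# A minimal routing sketch (assumed, not part of the original file): these API views
# would typically be exposed in a `urls.py` like the one below; the paths are placeholders.
#
# from django.urls import path
#
# urlpatterns = [
#     path('api/threads/', ReturnThreads.as_view()),
#     path('api/threads/create/', CreateThreadAPIView.as_view()),
# ]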
|
StarcoderdataPython
|
122410
|
import os
# from sklearn.metrics import log_loss, roc_auc_score
import time
from librerank.utils import *
from librerank.reranker import *
from librerank.rl_reranker import *
def eval(model, data, l2_reg, batch_size, isrank, metric_scope, _print=False):
preds = []
# labels = []
losses = []
data_size = len(data[0])
batch_num = data_size // batch_size
print('eval', batch_size, batch_num)
t = time.time()
for batch_no in range(batch_num):
data_batch = get_aggregated_batch(data, batch_size=batch_size, batch_no=batch_no)
pred, loss = model.eval(data_batch, l2_reg)
preds.extend(pred)
# labels.extend(label)
losses.append(loss)
loss = sum(losses) / len(losses)
# cates = np.reshape(np.array(data[1])[:, :, 1], [-1, max_time_len]).tolist()
labels = data[4]
# print(preds[0], labels[0])
# poss = data[-2]
res = evaluate_multi(labels, preds, metric_scope, isrank, _print)
print("EVAL TIME: %.4fs" % (time.time() - t))
# return loss, res_low, res_high
return loss, res
def train(train_file, test_file, feature_size, max_time_len, itm_spar_fnum, itm_dens_fnum, profile_num, params):
tf.reset_default_graph()
# gpu settings
gpu_options = tf.GPUOptions(allow_growth=True)
perlist = False
if params.model_type == 'PRM':
model = PRM(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
elif params.model_type == 'SetRank':
model = SetRank(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
elif params.model_type == 'DLCM':
model = DLCM(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
elif params.model_type == 'GSF':
model = GSF(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm, group_size=params.group_size)
elif params.model_type == 'miDNN':
model = miDNN(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
elif params.model_type == 'EGR_evaluator':
model = EGR_evaluator(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
elif params.model_type == 'EGR_generator':
model = PPOModel(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm, rep_num=params.rep_num)
# discriminator = EGR_discriminator(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
# profile_num, max_norm=params.max_norm)
evaluator = EGR_evaluator(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
with evaluator.graph.as_default() as g:
sess = tf.Session(graph=g, config=tf.ConfigProto(gpu_options=gpu_options))
evaluator.set_sess(sess)
sess.run(tf.global_variables_initializer())
evaluator.load(params.evaluator_path)
# with discriminator.graph.as_default() as g:
# sess = tf.Session(graph=g, config=tf.ConfigProto(gpu_options=gpu_options))
# discriminator.set_sess(sess)
# sess.run(tf.global_variables_initializer())
elif params.model_type == 'Seq2Slate':
# model = Seq2Slate(feature_size, eb_dim, hidden_size, max_time_len, max_seq_len, item_fnum, num_cat, mu)
model = SLModel(feature_size, params.eb_dim, params.hidden_size, max_time_len, itm_spar_fnum, itm_dens_fnum,
profile_num, max_norm=params.max_norm)
else:
print('No Such Model', params.model_type)
exit(0)
with model.graph.as_default() as g:
sess = tf.Session(graph=g, config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
model.set_sess(sess)
# training_monitor = {
# 'train_loss': [],
# 'vali_loss': [],
# 'map_l': [],
# 'ndcg_l': [],
# 'clicks_l': [],
# 'utility_l': [],
# 'map_h':[],
# 'ndcg_h': [],
# 'clicks_h': [],
# 'utility_h': [],
# }
training_monitor = {
'train_loss': [],
'vali_loss': [],
'map_l': [],
'ndcg_l': [],
'clicks_l': [],
# 'utility_l': [],
# 'de_ndcg_l': [],
}
model_name = '{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(params.timestamp, initial_ranker, params.model_type, params.batch_size,
params.lr, params.l2_reg, params.hidden_size, params.eb_dim, params.keep_prob)
if not os.path.exists('{}/logs_{}/{}'.format(parse.save_dir, data_set_name, max_time_len)):
os.makedirs('{}/logs_{}/{}'.format(parse.save_dir, data_set_name, max_time_len))
if not os.path.exists('{}/save_model_{}/{}/{}/'.format(parse.save_dir, data_set_name, max_time_len, model_name)):
os.makedirs('{}/save_model_{}/{}/{}/'.format(parse.save_dir, data_set_name, max_time_len, model_name))
save_path = '{}/save_model_{}/{}/{}/ckpt'.format(parse.save_dir, data_set_name, max_time_len, model_name)
log_save_path = '{}/logs_{}/{}/{}.metrics'.format(parse.save_dir, data_set_name, max_time_len, model_name)
# training process
# with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
train_losses_step = []
# before training process
step = 0
vali_loss, res = eval(model, test_file, params.l2_reg, params.batch_size, False, params.metric_scope)
training_monitor['train_loss'].append(None)
training_monitor['vali_loss'].append(None)
training_monitor['map_l'].append(res[0][0])
training_monitor['ndcg_l'].append(res[1][0])
# training_monitor['de_ndcg_l'].append(res[2][0])
training_monitor['clicks_l'].append(res[2][0])
# training_monitor['utility_l'].append(res[4][0])
print("STEP %d INTIAL RANKER | LOSS VALI: NULL" % step)
for i, s in enumerate(params.metric_scope):
print("@%d MAP: %.4f NDCG: %.4f CLICKS: %.4f " % (s, res[0][i], res[1][i], res[2][i]))
early_stop = False
data = train_file
data_size = len(data[0])
batch_num = data_size // params.batch_size
eval_iter_num = (data_size // 5) // params.batch_size
print('train', data_size, batch_num)
# begin training process
for epoch in range(params.epoch_num):
# if early_stop:
# break
for batch_no in range(batch_num):
data_batch = get_aggregated_batch(data, batch_size=params.batch_size, batch_no=batch_no)
# if early_stop:
# break
if params.model_type == 'EGR_generator':
data_batch = repeat_data(data_batch, params.rep_num)
act_idx_out, act_probs_one, rl_sp_outputs, rl_de_outputs, mask_arr, lp_sp_data, lp_de_data, _\
= model.predict(data_batch, params.l2_reg)
pred = evaluator.predict(rl_sp_outputs, rl_de_outputs, data_batch[6])
# d_preds, d_rewards = discriminator.predict(rl_sp_outputs, rl_de_outputs, data_batch[6])
# rewards = pred + d_rewards.reshape((-1, max_time_len)) * c_rewards_d
rewards = pred
# rewards = pred
# train rl-rerank
# for _ in range(update_steps):
loss, mean_return = model.train(data_batch, rl_sp_outputs, rl_de_outputs, act_probs_one, act_idx_out,
rewards, mask_arr, params.c_entropy, params.lr, params.l2_reg, params.keep_prob)
# train discriminator
# if step % (update_rate_d * int(update_steps)) == 0:
# d_label = np.array([1] * lp_sp_data.shape[0] + [0] * rl_sp_outputs.shape[0])
# spar_data = np.concatenate([lp_sp_data, rl_sp_outputs], axis=0)
# dens_data = np.concatenate([lp_de_data, rl_de_outputs], axis=0)
# seq_len = np.array(data_batch[6] + data_batch[6])
# d_total_loss = discriminator.train([spar_data, dens_data, d_label, seq_len], lr, l2_reg)
# print('dis, step: %d' % (step), 'loss', d_total_loss)
elif params.model_type == 'Seq2Slate':
act_idx_out, act_probs_one, rl_sp_outputs, rl_de_outputs, mask_arr, lp_sp_data, lp_de_data, _ \
= model.predict(data_batch, params.l2_reg)
loss = model.train(data_batch, rl_sp_outputs, rl_de_outputs, mask_arr, params.lr,
params.l2_reg, params.keep_prob)
else:
loss = model.train(data_batch, params.lr, params.l2_reg, params.keep_prob)
step += 1
train_losses_step.append(loss)
if step % eval_iter_num == 0:
train_loss = sum(train_losses_step) / len(train_losses_step)
training_monitor['train_loss'].append(train_loss)
train_losses_step = []
vali_loss, res = eval(model, test_file, params.l2_reg, params.batch_size, True,
params.metric_scope, False)
training_monitor['train_loss'].append(train_loss)
training_monitor['vali_loss'].append(vali_loss)
training_monitor['map_l'].append(res[0][0])
training_monitor['ndcg_l'].append(res[1][0])
# training_monitor['de_ndcg_l'].append(res[2][0])
training_monitor['clicks_l'].append(res[2][0])
# training_monitor['utility_l'].append(res[4][0])
print("EPOCH %d STEP %d LOSS TRAIN: %.4f | LOSS VALI: %.4f" % (epoch, step, train_loss, vali_loss))
for i, s in enumerate(params.metric_scope):
print("@%d MAP: %.4f NDCG: %.4f CLICKS: %.4f " % (s, res[0][i], res[1][i], res[2][i]))
if training_monitor['map_l'][-1] > max(training_monitor['map_l'][:-1]):
# save model
model.save(save_path)
pkl.dump(res[-1], open(log_save_path, 'wb'))
print('model saved')
if len(training_monitor['map_l']) > 2 and epoch > 0:
# if (training_monitor['vali_loss'][-1] > training_monitor['vali_loss'][-2] and
# training_monitor['vali_loss'][-2] > training_monitor['vali_loss'][-3]):
# early_stop = True
if (training_monitor['map_l'][-2] - training_monitor['map_l'][-1]) <= 0.01 and (
training_monitor['map_l'][-3] - training_monitor['map_l'][-2]) <= 0.01:
early_stop = True
# generate log
if not os.path.exists('{}/logs_{}/{}/'.format(parse.save_dir, data_set_name, max_time_len)):
os.makedirs('{}/logs_{}/{}/'.format(parse.save_dir, data_set_name, max_time_len))
with open('{}/logs_{}/{}/{}.monitor.pkl'.format(parse.save_dir, data_set_name, max_time_len, model_name), 'wb') as f:
pkl.dump(training_monitor, f)
def reranker_parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--max_time_len', default=10, type=int, help='max time length')
parser.add_argument('--save_dir', type=str, default='./', help='dir that saves logs and model')
parser.add_argument('--data_dir', type=str, default='./data/toy/', help='data dir')
parser.add_argument('--model_type', default='PRM', choices=['PRM', 'DLCM', 'SetRank', 'GSF', 'miDNN', 'Seq2Slate', 'EGR_evaluator', 'EGR_generator'],
type=str, help='algorithm name, including PRM, DLCM, SetRank, GSF, miDNN, Seq2Slate, EGR_evaluator, EGR_generator')
parser.add_argument('--data_set_name', default='ad', type=str, help='name of dataset, including ad and prm')
parser.add_argument('--initial_ranker', default='lambdaMART', choices=['DNN', 'lambdaMART'], type=str, help='name of dataset, including DNN, lambdaMART')
parser.add_argument('--epoch_num', default=30, type=int, help='epochs of each iteration.')
parser.add_argument('--batch_size', default=16, type=int, help='batch size')
parser.add_argument('--rep_num', default=5, type=int, help='samples repeat number')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate')
parser.add_argument('--l2_reg', default=1e-4, type=float, help='l2 loss scale')
parser.add_argument('--keep_prob', default=0.8, type=float, help='keep probability')
parser.add_argument('--eb_dim', default=16, type=int, help='size of embedding')
parser.add_argument('--hidden_size', default=64, type=int, help='hidden size')
parser.add_argument('--group_size', default=1, type=int, help='group size for GSF')
parser.add_argument('--metric_scope', default=[1, 3, 5, 10], type=list, help='the scope of metrics')
parser.add_argument('--max_norm', default=0, type=float, help='max norm of gradient')
parser.add_argument('--c_entropy', default=0.001, type=float, help='entropy coefficient in loss')
# parser.add_argument('--decay_steps', default=3000, type=int, help='learning rate decay steps')
# parser.add_argument('--decay_rate', default=1.0, type=float, help='learning rate decay rate')
parser.add_argument('--timestamp', type=str, default=datetime.datetime.now().strftime("%Y%m%d%H%M"))
parser.add_argument('--evaluator_path', type=str, default='', help='evaluator ckpt dir')
parser.add_argument('--reload_path', type=str, default='', help='model ckpt dir')
parser.add_argument('--setting_path', type=str, default='./config/prm_setting.json', help='setting dir')
FLAGS, _ = parser.parse_known_args()
return FLAGS
if __name__ == '__main__':
# parameters
random.seed(1234)
parse = reranker_parse_args()
if parse.setting_path:
parse = load_parse_from_json(parse, parse.setting_path)
data_set_name = parse.data_set_name
processed_dir = parse.data_dir
stat_dir = os.path.join(processed_dir, 'data.stat')
max_time_len = parse.max_time_len
initial_ranker = parse.initial_ranker
if data_set_name == 'prm' and parse.max_time_len > 30:
max_time_len = 30
print(parse)
with open(stat_dir, 'r') as f:
stat = json.load(f)
num_item, num_cate, num_ft, profile_fnum, itm_spar_fnum, itm_dens_fnum, = stat['item_num'], stat['cate_num'], \
stat['ft_num'], stat['profile_fnum'], stat['itm_spar_fnum'], stat['itm_dens_fnum']
print('num of item', num_item, 'num of list', stat['train_num'] + stat['val_num'] + stat['test_num'],
'profile num', profile_fnum, 'spar num', itm_spar_fnum, 'dens num', itm_dens_fnum)
# train_file, val_file, test_file = pkl.load(open(os.path.join(processed_dir, 'data.data'), 'rb'))
# props = pkl.load(open(os.path.join(processed_dir, 'prop'), 'rb'))
# props[0] = [1e-6 for i in range(max_time_len)]
# profile = pkl.load(open(os.path.join(processed_dir, 'user.profile'), 'rb'))
# construct training files
train_dir = os.path.join(processed_dir, initial_ranker + '.data.train')
if os.path.isfile(train_dir):
train_lists = pkl.load(open(train_dir, 'rb'))
else:
train_lists = construct_list(os.path.join(processed_dir, initial_ranker + '.rankings.train'), max_time_len)
pkl.dump(train_lists, open(train_dir, 'wb'))
# construct test files
test_dir = os.path.join(processed_dir, initial_ranker + '.data.test')
if os.path.isfile(test_dir):
test_lists = pkl.load(open(test_dir, 'rb'))
else:
test_lists = construct_list(os.path.join(processed_dir, initial_ranker + '.rankings.test'), max_time_len)
pkl.dump(test_lists, open(test_dir, 'wb'))
train(train_lists, test_lists, num_ft, max_time_len, itm_spar_fnum, itm_dens_fnum, profile_fnum, parse)
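# A typical invocation sketch (assumed, not part of the original script); the script
# name and paths below are placeholders:
#
#   python run_reranker.py --model_type PRM --data_set_name ad \
#       --data_dir ./data/ad/ --setting_path ./config/prm_setting.json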
|
StarcoderdataPython
|
3315014
|
from pytest import LogCaptureFixture
from .util import get_main_output
def test_content_types(caplog: LogCaptureFixture) -> None:
for _ in ("js_output.cwl", "js_output_workflow.cwl"):
commands = [
"https://raw.githubusercontent.com/common-workflow-language/common-workflow-language/main/v1.0/v1.0/test-cwl-out2.cwl",
"https://github.com/common-workflow-language/common-workflow-language/blob/main/v1.0/v1.0/empty.json",
]
error_code, _, stderr = get_main_output(commands)
found = False
for record in caplog.records:
if (
record.name == "salad"
and "got content-type of 'text/html'" in record.message
):
found = True
break
assert found
assert error_code == 1, stderr
|
StarcoderdataPython
|
128150
|
<reponame>NunoEdgarGFlowHub/katecheo
"""
python QuestionDetector_Test.py
"""
import unittest
import json
import os
os.environ[
'KATECHEO_NER'] = 'health=https://storage.googleapis.com/pachyderm-neuralbot/ner_models/health.zip,faith=https://storage.googleapis.com/pachyderm-neuralbot/ner_models/faith.zip'
import TargetClassifier
class TargetClassifier_Test(unittest.TestCase):
def setUp(self):
self.classifier = TargetClassifier.TargetClassifier()
def test_get_topic(self):
params = ['Does some food increase pollen allergy symptoms?']
response = self.classifier.predict(params, "features", {'tags': {'proceed': True}})
self.assertIsNotNone(response)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
170009
|
<gh_stars>0
import yaml
import json
from configparser import ConfigParser
def to_flatdict(file_path):
raw_file_path = file_path.replace("\t","\\t").replace("\n","\\n").replace("\b","\\b").replace("\f","\\f").replace("\r","\\r")
if file_path.endswith(".yaml") or file_path.endswith(".yml"):
return yaml_to_flatdict(raw_file_path)
elif file_path.endswith(".cfg"):
return cfg_to_flatdict(raw_file_path)
# yaml to flatdict
def yaml_to_flatdict(file_path):
d1 = {}
with open(file_path) as file:
documents = yaml.safe_load(file)
# value_yaml = flatdict.FlatDict(documents)
return convert_flatdict(documents)
# .cfg(.ini) to flatdict
def cfg_to_flatdict(file_path):
config = ConfigParser()
config.read(file_path)
sections = config._sections
return convert_flatdict(sections)
def convert_flatdict(file_obj):
d1 = {}
for x in file_obj:
for y in file_obj[x]:
k = x+"_"+y
d1[k] = file_obj[x][y]
return d1
"""out = to_flatdict("D:\Aishwarya_Work\pythonProject5\examples\conf2.yml")
print(out)
out1 = to_flatdict("D:\Aishwarya_Work\pythonProject5\examples\conf1.cfg")
print(out1)"""
|
StarcoderdataPython
|
4836665
|
__version__ = '1.1.0+smth'
import numpy as np
import pyrep
pr_v = np.array(pyrep.__version__.split('.'), dtype=int)
if pr_v.size < 4 or np.any(pr_v < np.array([4, 1, 0, 2])):
raise ImportError(
        'PyRep version must be at least 4.1.0.2. Please update PyRep.')
from rlbench.environment import Environment
from rlbench.action_modes import ArmActionMode
from rlbench.observation_config import ObservationConfig
from rlbench.observation_config import CameraConfig
from rlbench.sim2real.domain_randomization import RandomizeEvery
from rlbench.sim2real.domain_randomization import VisualRandomizationConfig
from rlbench.sim2real.domain_randomization_environment import DomainRandomizationEnvironment
|
StarcoderdataPython
|
3340148
|
<filename>FSJ_django20_project/FSJ/forms/forms_student.py
"""All ModelForms based on the Student model"""
from ..models import Student
from .forms_modelform import ModelForm
from django.forms import TextInput
class StudentForm(ModelForm):
"""Unrestricted student form available to coordinators creating a new student"""
class Meta:
model = Student
fields = ('ccid', 'first_name', 'middle_name', 'last_name', 'email', 'lang_pref', 'program', 'year', 'student_id', 'gpa', 'credits')
def __init__(self, *args, **kwargs):
super(StudentForm, self).__init__(*args, **kwargs)
class StudentEditForm(StudentForm):
"""Semi-restricted form available to coordinators who are editing students"""
def __init__(self, *args, **kwargs):
super(StudentEditForm, self).__init__(*args, **kwargs)
self.fields['student_id'].disabled=True
class StudentRestrictedForm(ModelForm):
"""Restricted form available to students who are editing their own profiles"""
class Meta:
model = Student
fields = ('ccid', 'first_name', 'middle_name', 'last_name', 'email', 'lang_pref', 'program', 'year', 'student_id', 'gpa', 'credits')
widgets = {
"student_id": TextInput
}
def __init__(self, *args, **kwargs):
super(StudentRestrictedForm, self).__init__(*args, **kwargs)
self.fields['ccid'].disabled=True
self.fields['email'].disabled=True
self.fields['year'].disabled=True
self.fields['student_id'].disabled=True
self.fields['gpa'].disabled=True
self.fields['credits'].disabled=True
self.fields['first_name'].disabled=True
self.fields['middle_name'].disabled=True
self.fields['last_name'].disabled=True
class StudentReadOnlyForm(StudentForm):
"""Read-only form available to coordinators and adjudicators who are viewing students"""
def __init__(self, *args, **kwargs):
super(StudentReadOnlyForm, self).__init__(*args, **kwargs)
for field in self.fields:
self.fields[field].disabled = True
|
StarcoderdataPython
|
166325
|
<filename>src/game/law_scope.py<gh_stars>1-10
from enum import Enum, auto
class LawScope(Enum):
state = auto()
city = auto()
|
StarcoderdataPython
|
199682
|
import re
from collections import deque
from contextlib import closing
from cStringIO import StringIO
from flanker.mime.message.headers.parsing import parse_stream
from flanker.mime.message.headers import MimeHeaders
def detect(message):
headers = collect(message)
return Result(
score=len(headers) / float(len(HEADERS)),
status=get_status(headers),
notification=get_notification(message),
diagnostic_code=headers.get('Diagnostic-Code'))
def collect(message):
collected = deque()
for p in message.walk(with_self=True):
for h in HEADERS:
if h in p.headers:
collected.append((h, p.headers[h]))
if p.content_type.is_delivery_status():
collected += collect_from_status(p.body)
return MimeHeaders(collected)
def collect_from_status(body):
out = deque()
with closing(StringIO(body)) as stream:
for i in xrange(3):
out += parse_stream(stream)
return out
def get_status(headers):
for v in headers.getall('Status'):
if RE_STATUS.match(v.strip()):
return v
def get_notification(message):
for part in message.walk():
if part.headers.get('Content-Description',
'').lower() == 'notification':
return part.body
HEADERS = ('Action',
'Content-Description',
'Diagnostic-Code',
'Final-Recipient',
'Received',
'Remote-Mta',
'Reporting-Mta',
'Status')
RE_STATUS = re.compile(r'\d\.\d+\.\d+', re.IGNORECASE)
class Result(object):
def __init__(self, score, status, notification, diagnostic_code):
self.score = score
self.status = status
self.notification = notification
self.diagnostic_code = diagnostic_code
def __repr__(self):
return (u'bounce.Result(status={}, score={}, notification={},'
u' diag_code={})'.format(self.status, self.score,
self.notification,
self.diagnostic_code))
|
StarcoderdataPython
|
3212542
|
<filename>airbyte-integrations/connectors/source-mailgun/source_mailgun/__init__.py
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from .source import SourceMailgun
__all__ = ["SourceMailgun"]
|
StarcoderdataPython
|
3395541
|
from aiogram import types, Dispatcher
from aiogram.dispatcher.storage import FSMContext
from app.states import CheckState
from app.db_worker import db_worker
from asyncio import sleep
# gives the user instructions
async def begining(message: types.Message, state: FSMContext):
await state.finish()
db_worker.add_user(message.from_user.id)
# await message.answer('Привет! )))))', reply_markup= types.ReplyKeyboardRemove())
# await message.answer(
# 'Начнем. Вначале я должен узнать, куда ты хочешь поступить. '
# 'Ты можешь посмотреть, какие ВУЗы есть в базе данных и указать их. ',
# reply_markup= keyboard_inline
# )
# await message.answer(
# 'Если какого-то ВУЗа нет, то надо будет указать 3 ссылки на него. '
# 'С этих 3-х сайтов:\n'
# 'https://tabiturient.ru\n'
# 'https://vuzopedia.ru\n'
# 'https://ucheba.ru/for-abiturients/vuz\n'
# "<i>(Надо зайти на сайт, ввести название ВУЗа, зайти на страничку и скопировать ссылку, "
# "потом просто отправить сюда. Некоторое не понимают, но <b>1 ВУЗ = 3 ссылки</b>, к сожалению для вас.)</i>\n\n"
# "<i>Этим ты поможешь нам расширять базу данных.</i>",
# disable_web_page_preview= True
# )
# await sleep(0.2)
# await message.answer('Когда закончишь указывать введи команду -> /finish1_0.')
# await sleep(0.2)
# await message.answer('А можешь вообще все ВУЗы указать, нажав сюда -> /finish1_1.')
await message.answer(
'Привет! )))))\n'
'Для начала, давай я узнаю, куда ты хочешь поступить.',
reply_markup= types.ReplyKeyboardRemove()
)
await message.answer(
'Есть 3 способа указать ВУЗы.'
)
await message.answer(
'<b>Первый.</b>\nПросто посмотреть, какие ВУЗы есть в базе, и указать их.'
)
await message.answer(
'<b>Второй.</b>\nМожно указать ВУЗ по 3 ссылкам:\n'
'https://tabiturient.ru\n'
'https://vuzopedia.ru\n'
'https://ucheba.ru/for-abiturients/vuz\n'
'<i>(Уточню, что 1 ВУЗ = 3 ссылки. Таким образом, ты поможешь пополнять базу.)</i>',
disable_web_page_preview= True
)
await message.answer(
'Для 1 и 2 способа надо нажать -> /finish1, когда закончишь вводить <i>ВСЕ</i> ВУЗы'
)
keyboard_inline = types.InlineKeyboardMarkup()
keyboard_inline.add(
types.InlineKeyboardButton(
text= 'Посмотреть ВУЗы в базе',
switch_inline_query_current_chat=''
)
)
await message.answer(
'<b>Третий.</b>\nМожно выбрать <i>абсолютно</i> все ВУЗы из моей базы, нажав сюда -> /all.',
reply_markup= keyboard_inline
)
    # remember what the user has entered
await state.update_data(chosen_vuzes_in_base = [])
await state.update_data(chosen_vuzes_tabi = [])
await state.update_data(chosen_vuzes_vuzo = [])
await state.update_data(chosen_vuzes_uche = [])
await CheckState.waiting_for_put_vuz_in_mem.set()
await state.update_data(check1 = False)
await state.update_data(check2 = False)
await state.update_data(check3 = False)
def register_introduction(dp: Dispatcher):
dp.register_message_handler(begining, commands= 'start', state= '*')
|
StarcoderdataPython
|
29772
|
from logging import getLogger
from typing import Dict, List, Optional
from tmtrader.entity.order import FilledBasicOrder
from tmtrader.entity.position import ClosedPosition, Position, Positions, \
PositionsRef
from tmtrader.exchange_for_backtest.usecase.order_to_share import from_order
logger = getLogger(__name__)
PositionsDict = Dict[int, Positions]
ClosedPositions = List[ClosedPosition]
class PositionManager:
def __init__(self):
self.__positions_dic: PositionsDict = dict()
def current_positions_of(self, product_id: int) -> Optional[PositionsRef]:
if product_id in self.__positions_dic:
return self.__positions_dic[product_id].to_ref()
else:
return None
def current_positions(self) -> Dict[int, PositionsRef]:
return {k: v.to_ref() for k, v in self.__positions_dic.items() if
v.len}
def update_position(self,
order: FilledBasicOrder) -> ClosedPositions:
logger.debug(f'Got filled order at PositionManager: {order}')
pid = order.product_id
if pid in self.__positions_dic:
logger.debug(f'position size before update: {self.__positions_dic[pid].len}')
else:
logger.debug(f'position size before update: 0')
new_shares = from_order(order)
positions = None
if pid in self.__positions_dic:
positions = self.__positions_dic.pop(pid)
closed_pos = []
if positions and positions.is_long:
if order.is_buy:
self.__add_positions(pid, positions, new_shares)
else:
closed_pos = self.__close_and_may_open(pid, positions,
new_shares)
elif positions:
if order.is_buy:
closed_pos = self.__close_and_may_open(pid, positions,
new_shares)
else:
self.__add_positions(pid, positions, new_shares)
else:
self.__positions_dic[pid] = Positions(pid, new_shares,
order.is_buy)
if pid in self.__positions_dic:
logger.debug(
f'position size after update: {self.__positions_dic[pid].len}')
else:
logger.debug('position size after update: 0')
return closed_pos
def __add_positions(self, pid: int, positions: Positions,
new_shares: List[Position]):
positions.add_positions(new_shares)
self.__positions_dic[pid] = positions
def __close_and_may_open(self, pid: int, positions: Positions,
new_shares: List[Position]) -> ClosedPositions:
closed = positions.close_positions(new_shares)
if closed.remaining_contracts:
self.__positions_dic[pid] = Positions(pid,
closed.remaining_contracts,
is_long=not
positions.is_long)
else:
if positions.len:
self.__positions_dic[pid] = positions
return closed.closed
|
StarcoderdataPython
|
1703347
|
<gh_stars>1000+
"""Test getting __version__ for VTK package
"""
import vtkmodules
from vtkmodules.vtkCommonCore import vtkVersion
from vtkmodules.test import Testing
class TestVersion(Testing.vtkTest):
def testVersionAttribute(self):
"""Test the __version__ attribute
"""
x,y,z = vtkmodules.__version__.split(".")
self.assertEqual(x, str(vtkVersion.GetVTKMajorVersion()))
self.assertEqual(y, str(vtkVersion.GetVTKMinorVersion()))
self.assertEqual(z, str(vtkVersion.GetVTKBuildVersion()))
if __name__ == "__main__":
Testing.main([(TestVersion, 'test')])
|
StarcoderdataPython
|
3320633
|
<reponame>ragibson/ModularityPruning
from .shared_testing_functions import generate_connected_multilayer_ER, generate_random_partitions
from modularitypruning.champ_utilities import partition_coefficients_3D
from modularitypruning.louvain_utilities import multilayer_louvain_part_with_membership, \
check_multilayer_louvain_capabilities, louvain_part_with_membership
from random import seed
import unittest
class TestCHAMPCoefficients3D(unittest.TestCase):
def assert_partition_coefficient_correctness(self, G_intralayer, G_interlayer, layer_membership,
partitions, coefficients):
if not check_multilayer_louvain_capabilities(fatal=False):
# just return since this version of louvain is unable to perform multilayer parameter estimation anyway
return
A_hats, P_hats, C_hats = coefficients
for membership, A_hat, P_hat, C_hat in zip(partitions, A_hats, P_hats, C_hats):
intralayer_part, interlayer_part = multilayer_louvain_part_with_membership(G_intralayer, G_interlayer,
layer_membership,
community_membership=membership)
# Q_intralayer(gamma=0) = sum_{ij} A_{ij} delta(c_i, c_j) = A_hat
louvain_A_hat = intralayer_part.quality(resolution_parameter=0)
# Q_intralayer(gamma=0) - Q_intralayer(gamma=1)
# = sum_{ij} (A_{ij} - gamma*P_{ij} - A_{ij}) delta(c_i, c_j)
# = sum_{ij} P_{ij} delta(c_i, c_j)
# = P_hat
louvain_P_hat = louvain_A_hat - intralayer_part.quality(resolution_parameter=1)
# Q_interlayer(omega=0)
# = sum_{ij} (C_{ij} - omega*P{ij}) delta(c_i, c_j)
# = sum_{ij} C_{ij} delta(c_i, c_j)
# = C_hat
louvain_C_hat = interlayer_part.quality(resolution_parameter=0)
self.assertAlmostEqual(A_hat, louvain_A_hat, places=10)
self.assertAlmostEqual(P_hat, louvain_P_hat, places=10)
self.assertAlmostEqual(C_hat, louvain_C_hat, places=10)
# Also test against an alternate, per-layer calculation of P_hat
alternate_P_hat = 0
for layer in set(layer_membership):
this_layer_indices = [i for i, l in enumerate(layer_membership) if layer == l]
layer_subgraph = G_intralayer.subgraph(this_layer_indices)
layer_community_membership = [community for i, community in enumerate(membership)
if layer_membership[i] == layer]
layer_part = louvain_part_with_membership(layer_subgraph, layer_community_membership)
alternate_P_hat += (layer_part.quality(resolution_parameter=0.0) -
layer_part.quality(resolution_parameter=1.0))
self.assertAlmostEqual(alternate_P_hat, P_hat, places=10)
def assert_partition_coefficient_correctness_unweighted_ER(self, num_nodes_per_layer=100, m=25000, num_layers=10,
directed=False, num_partitions=10, K_max=10):
G_intralayer, G_interlayer, layer_membership = generate_connected_multilayer_ER(
num_nodes_per_layer=num_nodes_per_layer, m=m, num_layers=num_layers, directed=directed)
partitions = generate_random_partitions(num_nodes=G_intralayer.vcount(), num_partitions=num_partitions,
K_max=K_max)
coefficients = partition_coefficients_3D(G_intralayer, G_interlayer, layer_membership, partitions)
self.assert_partition_coefficient_correctness(G_intralayer, G_interlayer, layer_membership, partitions,
coefficients)
def test_partition_coefficient_correctness_undirected_unweighted_varying_num_nodes_per_layer(self):
for num_nodes_per_layer in [50, 100, 250, 500]:
self.assert_partition_coefficient_correctness_unweighted_ER(num_nodes_per_layer=num_nodes_per_layer,
m=50 * num_nodes_per_layer)
def test_partition_coefficient_correctness_undirected_unweighted_varying_m(self):
for m in [5000, 10000, 15000, 20000]:
self.assert_partition_coefficient_correctness_unweighted_ER(m=m)
def test_partition_coefficient_correctness_undirected_unweighted_varying_num_layers(self):
for num_layers in [5, 10, 20, 30]:
self.assert_partition_coefficient_correctness_unweighted_ER(num_layers=num_layers)
def test_partition_coefficient_correctness_undirected_unweighted_varying_num_partitions(self):
for num_partitions in [5, 10, 100, 250]:
self.assert_partition_coefficient_correctness_unweighted_ER(num_partitions=num_partitions)
def test_partition_coefficient_correctness_undirected_unweighted_varying_K_max(self):
for K_max in [2, 5, 10, 25]:
self.assert_partition_coefficient_correctness_unweighted_ER(K_max=K_max)
def test_partition_coefficient_correctness_directed_unweighted_varying_num_nodes_per_layer(self):
for num_nodes_per_layer in [50, 100, 250, 500]:
self.assert_partition_coefficient_correctness_unweighted_ER(num_nodes_per_layer=num_nodes_per_layer,
m=100 * num_nodes_per_layer, directed=True)
def test_partition_coefficient_correctness_directed_unweighted_varying_m(self):
for m in [10000, 20000, 30000, 40000]:
self.assert_partition_coefficient_correctness_unweighted_ER(m=m, directed=True)
def test_partition_coefficient_correctness_directed_unweighted_varying_num_layers(self):
for num_layers in [5, 10, 20, 30]:
self.assert_partition_coefficient_correctness_unweighted_ER(num_layers=num_layers, directed=True)
def test_partition_coefficient_correctness_directed_unweighted_varying_num_partitions(self):
for num_partitions in [5, 10, 100, 250]:
self.assert_partition_coefficient_correctness_unweighted_ER(directed=True, num_partitions=num_partitions)
def test_partition_coefficient_correctness_directed_unweighted_varying_K_max(self):
for K_max in [2, 5, 10, 25]:
self.assert_partition_coefficient_correctness_unweighted_ER(directed=True, K_max=K_max)
def test_partition_coefficient_correctness_interleaved_directedness(self):
"""Test partition coefficient correctness when directedness of interlayer and intralayer edges do not match."""
if not check_multilayer_louvain_capabilities(fatal=False):
# just return since this version of louvain is unable to perform multilayer parameter estimation anyway
return
# Intralayer directed edges, but interlayer undirected ones
G_intralayer, G_interlayer, layer_membership = generate_connected_multilayer_ER(num_nodes_per_layer=100, m=5000,
num_layers=10, directed=False)
G_intralayer.to_directed()
partitions = generate_random_partitions(num_nodes=G_intralayer.vcount(), num_partitions=10, K_max=10)
coefficients = partition_coefficients_3D(G_intralayer, G_interlayer, layer_membership, partitions)
self.assert_partition_coefficient_correctness(G_intralayer, G_interlayer, layer_membership, partitions,
coefficients)
# Interlayer directed edges, but intralayer undirected ones
G_intralayer, G_interlayer, layer_membership = generate_connected_multilayer_ER(num_nodes_per_layer=100, m=5000,
num_layers=10, directed=False)
G_interlayer.to_directed()
partitions = generate_random_partitions(num_nodes=G_intralayer.vcount(), num_partitions=10, K_max=10)
coefficients = partition_coefficients_3D(G_intralayer, G_interlayer, layer_membership, partitions)
self.assert_partition_coefficient_correctness(G_intralayer, G_interlayer, layer_membership, partitions,
coefficients)
if __name__ == "__main__":
seed(0)
unittest.main()
|
StarcoderdataPython
|
3356475
|
<gh_stars>0
from typing import Optional, Dict
from id_definition.error_codes import VizErrorCode
class VizException(Exception):
status_code = 400
code = VizErrorCode.GENERAL_ERROR
message = "Exception Occured"
def __init__(
self, message: Optional[str] = None, status_code: Optional[int] = None, code: Optional[int] = None,
):
super().__init__()
self.status_code = status_code or self.status_code
self.code = code or self.code
self.message = message or self.message
def __str__(self) -> str:
return f"error_code: {self.code}, message: {self.message}"
def to_dict(self) -> Dict:
return {
"status_code": self.status_code,
"code": self.code,
"message": self.message,
}
class BranchNotExists(VizException):
code = VizErrorCode.BRANCH_NOT_EXISTS
message = "branch not found"
class ModelNotExists(VizException):
code = VizErrorCode.MODEL_NOT_EXISTS
message = "model not found"
|
StarcoderdataPython
|
1693027
|
from CvPythonExtensions import *
import CvUtil
gc = CyGlobalContext()
class CvPediaProject:
def __init__(self, main):
self.iProject = -1
self.top = main
self.X_INFO_PANE = self.top.X_PEDIA_PAGE
self.Y_INFO_PANE = self.top.Y_PEDIA_PAGE
self.W_INFO_PANE = 380 #290
self.H_INFO_PANE = 120
self.W_ICON = 100
self.H_ICON = 100
self.X_ICON = self.X_INFO_PANE + 10
self.Y_ICON = self.Y_INFO_PANE + 10
self.ICON_SIZE = 64
self.X_INFO_TEXT = self.X_INFO_PANE + 110
self.Y_INFO_TEXT = self.Y_ICON + 15
self.W_INFO_TEXT = self.W_INFO_PANE - 70
self.H_INFO_TEXT = self.H_INFO_PANE - 20
self.X_REQUIRES = self.X_INFO_PANE + self.W_INFO_PANE + 10
self.W_REQUIRES = self.top.R_PEDIA_PAGE - self.X_REQUIRES
self.H_REQUIRES = 110
self.Y_REQUIRES = self.Y_INFO_PANE + self.H_INFO_PANE - self.H_REQUIRES
self.X_DETAILS = self.X_INFO_PANE
self.Y_DETAILS = self.Y_INFO_PANE + self.H_INFO_PANE + 10
self.W_DETAILS = self.top.R_PEDIA_PAGE - self.X_DETAILS
self.H_DETAILS = 210
self.X_HISTORY = self.X_DETAILS
self.W_HISTORY = self.top.R_PEDIA_PAGE - self.X_HISTORY
self.Y_HISTORY = self.Y_DETAILS + self.H_DETAILS + 10
self.H_HISTORY = self.top.B_PEDIA_PAGE - self.Y_HISTORY
def interfaceScreen(self, iProject):
self.iProject = iProject
screen = self.top.getScreen()
self.placeInfo()
self.placeRequires()
self.placeDetails()
self.placeHistory()
def placeInfo(self):
screen = self.top.getScreen()
panel = self.top.getNextWidgetName()
info = gc.getProjectInfo(self.iProject)
screen.addPanel(self.top.getNextWidgetName(), "", "", False, False, self.X_INFO_PANE, self.Y_INFO_PANE, self.W_INFO_PANE, self.H_INFO_PANE, PanelStyles.PANEL_STYLE_BLUE50)
screen.addPanel(self.top.getNextWidgetName(), "", "", False, False, self.X_ICON, self.Y_ICON, self.W_ICON, self.H_ICON, PanelStyles.PANEL_STYLE_MAIN)
screen.addDDSGFC(self.top.getNextWidgetName(), info.getButton(), self.X_ICON + self.W_ICON / 2 - self.ICON_SIZE / 2, self.Y_ICON + self.H_ICON / 2 - self.ICON_SIZE / 2, self.ICON_SIZE, self.ICON_SIZE, WidgetTypes.WIDGET_GENERAL, -1, -1)
screen.addListBoxGFC(panel, "", self.X_INFO_TEXT, self.Y_INFO_TEXT, self.W_INFO_TEXT, self.H_INFO_TEXT, TableStyles.TABLE_STYLE_EMPTY)
screen.enableSelect(panel, False)
screen.appendListBoxString(panel, u"<font=4b>" + info.getDescription() + u"</font>", WidgetTypes.WIDGET_GENERAL, 0, 0, CvUtil.FONT_LEFT_JUSTIFY)
screen.appendListBoxString(panel, u"<font=3>Project</font>", WidgetTypes.WIDGET_GENERAL, 0, 0, CvUtil.FONT_LEFT_JUSTIFY)
if info.getProductionCost() >= 0:
if self.top.iActivePlayer == -1:
iCost = (info.getProductionCost() * gc.getDefineINT('PROJECT_PRODUCTION_PERCENT')) / 100
else:
iCost = gc.getActivePlayer().getProjectProductionNeeded(self.iProject)
szCost = u"Cost: %d%c" % (iCost, gc.getYieldInfo(YieldTypes.YIELD_PRODUCTION).getChar())
screen.appendListBoxString(panel, u"<font=3>" + szCost + u"</font>", WidgetTypes.WIDGET_GENERAL, 0, 0, CvUtil.FONT_LEFT_JUSTIFY)
def placeRequires(self):
screen = self.top.getScreen()
panel = self.top.getNextWidgetName()
info = gc.getProjectInfo(self.iProject)
screen.addPanel(panel, CyTranslator().getText("TXT_KEY_PEDIA_REQUIRES", ()), "", False, True, self.X_REQUIRES, self.Y_REQUIRES, self.W_REQUIRES, self.H_REQUIRES, PanelStyles.PANEL_STYLE_BLUE50)
screen.attachLabel(panel, "", " ")
iTech = info.getTechPrereq()
        if iTech > -1:
screen.attachImageButton(panel, "", gc.getTechInfo(iTech).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_TECH, iTech, 1, False)
iAnyoneProjectPrereq = info.getAnyoneProjectPrereq()
if iAnyoneProjectPrereq != -1:
screen.attachImageButton(panel, "", gc.getProjectInfo(iAnyoneProjectPrereq).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_PROJECT, iAnyoneProjectPrereq, 1, False)
for iProject in range(gc.getNumProjectInfos()):
if info.getProjectsNeeded(iProject) > 0:
screen.attachImageButton(panel, "", gc.getProjectInfo(iProject).getButton(), GenericButtonSizes.BUTTON_SIZE_CUSTOM, WidgetTypes.WIDGET_PEDIA_JUMP_TO_PROJECT, iProject, 1, False)
def placeDetails(self):
screen = self.top.getScreen()
panel = self.top.getNextWidgetName()
text = self.top.getNextWidgetName()
info = gc.getProjectInfo(self.iProject)
screen.addPanel(panel, CyTranslator().getText("TXT_KEY_PEDIA_DETAILS", ()), "", True, False, self.X_DETAILS, self.Y_DETAILS, self.W_DETAILS, self.H_DETAILS, PanelStyles.PANEL_STYLE_BLUE50)
szText = CyGameTextMgr().getProjectHelp(self.iProject, True, None)[1:]
screen.addMultilineText(text, szText, self.X_DETAILS + 5, self.Y_DETAILS + 30, self.W_DETAILS - 10, self.H_DETAILS - 35, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
def placeHistory(self):
screen = self.top.getScreen()
panel = self.top.getNextWidgetName()
text = self.top.getNextWidgetName()
info = gc.getProjectInfo(self.iProject)
screen.addPanel(panel, CyTranslator().getText("TXT_KEY_CIVILOPEDIA_HISTORY", ()), "", True, True, self.X_HISTORY, self.Y_HISTORY, self.W_HISTORY, self.H_HISTORY, PanelStyles.PANEL_STYLE_BLUE50 )
szText = info.getCivilopedia()
screen.addMultilineText(text, szText, self.X_HISTORY + 10, self.Y_HISTORY + 30, self.W_HISTORY - 20, self.H_HISTORY - 40, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)
def handleInput (self, inputClass):
return 0
|
StarcoderdataPython
|
20079
|
<reponame>bateman-research/search-sifter
import pytest
import searchsifter.relationships.minhash as mh
import searchsifter.relationships.jaccard as jc
@pytest.mark.parametrize("a, b, result", [
({1, 2}, {2}, 0.5),
({1, 2}, {2, 3}, 1/3),
({1}, {2}, 0),
({1}, {1}, 1)
])
def test_jaccard(a, b, result):
assert jc.jaccard(a, b) == result
assert jc.jaccard(b, a) == result
@pytest.mark.parametrize("a, b, result", [
({1, 2}, {2}, 0.5),
({1}, {1, 2}, 1),
({1, 2}, {2, 3}, 0.5),
({1}, {2}, 0),
({1}, {1}, 1),
])
def test_jaccard_containment(a, b, result):
assert jc.jaccard_containment(a, b) == result
@pytest.mark.parametrize("a, b, result", [
({1, 2}, {2}, 0.5),
({1, 2}, {2, 3}, 1/3),
({1}, {2}, 0),
({1}, {1}, 1)
])
def test_minhash(a, b, result):
s, t = mh.signature(a, 5), mh.signature(b, 5)
assert mh.minhash(s, t, 5) == result
assert mh.minhash(t, s, 5) == result
@pytest.mark.parametrize("a, b, result", [
({1, 2}, {2}, 0.5),
({1}, {1, 2}, 1),
({1, 2}, {2, 3}, 0.5),
({1}, {2}, 0),
({1}, {1}, 1),
])
def test_minhash_containment(a, b, result):
s, t = mh.signature(a, 5), mh.signature(b, 5)
assert mh.minhash_containment(s, t) == result
@pytest.fixture
def a():
return set(range(1, 100))
@pytest.fixture
def b():
return set(range(50, 100))
@pytest.fixture
def c():
return set(range(75, 100))
def test_intersection(a, b, c):
assert mh.intersection_signature(a, b) == set(range(50, 100))
assert mh.intersection_signature(a, b, c) == set(range(75, 100))
def test_union(a, b):
assert mh.union_signature(a, b, 100) == a
assert len(mh.union_signature(a, b, 20)) == 20
|
StarcoderdataPython
|
3291864
|
from scipy.stats import scoreatpercentile
import re
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
try:
from . import plotting
except ImportError:
print( 'WARNING: unable to import "plotting" in stats. some functions may be disabled' )
from . import reshape
class BlockMath( reshape.BlockReshaper ):
"""
Child class of BlockReshaper that adds relevant mathematics capabilities to reshaping functionality.
Routines herein will return an array of size
( data.shape[0]/roi_R, data.shape[1]/roi_C ) = ( numR, numC )
Note the blockR, blockC nomenclature change to roi_R, roi_C for the averaging block size, to avoid confusion with chip
block[R,C] in chipinfo.
goodpix is a boolean array of the same size as data where pixels to be averaged are True. By default all pixels are averaged
raising finite sets all nans and infs to 0
For large data sets on computers with limited memory, it may help to set superR and superC to break the data into chunks
This is especially true for masked reshapes where we now have 2x the data size needing to be in memory.
"""
def massage_output( self, output_obj ):
""" Applies finite correction and also deals with whether the object is of type np.ma.core.MaskedArray """
if isinstance( output_obj, ma.core.MaskedArray ):
output = output_obj.data
else:
output = output_obj
if self.finite:
output[ np.isnan(output) ] = 0
output[ np.isinf(output) ] = 0
return output
def get_mean( self ):
return self.massage_output( self.reshaped.mean(2) )
get_avg = get_mean
get_average = get_mean
def get_std( self ):
return self.massage_output( self.reshaped.std(2) )
def get_sum( self ):
return self.massage_output( self.reshaped.sum(2) )
def get_var( self ):
return self.massage_output( self.reshaped.var(2) )
def get_vmr( self ):
'''
Calculates variance-mean-ratio . . . sigma^2 / mean
Note that here we are going to define superpixels with zero means to be 0 VMR, not np.nan.
'''
means = self.get_mean()
msk = (means == 0)
means[msk] = 1
variance = self.get_var()
vmr = variance / means
vmr[msk] = 0
return self.massage_output( vmr )
def named_stats( data, name='', histDir='', metricType='', maxsize=None, histlims=None ):
'''
This is a higher level version of percentiles
Calculates data distribution properties and returns a dictionary
of metrics, each labeled with "name". If name is unspecified,
no flags will be applied.
If you specify histDir, then a histogram is generated and saved.
If you don't, then the mode will not be returned
metricType is the type of metric (e.g. "buffering", not "W2_buffering")
and is only used for making histograms. Defaults to name if unspecified
For very large datasets, you may want to specify a maxsize to speed up the calculation
'''
# Make sure the data is 1D
flat = data.flatten()
# Downsample large data sets
if maxsize:
scale = int(max( 1, np.ceil( len(flat)/maxsize ) ))
flat = flat[::scale]
# Append an underscore to the end of the name to separate from metrics
if name:
if name[-1] != '_':
name += '_'
# Calculate the metrics
metrics = {}
metrics[ '%smean' % name ] = flat[ ~np.isnan( flat ) ].mean()
metrics[ '%sstd' % name ] = flat[ ~np.isnan( flat ) ].std()
percs = percentiles( flat )
metrics[ '%sP10' % name ] = float( percs['p10'] )
metrics[ '%sP90' % name ] = float( percs['p90'] )
metrics[ '%sq1' % name ] = float( percs['q1'] )
metrics[ '%sq2' % name ] = float( percs['q2'] )
metrics[ '%sq3' % name ] = float( percs['q3'] )
metrics[ '%siqr' % name ] = float( percs['iqr'] )
# Plot the histogram
if histDir:
# Set limits for histogram if free limits were used
if not metricType:
metricType = name
lims, units = plotting.plot_params( metricType, adjust=metrics[ '%smean' % name ] )
if histlims:
lims = histlims
if lims[0] is None:
lims[0] = flat.min()
if lims[1] is None:
lims[1] = flat.max()
if not len( flat ):
flat = np.zeros(1)
if re.search( r'phslopes$', name ):
bins = 41
else:
bins = 101
hn,hb,_ = plt.hist ( flat , bins = np.linspace( lims[0] , lims[1] , bins ) , align='mid' , hold=False )
metrics[ '%smode' % name ] = hist_mode( hn , hb )
plt.xlabel ( '%s (%s)' % (metricType, units) )
plt.xlim ( lims[0] , lims[1] )
try:
if name[-1] == '_':
plotname = name[:-1]
else:
plotname = name
except IndexError:
plotname = name
plt.title ( '%s | (Mean, SD, P90) = (%.1f, %.1f, %.1f) %s\n(Q2, IQR, Mode) = (%.1f, %.1f, %.1f) %s' %
( plotname,
metrics['%smean' % name],
metrics['%sstd' % name],
metrics['%sP90' % name],
units ,
metrics['%sq2' % name],
metrics['%siqr' % name],
metrics['%smode' % name],
units ))
plt.savefig ( '%s/%shistogram.png' % ( histDir, name ))
plt.close ( )
return metrics
def hist_mode( n , bins ):
"""
Simple routine to get the mode of the distribution of a histogram.
In the rare case that more than one modes are found, we take the average.
If its a bimodal distribution, we wouldn't believe this value anyway.
"""
mode = bins[ np.where( n == n.max() ) ]
# Non-ideal solution
if len( mode ) > 1:
return mode.mean()
else:
return mode[0]
def percentiles( data , extras=[]):
"""
This used to be "ecc_tools.pp"
    This is much faster than computing each percentile separately, since the data is sorted once rather than six times for six different numbers.
"""
data.sort()
percs = [ 10., 25., 50., 75., 90. ]
for i in extras:
percs.append( float( i ) )
names = ('q1','q2','q3','iqr','p10','p90','m80')
output = dict.fromkeys(names,0)
N = float( len( data ) )
#print ('N = %.0f' % N )
for i in range( len(percs) ):
#print percs[i]
r = percs[i] / 100. * N - 0.5
#print( 'r = %.2f' % r )
if r%1 == 0:
p = data[int(r)]
else:
try:
p = (data[int(np.ceil(r))] - data[int(np.floor(r))] ) * (r - np.floor(r)) + data[int(np.floor(r))]
#print p
except:
p = np.nan
#print p
if not np.isnan(p):
if percs[i] == 25:
output['q1'] = p
elif percs[i] == 50:
output['q2'] = p
elif percs[i] == 75:
output['q3'] = p
else:
output['p%i' % percs[i]] = p
output['iqr'] = output['q3'] - output['q1']
output['m80'] = output['p90'] - output['p10']
return output
def calc_iqr( data ):
""" Single function that will return the IQR of a given dataset. """
return percentiles(data)['iqr']
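# Quick sanity check (not part of the original module) of the percentile summary on the
# integers 1..100; note that percentiles() sorts its argument in place, so pass a copy
# if the original order matters.
# >>> percentiles( np.arange( 1., 101. ) )['q2']
# 50.5
# >>> calc_iqr( np.arange( 1., 101. ) )
# 50.0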
def calc_blocksize( data, nominal=(100,100) ):
'''Returns block dimensions that are as close to log10(nominal) as possible'''
X, Y = data.shape
x_nom, y_nom = nominal
x_low = None
y_low = None
x_high = None
y_high = None
x_fin = None
y_fin = None
def set_low_and_high( nominal, array_dim ):
low = None
high = None
for v in range( nominal, 0, -1 ):
if array_dim%v == 0:
low = v
break
for v in range( nominal+1, array_dim, 1 ):
if array_dim%v == 0:
high = v
break
return (low,high,)
def set_final( low, high, nominal ):
final = None
def compare_to_nominal( low, high, nominal ):
dif_log_low = np.absolute( np.log10( nominal ) - np.log10( low ) )
dif_log_high = np.absolute( np.log10( nominal ) - np.log10( high ) )
if dif_log_low < dif_log_high:
return low
else:
return high
if ( low is not None) and ( high is not None):
final = compare_to_nominal( low, high, nominal )
elif low is not None:
final = low
elif high is not None:
final = high
return final
x_low, x_high = set_low_and_high( x_nom, X )
y_low, y_high = set_low_and_high( y_nom, Y )
x_fin = set_final( x_low, x_high, x_nom )
y_fin = set_final( y_low, y_high, y_nom )
return (x_fin, y_fin,)
def uniformity( data, blocksize, exclude=None, only_values=False, iqr=True, std=False ):
'''
Calculates the uniformity of the input data.
block types are specified in chips.csv
Usual values are ( 'mini', 'micro', chip' )
Returns a dictionary
'q2' : Median of full chip array
'iqr' : IQR of full chip array
'blocks_q2' : 2D array of local medians
'blocks_iqr' : 2D array of local IQRs
'blocks_var' : IQR of blocks_q2
'iqr_ratio' : blocks_var / iqr
'blocks_iqrvar' : IQR of blocks_iqr
'mean' : mean of full chip array
'std' : std of full chip array
'blocks_mean' : 2D array of local means
'blocks_std' : 2D array of local stds
'blocks_mvar' : std of blocks_mean
'std_ratio' : blocks_mvar / std
'blocks_stdmvar' : std of blocks_mean
    If only std is selected (iqr=False), the q2 and IQR properties are not calculated, which may be faster.
'''
def block_reshape( data, blocksize ):
rows, cols = data.shape
        numR = rows // blocksize[0]
        numC = cols // blocksize[1]
return data.reshape(rows , numC , -1 ).transpose((1,0,2)).reshape(numC,numR,-1).transpose((1,0,2))
output = {}
if exclude is not None:
data = data.astype( np.float )
data[ exclude.astype( np.bool ) ] = np.nan
data[ np.isinf(data) ] = np.nan
# Divide into miniblocks
blocks = block_reshape( data, blocksize )
    newsize = ( data.shape[0] // blocksize[0] ,
                data.shape[1] // blocksize[1] )
if iqr:
percs = [ 25, 50, 75 ]
# Calculate the full data properties
flat = data[ ~np.isnan( data ) ].flatten()
flat.sort()
numwells = len( flat )
levels = {}
for perc in percs:
r = perc / 100. * numwells - 0.5
if r%1 == 0:
p = flat[int(r)]
else:
try:
                    p = (flat[int(np.ceil(r))] - flat[int(np.floor(r))] ) * (r - np.floor(r)) + flat[int(np.floor(r))]
except:
p = np.nan
levels[perc] = p
output[ 'q2' ] = levels[50]
output[ 'iqr' ] = levels[75] - levels[25]
# Calculate the local properties
blocks.sort( axis=2 )
numwells = (~np.isnan( blocks )).sum( axis=2 )
regions = {}
def get_inds( blocks, inds2d, inds ):
selected_blocks = blocks[inds2d]
selected_wells = inds.astype( np.int )[inds2d]
rows = range( selected_wells.size )
output = selected_blocks[ rows, selected_wells ]
return output
for perc in percs:
r = perc / 100. * numwells - 0.5
r[ r < 0 ] = 0
            r[ r > numwells ] = numwells[ r > numwells ]
inds = r%1 == 0
p = np.empty( r.shape )
p[:] = np.nan
p[ inds ] = get_inds( blocks, inds, r )
ceil_vals = get_inds( blocks, ~inds, np.ceil(r) )
floor_vals = get_inds( blocks, ~inds, np.floor(r) )
deltas = r[~inds] - np.floor(r[~inds] )
p[ ~inds ] = ( ceil_vals - floor_vals ) * deltas + floor_vals
p = p.reshape( newsize )
regions[perc] = p
blocks_iqr = regions[75] - regions[25]
if not only_values:
output[ 'blocks_q2' ] = regions[50]
output[ 'blocks_iqr' ] = blocks_iqr
        # Calculate the properties of the regional averages
flat = regions[50][ ~np.isnan( regions[50] ) ].flatten()
flat.sort()
numwells = len( flat )
levels = {}
for perc in percs:
r = perc / 100. * numwells - 0.5
if r%1 == 0:
p = flat[int(r)]
else:
try:
                    p = ( flat[int(np.ceil(r))] - flat[int(np.floor(r))] ) * (r - np.floor(r)) + flat[int(np.floor(r))]
except:
p = np.nan
levels[perc] = p
output[ 'blocks_var' ] = levels[75] - levels[25]
output['iqr_ratio'] = output[ 'blocks_var' ] / output[ 'iqr' ]
# Calculate the variability of the regional variability
flat = blocks_iqr[ ~np.isnan( regions[50] ) ].flatten()
flat.sort()
numwells = len( flat )
levels = {}
for perc in percs:
r = perc / 100. * numwells - 0.5
if r%1 == 0:
p = flat[int(r)]
else:
try:
                    p = ( flat[int(np.ceil(r))] - flat[int(np.floor(r))] ) * (r - np.floor(r)) + flat[int(np.floor(r))]
except:
p = np.nan
levels[perc] = p
output[ 'blocks_iqrvar' ] = levels[75] - levels[25]
if std:
output[ 'mean' ] = np.nanmean( data )
output[ 'std' ] = np.nanstd( data )
block_mean = np.nanmean( blocks, axis=2 ).reshape( newsize )
block_std = np.nanstd( blocks, axis=2 ).reshape( newsize )
if not only_values:
output[ 'blocks_mean' ] = block_mean
output[ 'blocks_std' ] = block_std
output[ 'blocks_mvar' ] = np.nanstd( block_mean )
output[ 'std_ratio' ] = output[ 'blocks_mvar' ] / output[ 'std' ]
output[ 'blocks_stdmvar' ] = np.nanstd( block_std )
return output
def chip_uniformity( data, chiptype, block='mini', exclude=None, only_values=False, iqr=True, std=False ):
'''
A wrapper on uniformity, designed to take a chiptype as an input instead of a block size
'''
blocksize = ( getattr( chiptype, '%sR' % block ),
getattr( chiptype, '%sC' % block ) )
output = uniformity( data, blocksize, exclude=exclude, only_values=only_values, iqr=iqr, std=std )
return output
################################################################################
# Averaging functions, include those brought over from average.py
################################################################################
def block_avg( data , blockR, blockC, goodpix=None, finite=False, superR=None, superC=None ):
"""
Averages the specified data, averaging regions of blockR x blockC
This returns a data of size
( data.shape[0]/blockR, data.shape[1]/blockC )
*****Note that blockR and blockC are not the same as blockR and blockC defaults by chiptype (fpga block rc shape)*****
goodpix is a boolean array of same size as data where pixels to be averaged are True. By default all pixels are averaged
raising finite sets all nans and infs to 0
For large data sets on computers with limited memory, it may help to set superR and superC to break the data into chunks
"""
bm = BlockMath( data , blockR, blockC, goodpix=goodpix, finite=finite, superR=superR, superC=superC )
return bm.get_mean()
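# Example (not in the original file), assuming BlockReshaper tiles the array into
# non-overlapping row-major blocks: averaging a 4x4 ramp in 2x2 blocks yields the
# 2x2 array of local means.
# >>> block_avg( np.arange( 16, dtype=float ).reshape( 4, 4 ), 2, 2 )
# array([[ 2.5,  4.5],
#        [10.5, 12.5]])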
def block_std( data, blockR, blockC, goodpix=None, finite=False, superR=None, superC=None ):
"""
Analog of block_avg that spits back the std.
*****Note that blockR and blockC are not the same as blockR and blockC defaults by chiptype (fpga block rc shape)*****
goodpix is a boolean array of same size as data where pixels to be averaged are True. By default all pixels are averaged
"""
bm = BlockMath( data , blockR, blockC, goodpix=goodpix, finite=finite, superR=superR, superC=superC )
return bm.get_std()
# This function works but needs some help
def BlockAvg3D( data , blocksize , mask ):
"""
3-D version of block averaging. Mainly applicable to making superpixel averages of datfile traces.
    Not sure non-averaging calcs make sense here.
    mask is currently expected to be a 2D boolean array of the same size as (data.shape[0], data.shape[1]), where pixels to be averaged are True.
"""
rows = data.shape[0]
cols = data.shape[1]
frames = data.shape[2]
if np.mod(rows,blocksize[0]) == 0 and np.mod(cols,blocksize[1]) == 0:
        blockR = rows // blocksize[0]
        blockC = cols // blocksize[1]
else:
print( 'Error, blocksize not evenly divisible into data size.')
return None
output = np.zeros((blockR,blockC,frames))
# Previous algorithm was slow and used annoying looping
    # Improved algorithm that doesn't need any looping; takes about 1.4 seconds instead of 60.
msk = np.array( mask , float )
msk.resize(rows, cols , 1 )
masked = np.array( data , float ) * np.tile( msk , ( 1 , 1 , frames ) )
step1 = masked.reshape(rows , blockC , -1 , frames).sum(2)
step2 = np.transpose(step1 , (1,0,2)).reshape(blockC , blockR , -1 , frames).sum(2)
step3 = np.transpose(step2 , (1,0,2))
mask1 = mask.reshape(rows , blockC , -1 ).sum(2)
count = mask1.transpose().reshape(blockC , blockR , -1).sum(2).transpose()
#mask1 = mask.reshape(rows , blockC , -1 , frames).sum(2)
#count = mask1.transpose().reshape(blockC , blockR , -1 , frames).sum(2).transpose()
output = step3 / count[:,:,np.newaxis]
output[ np.isnan(output) ] = 0
output[ np.isinf(output) ] = 0
return output
# I think these should just go away into their only calling modules. Perhaps. - PW 9/18/2019
# Pulled in from the average.py module. Only called by datfile.py
def masked_avg( image, pinned ):
'''
Calculates average trace while excluding pinned pixels.
If pinned = True, that pixel is excluded from the average.
Note: This is opposite compared to matlab functionality.
'''
avgtrace = np.mean ( image[ ~pinned ] , axis=0 )
return avgtrace
# Pulled in from average.py. Only called by chip.py.
def stripe_avg( data, ax ):
"""
Custom averaging algorithm to deal with pinned pixels
"""
# EDIT 10-11-13
# Utilize numpy.masked_array to do this much more quickly
# Not so custom anymore!
return ma.masked_array( data , (data == 0) ).mean( ax ).data
def top_bottom_diff( data, mask=None, force_sum=False ):
"""
Function to calculate the difference of a given chip data array between top and bottom.
Despite our conventions of plotting origin='lower' suggesting rows in [0,rows/2] as being the bottom, this is
physically chip top by layout. As such, we tie this metric to the physical chip layout and design since
when this different becomes very large, we would look to the design or FA to identify root cause.
This function returns the top - bottom difference, meaning if the bottom is larger, the output is negative.
We will by default treat the data as a float or int that should be averaged on each half before taking the
difference.For booleans, like pinned pixels, this would mean that we would get a decimal percentage.
To instead add up the values top and bottom (e.g. difference in absolute # of pinned pixels), set force_sum=True
"""
if force_sum:
op = np.sum
else:
op = np.mean
r,c = data.shape
mid = int( r/2 )
if mask is None:
top = op( data[:mid,:] )
bot = op( data[mid:,:] )
else:
if mask.shape != (r,c):
raise ValueError( "Input mask {} is not of the same shape as the data {}!".format( mask.shape,
data.shape ) )
top = op( data[:mid,:][ mask[:mid,:] ] )
bot = op( data[mid:,:][ mask[mid:,:] ] )
return top - bot
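# Example (not in the original file): for a 4x4 ramp, the top half (rows 0-1) averages 3.5
# and the bottom half (rows 2-3) averages 11.5, so the difference is -8.0.
# >>> top_bottom_diff( np.arange( 16, dtype=float ).reshape( 4, 4 ) )
# -8.0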
###########################################
# rename functions for historical reasons #
###########################################
HistMode = hist_mode
pp = percentiles
|
StarcoderdataPython
|
3313403
|
"""Create BayesCMD configuration file for PLOS simulated data."""
import pandas as pd
import json
from pathlib import Path
import os.path as op
p = Path(op.abspath(__file__))
from bayescmd.abc import priors_creator
current_file = Path(op.abspath(__file__))
param_df = pd.read_csv(op.join(current_file.parents[2],
'pdist_files',
'pdists_BS_PLOS_wide.csv'
),
header=None,
names=['Parameter', 'Dist. Type',
'Min', 'Max', 'Default'],
index_col=0)
# chosen_params = ['sigma_coll',
# 'R_auto',
# 'n_h',
# 'r_t',
# 'mu_max',
# 'n_m',
# 'r_m',
# 'P_v',
# 'phi',
# 'Xtot']
# experimental_params = [
# 'P_ic',
# 'v_on',
# 'n_m',
# 'T_max0',
# 'E_2',
# 'h_0',
# 'K_sigma',
# 'v_un',
# 'R_autc',
# 'v_cn'
# ]
filtered_params = ['n_m',
'r_m',
'K_sigma',
'p_tot',
'k_aut',
'v_cn',
'sigma_e0',
'k2_n',
'Xtot',
'R_autc']
prior_dict = priors_creator(param_df.loc[filtered_params, 'Default'].to_dict(),
0.5)
config_dict = {"model_name": "BS1-1",
"inputs": ["SaO2sup", "P_a", "Pa_CO2"],
"create_params": False,
"priors": prior_dict,
"targets": ["TOI", "CCO", "HbT", "HbD"],
"zero_flag": {
"TOI": False,
"CCO": True,
"HbD": True,
"HbT": True
},
"batch_debug": False,
"store_simulations": False
}
with open(op.join(current_file.parents[2],
'configuration_files',
'filtered_hypoxia_config.json'
),
'w') as f:
json.dump(config_dict, f)
|
StarcoderdataPython
|
1740763
|
from monolithe.generators.lib import TemplateFileWriter
from monolithe.specifications import SpecificationAttribute
from monolithe.lib import Printer
import os
import shutil
import json
class APIVersionWriter(TemplateFileWriter):
""" This class is reponsible to write files for a particular api version. """
def __init__(self, monolithe_config, api_info):
super(APIVersionWriter, self).__init__(package="monolithe.generators.lang.javascript")
output = monolithe_config.get_option("output", "transformer")
self.locale_on = monolithe_config.get_option("locale", "transformer", fallback=True)
self.model_directory = "%s/javascript/%s/models" % (output, api_info["version"])
self.abstract_directory = "%s/abstract" % self.model_directory
self.enum_directory = "%s/enums" % self.model_directory
self.locale_directory = "%s/javascript/%s/locales/en" % (output, api_info["version"])
if os.path.exists(self.model_directory):
shutil.rmtree(self.model_directory)
if os.path.exists(self.locale_directory):
shutil.rmtree(self.locale_directory)
self.api_root = api_info["root"]
self._class_prefix = monolithe_config.get_option("class_prefix", "transformer")
self._read_config()
def _read_config(self):
""" This method reads provided json config file.
"""
this_dir = os.path.dirname(__file__)
config_file = os.path.abspath(os.path.join(this_dir, "..", "config", "config.json"))
self.generic_enum_attrs = []
self.base_attrs = []
self.generic_enums = []
self.named_entity_attrs = []
self.overide_generic_enums = []
self.enum_attrs_for_locale = {}
self.generic_enum_attrs_for_locale = {}
Printer.log("Configuration file: %s" % (config_file))
if (os.path.isfile(config_file)):
with open(config_file, 'r') as input_json:
json_config_data = json.load(input_json)
self.base_attrs = json_config_data['base_attrs']
self.generic_enums = json_config_data['generic_enums']
self.named_entity_attrs = json_config_data['named_entity_attrs']
self.overide_generic_enums = json_config_data['overide_generic_enums']
for enum_name, values in self.generic_enums.iteritems():
enum_attr = SpecificationAttribute()
enum_attr.name = enum_name
enum_attr.allowed_choices = values
self.generic_enum_attrs.append(enum_attr)
else:
Printer.log("Configuration file missing: %s" % (config_file))
def perform(self, specifications):
""" This method is the entry point of javascript code writer. Monolithe will call it when
the javascript plugin is to generate code.
"""
self.enum_list = []
self.model_list = []
self.job_commands = filter(lambda attr: attr.name == 'command', specifications.get("job").attributes)[0].allowed_choices
#Printer.log("job_commands: %s" % (self.job_commands))
self._write_abstract_named_entity()
for rest_name, specification in specifications.iteritems():
self._write_model(specification=specification)
#self._write_generic_enums()
self.write(destination = self.model_directory,
filename="index.js",
template_name="model_index.js.tpl",
class_prefix = self._class_prefix,
model_list = self.model_list)
self.write(destination = self.enum_directory,
filename="index.js",
template_name="enum_index.js.tpl",
class_prefix = self._class_prefix,
enum_list = self.enum_list)
self._write_locales(specifications)
def _write_locales(self, specifications):
if self.locale_on:
for rest_name, specification in specifications.items():
#enum_attrs_for_locale_template = {}
#enum_attrs = self.enum_attrs_for_locale[specification.entity_name]
#
#generic_enum_attrs = self.generic_enum_attrs_for_locale[specification.entity_name];
#if (generic_enum_attrs):
# enum_attrs.extend(generic_enum_attrs)
#
#if (enum_attrs):
# for attribute in enum_attrs:
# enum_name = "%s%s%sEnum" % (self._class_prefix, specification.entity_name, attribute.name[0].upper() + attribute.name[1:])
# enum_attrs_for_locale_template[enum_name] = attribute.allowed_choices
self._format_description_text(specification)
filename = "%s.json" % (rest_name)
self.write(destination = self.locale_directory,
filename=filename,
template_name="locale_entity.json.tpl",
specification = specification,
enum_attrs = {})
def _format_description_text(self, specification):
if specification.description:
specification.description = specification.description.replace('"', "'")
for attribute in specification.attributes:
if attribute.description:
attribute.description = attribute.description.replace('"', "'")
def _write_abstract_named_entity(self):
""" This method generates AbstractNamedEntity class js file.
"""
filename = "%sAbstractNamedEntity.js" % (self._class_prefix)
superclass_name = "%sEntity" % (self._class_prefix)
# write will write a file using a template.
# mandatory params: destination directory, destination file name, template file name
# optional params: whatever that is needed from inside the Jinja template
self.write(destination = self.abstract_directory,
filename = filename,
template_name = "abstract_named_entity.js.tpl",
class_prefix = self._class_prefix,
superclass_name = superclass_name)
def _write_model(self, specification):
""" This method writes the ouput for a particular specification.
"""
if specification.allowed_job_commands and not (set(specification.allowed_job_commands).issubset(self.job_commands)):
raise Exception("Invalid allowed_job_commands %s specified in entity %s" % (specification.allowed_job_commands, specification.entity_name))
specification.supportsAlarms = len(filter(lambda child_api : child_api.rest_name == "alarm", specification.child_apis)) == 1
filename = "%s%s.js" % (self._class_prefix, specification.entity_name)
self.model_list.append("%s%s" %(self._class_prefix, specification.entity_name))
isNamedEntity = self._isNamedEntity(attributes=specification.attributes)
superclass_name = "RootEntity" if specification.rest_name == self.api_root else "AbstractNamedEntity" if isNamedEntity else "Entity"
# write will write a file using a template.
# mandatory params: destination directory, destination file name, template file name
# optional params: whatever that is needed from inside the Jinja template
specification.attributes_modified = [attribute for attribute in specification.attributes if (attribute.name not in self.base_attrs and (not isNamedEntity or attribute.name not in self.named_entity_attrs))]
enum_attributes=[attribute for attribute in specification.attributes_modified if attribute.allowed_choices]
enum_attrs_to_import = enum_attributes[:]
generic_enum_attrs_in_entity = {}
generic_enum_attributes_to_import = []
for attr in enum_attributes:
if specification.rest_name in self.overide_generic_enums and attr.name in self.overide_generic_enums[specification.rest_name]:
continue
for generic_enum_attr in self.generic_enum_attrs:
if set(attr.allowed_choices) & set(generic_enum_attr.allowed_choices):
generic_enum_attrs_in_entity[attr.name] = generic_enum_attr
enum_attrs_to_import.remove(attr)
generic_enum_attributes_to_import.append(generic_enum_attr.name)
self._write_enums(entity_name=specification.entity_name, attributes=enum_attrs_to_import)
self.generic_enum_attrs_for_locale[specification.entity_name] = generic_enum_attrs_in_entity.values()
self.write(destination = self.model_directory,
filename = filename,
template_name = "entity.js.tpl",
class_prefix = self._class_prefix,
specification = specification,
superclass_name = superclass_name,
enum_attrs_to_import = enum_attrs_to_import,
generic_enum_attributes = generic_enum_attrs_in_entity,
generic_enum_attributes_to_import = set(generic_enum_attributes_to_import))
def _isNamedEntity(self, attributes):
attr_names = [attr.name for attr in attributes]
return (len(self.named_entity_attrs) > 0 and set(self.named_entity_attrs).issubset(attr_names))
def _write_enums(self, entity_name, attributes):
""" This method writes the ouput for a particular specification.
"""
self.enum_attrs_for_locale[entity_name] = attributes;
for attribute in attributes:
enum_name = "%s%sEnum" % (entity_name, attribute.name[0].upper() + attribute.name[1:])
self.enum_list.append(enum_name)
filename = "%s%s.js" % (self._class_prefix, enum_name)
self.write(destination = self.enum_directory,
filename=filename,
template_name="enum.js.tpl",
class_prefix = self._class_prefix,
enum_name = enum_name,
allowed_choices = set(attribute.allowed_choices))
def _write_generic_enums(self):
""" This method generates generic enum classes.
"""
self._write_enums(entity_name='', attributes=self.generic_enum_attrs)
|
StarcoderdataPython
|
1834
|
import os
from datetime import timedelta
basedir = os.path.abspath(os.path.dirname(__file__))
API_DATA_URL = "https://invest-public-api.tinkoff.ru/rest/tinkoff.public.invest.api.contract.v1.InstrumentsService/"
API_LASTPRICES_URL = ("https://invest-public-api.tinkoff.ru/rest/"
                      "tinkoff.public.invest.api.contract.v1.MarketDataService/GetLastPrices")
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, '..', 'webapp.db')
REMEMBER_COOKIE_DURATION = timedelta(days=1)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = ""
API_TOKEN = ""
|
StarcoderdataPython
|
4839779
|
from django.apps import AppConfig
class NewswebsiteConfig(AppConfig):
name = 'newsWebsite'
|
StarcoderdataPython
|
3342993
|
<reponame>IamMayankThakur/test-bigdata<filename>adminmgr/media/code/python/map1/BD_188_1000_1767_mapper.py<gh_stars>1-10
#!/usr/bin/python3
import sys
import csv
infile = sys.stdin
#next(infile)
#fuel column index 8
for line in infile:
line = line.strip()
my_list = line.split(',')
isBall = my_list[0]
if(isBall == 'ball'):
batsman = my_list[4]
bowler = my_list[6]
runs = my_list[7]
try:
nod = my_list[9]
bd = my_list[10]
        # skip malformed/short rows that lack the dismissal columns
        except (IndexError, ValueError):
continue
if( len(nod)==2 or nod == "run out" or nod == "retired hurt"):
print('%s,%s\t%s' % (batsman,bowler,'0'))
else:
print('%s,%s\t%s' % (batsman,bowler,'1'))
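# Example (hypothetical ball-by-ball CSV line; the column layout is assumed from the
# indices used above): feeding the line
#   ball,1,2021,0.1,V Kohli,nonstriker,JJ Bumrah,4,0,,
# through this mapper emits "V Kohli,JJ Bumrah\t1" on stdout.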
|
StarcoderdataPython
|
1744306
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ..representation import CartesianRepresentation
from ..baseframe import BaseCoordinateFrame, TimeFrameAttribute, frame_transform_graph
from ..transformations import FunctionTransform
from .utils import DEFAULT_OBSTIME
class ITRS(BaseCoordinateFrame):
"""
A coordinate or frame in the International Terrestrial Reference System
(ITRS). This is approximately a geocentric system, although strictly it is
defined by a series of reference locations near the surface of the Earth.
For more background on the ITRS, see the references provided in the
:ref:`astropy-coordinates-seealso` section of the documentation.
"""
default_representation = CartesianRepresentation
obstime = TimeFrameAttribute(default=DEFAULT_OBSTIME)
@property
def earth_location(self):
"""
The data in this frame as an `~astropy.coordinates.EarthLocation` class.
"""
from ..earth import EarthLocation
cart = self.represent_as(CartesianRepresentation)
return EarthLocation(x=cart.x, y=cart.y, z=cart.z)
# Self-transform is in intermediate_rotation_transforms.py with all the other
# ITRS transforms
|
StarcoderdataPython
|
3259770
|
<filename>util/stock.py
'''This module includes utility functions related to stock operation.
'''
'''
Copyright (c) 2017, WinQuant Information and Technology Co. Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
# built-in modules
import numpy as np
# third-party modules
# customized modules
def adjustPrice( position, cash, dividend10=0.0, right10=0, rightIssue10=0,
rightIssuePrice=0 ):
    '''Adjust a stock position and its cash balance for ex-dividend and ex-rights events. If rationed
    shares are to be taken up, the available cash must cover their cost; otherwise the rationed
    shares are skipped and the holder takes a net loss from dilution.
Parameters
----------
position : int
the position of a stock;
cash : float
cash left or available, in case we need to buy rationed shares;
dividend10 : float
dividend per 10 shares;
right10 : float
ex-right per 10 shares;
rightIssue10 : float
right issued shares per 10 shares;
rightIssuePrice : float
right issued price.
Returns
-------
( newPosition, newCash ) : tuple of ( int, float )
The new position after the dividend and the cash left after.
'''
if rightIssue10 > 0:
x = 1
if right10 > 0:
x = 1
unit = int( position / 10 )
newCash = cash + dividend10 * unit
newPosition = position + int( unit * right10 )
rightIssueVolume = int( rightIssue10 * unit )
if cash > rightIssueVolume * rightIssuePrice:
newCash -= rightIssueVolume * rightIssuePrice
newPosition += rightIssueVolume
return ( newPosition, newCash )
def roundVolume( volume, lotSize=100 ):
'''Round the trading volume to lots.
Parameters
----------
volume : int
total volume to round;
lotSize : int
volume lot size, by default, 100 shares per lot.
Returns
-------
lotShares : int
rounded volumes.
'''
return int( volume / lotSize ) * lotSize
def getWindExchId( secId ):
'''Get exchange identifier for the given securities.
Parameters
----------
secId : str
Securities identifier in a normal string.
Returns
-------
exchId : str
exchange identifier. The rules are as below
* 6XXXXX - Shanghai Stock Exchange;
* 0XXXXX - Shenzhen Stock Exchange;
* 3XXXXX - Shenzhen Stock Exchange;
* 2XXXXX - Shenzhen Stock Exchange.
'''
prefix = secId[ 0 ]
return 'SH' if prefix == '6' else 'SZ'
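# Usage sketch (illustrative, not part of the original module). The corporate
# action numbers below are made-up assumptions to show how the helpers combine.
if __name__ == '__main__':
    # 1000 shares and 5000.0 cash; 2.5 dividend, 1 bonus share and 3 rights
    # shares per 10 shares held, rights priced at 8.0 per share.
    newPosition, newCash = adjustPrice( 1000, 5000.0, dividend10=2.5,
            right10=1, rightIssue10=3, rightIssuePrice=8.0 )
    # Round an odd order size down to whole lots before trading.
    orderVolume = roundVolume( int( newPosition * 0.37 ) )
    print( newPosition, newCash, orderVolume, getWindExchId( '600000' ) )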
|
StarcoderdataPython
|
1755350
|
import os.path as osp
from mmcv.runner import HOOKS, Hook, master_only
from mmcv.runner.checkpoint import save_checkpoint
from torch.optim.swa_utils import AveragedModel
from mmdet.core import EvalHook, DistEvalHook
import torch
@HOOKS.register_module()
class SWAHook(Hook):
r"""SWA Object Detection Hook.
This hook works together with SWA training config files to train
SWA object detectors <https://arxiv.org/abs/2012.12645>.
Args:
swa_eval (bool): Whether to evaluate the swa model.
Defaults to True.
eval_hook (Hook): Hook class that contains evaluation functions.
Defaults to None.
"""
def __init__(self, swa_eval=True, eval_hook=None):
if not isinstance(swa_eval, bool):
raise TypeError('swa_eval must be a bool, but got'
f'{type(swa_eval)}')
if swa_eval:
            if not isinstance(eval_hook, (EvalHook, DistEvalHook)):
                raise TypeError('eval_hook must be either an EvalHook or a '
                                'DistEvalHook when swa_eval = True, but got '
                                f'{type(eval_hook)}')
self.swa_eval = swa_eval
self.eval_hook = eval_hook
def before_run(self, runner):
"""Construct the averaged model which will keep track of the running
averages of the parameters of the model."""
model = runner.model
self.model = AveragedModel(model)
self.meta = runner.meta
        if self.meta is None:
            self.meta = dict()
        self.meta.setdefault('hook_msgs', dict())
@master_only
def _save_ckpt(self, model, filepath, meta, runner):
save_checkpoint(model, filepath, runner.optimizer, meta)
        # Verify that the saved checkpoint can be loaded back; if loading fails,
        # re-save and retry up to 20 times.
        for _ in range(20):
            try:
                torch.load(filepath, map_location='cpu')
                runner.logger.info(
                    f'Successfully saved swa model at swa-training {runner.epoch + 1} epoch'
                )
                break
            except Exception:
                save_checkpoint(model, filepath, runner.optimizer, meta)
                continue
def after_train_epoch(self, runner):
"""Update the parameters of the averaged model, save and evaluate the
updated averaged model."""
model = runner.model
# update the parameters of the averaged model
self.model.update_parameters(model)
# save the swa model
runner.logger.info(
f'Saving swa model at swa-training {runner.epoch + 1} epoch')
filename = 'swa_model_{}.pth'.format(runner.epoch + 1)
filepath = osp.join(runner.work_dir, filename)
optimizer = runner.optimizer
self.meta['hook_msgs']['last_ckpt'] = filepath
self._save_ckpt(self.model.module, filepath, self.meta, runner)
# evaluate the swa model
if self.swa_eval:
self.work_dir = runner.work_dir
self.rank = runner.rank
self.epoch = runner.epoch
self.logger = runner.logger
self.log_buffer = runner.log_buffer
self.meta['hook_msgs']['last_ckpt'] = filename
self.eval_hook.after_train_epoch(self)
def after_run(self, runner):
# since BN layers in the backbone are frozen,
# we do not need to update the BN for the swa model
pass
def before_epoch(self, runner):
pass
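# Hypothetical wiring sketch (not part of the original file): shows where this
# hook is typically attached in an mmcv-based training script. The `runner` and
# `val_dataloader` objects are assumed to be built by the caller.
def register_swa_hook(runner, val_dataloader, distributed=False):
    """Attach SWAHook (together with an evaluation hook) to an existing runner."""
    eval_hook_cls = DistEvalHook if distributed else EvalHook
    eval_hook = eval_hook_cls(val_dataloader)
    runner.register_hook(SWAHook(swa_eval=True, eval_hook=eval_hook))
    return runner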
|
StarcoderdataPython
|
35414
|
<reponame>osaizar/sand
import random
import numpy as np
MATRIX = [(7, 6, 2, 1, 0, 3, 5, 4),
(6, 5, 0, 1, 3, 2, 4, 7),
(1, 0, 3, 7, 5, 4, 6, 2),
(7, 5, 2, 6, 1, 3, 0, 4),
(0, 4, 2, 3, 7, 1, 6, 5),
(7, 1, 0, 2, 3, 5, 6, 4),
(3, 4, 2, 6, 0, 7, 5, 1),
(6, 1, 5, 2, 7, 4, 0, 3),
(3, 1, 4, 5, 0, 7, 2, 6),
(3, 2, 6, 5, 0, 4, 1, 7),
(3, 0, 6, 1, 7, 5, 4, 2),
(0, 6, 1, 7, 4, 2, 5, 3),
(3, 5, 2, 0, 7, 4, 6, 1),
(5, 4, 0, 3, 1, 7, 2, 6),
(4, 1, 6, 3, 2, 7, 0, 5),
(3, 7, 5, 1, 2, 0, 6, 4),
(4, 5, 2, 7, 6, 0, 3, 1),
(7, 2, 4, 6, 0, 3, 1, 5),
(3, 7, 5, 6, 1, 0, 4, 2),
(0, 5, 4, 3, 7, 2, 1, 6),
(4, 0, 2, 3, 1, 6, 5, 7),
(1, 7, 6, 3, 4, 0, 2, 5),
(5, 7, 3, 2, 6, 1, 4, 0),
(1, 3, 0, 5, 2, 7, 4, 6),
(7, 4, 3, 5, 1, 6, 0, 2),
(5, 1, 3, 0, 4, 2, 6, 7),
(7, 0, 2, 3, 1, 5, 6, 4),
(4, 0, 7, 6, 1, 5, 3, 2),
(5, 3, 6, 1, 4, 7, 2, 0),
(2, 4, 5, 1, 7, 0, 6, 3),
(1, 2, 4, 3, 6, 5, 0, 7),
(4, 7, 6, 5, 0, 2, 3, 1),
(4, 5, 3, 0, 6, 2, 7, 1),
(5, 7, 6, 3, 2, 1, 0, 4),
(5, 6, 7, 0, 4, 2, 1, 3),
(0, 7, 2, 6, 5, 4, 3, 1),
(6, 0, 5, 1, 3, 4, 2, 7),
(7, 1, 5, 2, 3, 4, 6, 0),
(2, 5, 4, 7, 0, 1, 3, 6),
(4, 5, 0, 6, 1, 2, 3, 7),
(3, 2, 0, 6, 7, 4, 5, 1),
(2, 6, 3, 1, 5, 0, 4, 7),
(7, 4, 2, 1, 6, 3, 5, 0),
(5, 3, 2, 6, 1, 0, 4, 7),
(6, 5, 4, 0, 3, 7, 2, 1),
(6, 2, 7, 3, 5, 1, 4, 0),
(3, 4, 2, 7, 6, 0, 1, 5),
(1, 6, 0, 3, 7, 2, 4, 5),
(2, 7, 4, 1, 5, 3, 0, 6),
(3, 1, 0, 5, 4, 2, 6, 7),
(6, 1, 2, 7, 5, 4, 0, 3),
(7, 5, 6, 2, 0, 4, 1, 3),
(5, 3, 2, 7, 0, 4, 6, 1),
(2, 6, 5, 0, 1, 3, 7, 4),
(2, 4, 5, 3, 0, 1, 7, 6),
(4, 0, 2, 1, 6, 3, 7, 5),
(5, 0, 6, 2, 3, 4, 7, 1),
(0, 2, 6, 7, 3, 5, 1, 4),
(3, 4, 0, 7, 2, 1, 6, 5),
(1, 2, 4, 3, 5, 7, 6, 0),
(5, 2, 4, 7, 1, 3, 0, 6),
(7, 5, 1, 3, 6, 0, 4, 2),
(4, 6, 1, 3, 5, 2, 7, 0),
(0, 5, 3, 1, 4, 2, 6, 7),
(2, 3, 1, 4, 5, 6, 7, 0),
(6, 1, 0, 7, 2, 5, 4, 3),
(3, 6, 1, 7, 0, 4, 5, 2),
(4, 7, 2, 3, 1, 0, 5, 6),
(1, 2, 3, 4, 6, 5, 7, 0),
(5, 4, 3, 1, 2, 0, 7, 6),
(6, 0, 4, 7, 5, 2, 3, 1),
(2, 6, 5, 4, 0, 1, 3, 7),
(1, 3, 4, 5, 0, 2, 6, 7),
(0, 5, 6, 7, 2, 1, 4, 3),
(2, 1, 6, 0, 3, 7, 4, 5),
(6, 7, 5, 3, 2, 0, 1, 4),
(0, 7, 2, 5, 6, 1, 4, 3),
(1, 6, 0, 5, 7, 2, 4, 3),
(5, 1, 6, 4, 2, 7, 0, 3),
(6, 3, 1, 0, 2, 4, 7, 5),
(3, 1, 7, 4, 5, 0, 6, 2),
(4, 6, 7, 5, 1, 2, 3, 0),
(7, 1, 2, 6, 3, 4, 0, 5),
(3, 6, 7, 2, 1, 4, 0, 5),
(7, 6, 0, 5, 1, 4, 2, 3),
(2, 5, 3, 1, 7, 4, 6, 0),
(7, 5, 2, 6, 3, 1, 4, 0),
(0, 3, 5, 2, 6, 7, 1, 4),
(1, 6, 5, 2, 3, 7, 0, 4),
(4, 0, 1, 3, 6, 7, 2, 5),
(6, 4, 3, 5, 0, 7, 1, 2),
(1, 0, 3, 7, 2, 5, 4, 6),
(2, 5, 0, 1, 3, 7, 6, 4),
(3, 2, 1, 5, 7, 4, 6, 0),
(1, 3, 6, 7, 0, 4, 2, 5),
(5, 0, 4, 3, 2, 1, 7, 6),
(6, 0, 3, 7, 4, 5, 1, 2),
(5, 1, 0, 3, 4, 2, 7, 6),
(6, 4, 0, 2, 5, 3, 1, 7),
(7, 2, 4, 5, 0, 1, 6, 3),
(0, 1, 3, 4, 2, 6, 7, 5),
(3, 6, 5, 7, 0, 2, 1, 4),
(2, 1, 4, 6, 5, 7, 0, 3),
(6, 4, 7, 0, 5, 3, 2, 1),
(6, 3, 7, 4, 1, 2, 0, 5),
(3, 4, 5, 6, 2, 7, 0, 1),
(5, 3, 1, 6, 4, 0, 7, 2),
(1, 4, 0, 3, 2, 5, 6, 7),
(3, 1, 7, 6, 4, 5, 0, 2),
(3, 4, 0, 5, 7, 6, 2, 1),
(3, 4, 0, 6, 7, 2, 1, 5),
(7, 2, 1, 3, 0, 5, 6, 4),
(2, 1, 5, 7, 0, 3, 4, 6),
(6, 3, 7, 5, 0, 1, 4, 2),
(0, 1, 2, 6, 4, 5, 7, 3),
(4, 7, 5, 6, 2, 1, 0, 3),
(3, 4, 6, 7, 1, 2, 5, 0),
(6, 0, 7, 2, 3, 4, 1, 5),
(5, 4, 6, 3, 1, 2, 0, 7),
(7, 1, 2, 4, 0, 6, 3, 5),
(7, 4, 5, 1, 3, 0, 2, 6),
(6, 2, 4, 5, 0, 7, 3, 1),
(5, 6, 3, 2, 1, 7, 4, 0),
(0, 1, 4, 7, 2, 5, 6, 3),
(7, 6, 2, 5, 3, 4, 0, 1),
(6, 5, 4, 7, 2, 1, 3, 0),
(6, 2, 1, 3, 4, 0, 7, 5),
(5, 0, 7, 3, 1, 4, 2, 6),
(5, 6, 2, 0, 7, 4, 1, 3),
(5, 7, 3, 0, 6, 2, 1, 4),
(3, 1, 7, 4, 5, 0, 2, 6),
(4, 0, 7, 6, 3, 5, 1, 2),
(5, 4, 0, 3, 2, 7, 1, 6),
(5, 3, 2, 1, 6, 0, 4, 7),
(3, 1, 7, 6, 4, 2, 5, 0),
(0, 3, 5, 1, 7, 6, 2, 4),
(6, 4, 1, 7, 2, 5, 0, 3),
(7, 2, 6, 4, 5, 3, 0, 1),
(5, 1, 3, 4, 2, 6, 7, 0),
(6, 1, 7, 0, 5, 3, 2, 4),
(4, 6, 0, 1, 2, 3, 5, 7),
(3, 4, 0, 2, 7, 1, 6, 5),
(5, 1, 2, 0, 4, 3, 7, 6),
(5, 1, 3, 0, 4, 7, 2, 6),
(3, 7, 1, 2, 5, 0, 6, 4),
(6, 5, 2, 1, 3, 4, 0, 7),
(3, 4, 7, 6, 5, 0, 2, 1),
(5, 1, 3, 7, 4, 2, 6, 0),
(6, 4, 0, 7, 3, 2, 5, 1),
(0, 2, 4, 3, 6, 7, 1, 5),
(5, 0, 7, 6, 4, 1, 3, 2),
(4, 6, 7, 2, 3, 5, 1, 0),
(1, 0, 5, 3, 6, 7, 4, 2),
(1, 4, 7, 6, 0, 3, 5, 2),
(7, 3, 4, 6, 5, 1, 2, 0),
(1, 3, 0, 6, 7, 4, 2, 5),
(5, 4, 6, 1, 2, 0, 7, 3),
(5, 3, 1, 0, 4, 2, 6, 7),
(7, 1, 4, 0, 3, 2, 5, 6),
(1, 2, 3, 7, 5, 6, 0, 4),
(7, 6, 3, 4, 5, 0, 1, 2),
(7, 0, 5, 6, 1, 4, 2, 3),
(0, 3, 7, 1, 6, 4, 2, 5),
(6, 0, 3, 1, 5, 2, 4, 7),
(7, 1, 4, 6, 3, 2, 0, 5),
(4, 5, 2, 7, 6, 0, 1, 3),
(3, 4, 0, 5, 7, 2, 6, 1),
(4, 3, 7, 6, 2, 5, 0, 1),
(4, 2, 1, 3, 7, 6, 5, 0),
(6, 0, 2, 5, 7, 3, 4, 1),
(4, 1, 5, 6, 7, 0, 3, 2),
(5, 6, 0, 2, 4, 3, 1, 7),
(0, 1, 2, 4, 5, 3, 7, 6),
(7, 0, 5, 1, 4, 6, 3, 2),
(5, 6, 7, 4, 0, 1, 3, 2),
(4, 6, 1, 5, 7, 3, 0, 2),
(1, 2, 7, 4, 0, 3, 5, 6),
(5, 6, 3, 0, 1, 2, 4, 7),
(3, 4, 6, 2, 7, 5, 0, 1),
(3, 4, 0, 6, 5, 2, 1, 7),
(6, 5, 0, 7, 1, 3, 4, 2),
(3, 0, 4, 6, 7, 2, 5, 1),
(1, 0, 2, 3, 4, 5, 6, 7),
(6, 7, 1, 0, 4, 3, 2, 5),
(1, 5, 3, 6, 4, 0, 2, 7),
(1, 0, 2, 3, 7, 4, 6, 5),
(7, 3, 2, 6, 5, 1, 4, 0),
(7, 5, 0, 6, 1, 2, 3, 4),
(0, 5, 3, 7, 2, 6, 4, 1),
(5, 7, 3, 4, 0, 2, 1, 6),
(1, 0, 4, 5, 2, 7, 3, 6),
(4, 5, 1, 3, 6, 0, 2, 7),
(6, 3, 7, 0, 4, 2, 5, 1),
(2, 4, 3, 6, 5, 1, 0, 7),
(4, 0, 7, 3, 5, 1, 6, 2),
(7, 3, 6, 4, 1, 2, 5, 0),
(7, 6, 5, 3, 1, 0, 4, 2),
(5, 4, 0, 6, 7, 2, 3, 1),
(7, 4, 0, 3, 1, 5, 6, 2),
(5, 6, 3, 0, 7, 2, 4, 1),
(0, 2, 7, 5, 6, 4, 3, 1),
(1, 6, 4, 2, 5, 3, 7, 0),
(6, 2, 3, 7, 0, 4, 5, 1),
(4, 7, 5, 2, 1, 6, 3, 0),
(4, 3, 0, 5, 1, 6, 7, 2),
(1, 6, 0, 7, 4, 3, 2, 5),
(0, 2, 6, 5, 4, 3, 1, 7),
(2, 5, 6, 7, 1, 0, 4, 3),
(2, 3, 6, 4, 1, 5, 0, 7),
(4, 7, 3, 6, 0, 1, 5, 2),
(5, 2, 6, 3, 7, 4, 1, 0),
(0, 3, 6, 5, 1, 7, 2, 4),
(0, 7, 6, 1, 4, 5, 3, 2),
(4, 3, 2, 1, 6, 7, 0, 5),
(7, 1, 6, 3, 0, 5, 4, 2),
(2, 6, 3, 7, 4, 0, 5, 1),
(7, 1, 2, 5, 3, 0, 4, 6),
(2, 5, 6, 7, 4, 1, 3, 0),
(6, 0, 1, 2, 4, 5, 7, 3),
(5, 3, 1, 2, 0, 7, 4, 6),
(7, 5, 2, 3, 6, 0, 4, 1),
(1, 7, 4, 3, 5, 6, 0, 2),
(5, 0, 4, 2, 6, 3, 7, 1),
(0, 5, 3, 7, 2, 4, 6, 1),
(1, 4, 0, 7, 3, 5, 2, 6),
(6, 1, 7, 3, 4, 0, 2, 5),
(6, 0, 5, 2, 3, 4, 1, 7),
(0, 3, 1, 4, 2, 6, 7, 5),
(3, 4, 1, 5, 7, 0, 2, 6),
(3, 7, 2, 4, 5, 6, 0, 1),
(1, 2, 3, 4, 5, 7, 6, 0),
(2, 4, 3, 7, 1, 0, 6, 5),
(3, 7, 4, 2, 0, 5, 6, 1),
(3, 1, 0, 6, 7, 2, 5, 4),
(5, 7, 0, 6, 3, 4, 2, 1),
(5, 1, 3, 0, 7, 2, 4, 6),
(3, 2, 6, 7, 5, 4, 0, 1),
(7, 2, 6, 3, 5, 0, 4, 1),
(4, 3, 6, 5, 1, 0, 7, 2),
(2, 4, 6, 1, 3, 5, 0, 7),
(2, 0, 5, 3, 6, 4, 1, 7),
(0, 3, 1, 2, 6, 4, 7, 5),
(6, 2, 5, 3, 0, 4, 1, 7),
(3, 7, 0, 4, 6, 1, 5, 2),
(2, 7, 3, 6, 0, 5, 1, 4),
(1, 4, 3, 5, 6, 2, 7, 0),
(7, 2, 6, 0, 4, 1, 5, 3),
(4, 2, 7, 5, 0, 6, 3, 1),
(1, 2, 7, 0, 3, 4, 5, 6),
(2, 4, 7, 6, 5, 1, 0, 3),
(5, 7, 0, 2, 6, 3, 4, 1),
(2, 5, 6, 4, 3, 1, 7, 0),
(1, 6, 4, 0, 7, 3, 5, 2),
(1, 4, 5, 7, 6, 3, 2, 0),
(6, 0, 1, 2, 7, 4, 3, 5),
(2, 4, 7, 1, 0, 5, 3, 6)]
def generate_permatrix():
    """Generate 256 distinct random permutations of range(8) and print them,
    e.g. to regenerate the MATRIX literal above."""
    global MATRIX
    l = list(range(8))
    perm_set = set()
    while len(perm_set) < 256:
        random.shuffle(l)
        perm_set.add(tuple(l))
    MATRIX = list(perm_set)
    for i in MATRIX:
        print(str(i) + ",")
def generate_server_permatrix():
    """Replace every permutation tuple in MATRIX with the equivalent
    8x8 permutation matrix (dtype uint8)."""
    global MATRIX
    for i in range(len(MATRIX)):
        elem = np.zeros((8, 8), dtype=np.uint8)
        for j in range(8):
            # Row j gets a 1 in the column where value j sits in the permutation.
            elem[j, MATRIX[i].index(j)] = 1
        MATRIX[i] = elem
generate_server_permatrix()
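# Illustrative self-check (not part of the original module): after
# generate_server_permatrix() has run at import time, every MATRIX entry is an
# 8x8 permutation matrix, so it can permute an 8-bit vector, and its transpose
# undoes the permutation.
if __name__ == '__main__':
    bits = np.array([1, 0, 1, 1, 0, 0, 1, 0], dtype=np.uint8)
    permuted = MATRIX[0].dot(bits)        # permute the bit vector
    restored = MATRIX[0].T.dot(permuted)  # a permutation matrix is inverted by its transpose
    assert np.array_equal(bits, restored)
    print(permuted)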
|
StarcoderdataPython
|
1653857
|
from asyncio import iscoroutinefunction
from functools import wraps
from types import MethodType
from typing import Callable
from fastapi import Request
from ..utils import run_sync
from .backends.base import BaseCacheBackend
from .helpers import get_cache_backend, get_request_object
from .key import get_cache_key
def auto_connect(method: MethodType):
"""
Decorator to ensure the backend cache is connected to the server.
"""
if iscoroutinefunction(method):
@wraps(method)
async def wrapper(*args, **kwargs):
self: BaseCacheBackend = args[0]
if not self.client:
await self.connect()
return await method(*args, **kwargs)
else:
@wraps(method)
def wrapper(*args, **kwargs):
self: BaseCacheBackend = args[0]
if not self.client:
self.connect()
return method(*args, **kwargs)
return wrapper
def cached(
timeout: int = 15,
*,
key: Callable[[Request], str] = get_cache_key,
cache: str = "default"
) -> Callable:
"""
Decorator to cache the result of a function.
Args:
timeout: Timeout in seconds.
key: Function to generate the cache key.
cache: The cache name.
"""
assert callable(key), "key must be callable"
def decorator(func: Callable) -> Callable:
if iscoroutinefunction(func):
@wraps(func)
async def wrapper(*args, **kwargs):
request = get_request_object(kwargs)
if not request:
return await func(*args, **kwargs)
cache_backend = get_cache_backend(request, cache)
cache_key = key(request)
cache_get = cache_backend.get
if iscoroutinefunction(cache_get):
value = await cache_get(cache_key)
else:
value = cache_get(cache_key)
if value is None:
value = await func(*args, **kwargs)
cache_set = cache_backend.set
if iscoroutinefunction(cache_set):
await cache_set(cache_key, value, timeout)
else:
cache_set(cache_key, value, timeout)
return value
else:
@wraps(func)
def wrapper(*args, **kwargs):
request = get_request_object(kwargs)
if not request:
return func(*args, **kwargs)
cache_backend = get_cache_backend(request, cache)
cache_key = key(request)
cache_get = cache_backend.get
if iscoroutinefunction(cache_get):
value = run_sync(cache_get, cache_key)
else:
value = cache_get(cache_key)
if value is None:
value = func(*args, **kwargs)
cache_set = cache_backend.set
if iscoroutinefunction(cache_set):
run_sync(cache_set, cache_key, value, timeout)
else:
cache_set(cache_key, value, timeout)
return value
return wrapper
return decorator
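# Hypothetical usage sketch (not part of the original module). A decorated route
# must accept the Request object so get_request_object() can find it in the
# keyword arguments; the FastAPI app and the cache backend configuration below
# are assumed, not provided by this module.
def _example_app():
    from fastapi import FastAPI

    app = FastAPI()

    @app.get("/report")
    @cached(timeout=60)
    async def report(request: Request):
        # Pretend this is an expensive computation worth caching for 60 seconds.
        return {"value": 42}

    return app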
|
StarcoderdataPython
|
3272190
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import json
from os import path
import jsonschema.exceptions
from jsonschema import validate
def getDeploymentConfig(file: str) -> dict:
"""
Load and validate the deployment configuration json file
:param file: configuration json file
:return: configuration as dict
"""
if not path.exists(file):
raise FileNotFoundError("Missing config.json file")
try:
with open(file, "r") as f:
config = json.load(f)
except json.JSONDecodeError as err:
raise IOError(err)
schema = _get_schema()
try:
validate(instance=config, schema=schema)
except jsonschema.exceptions.ValidationError as err:
raise SyntaxError(err)
return config
def _get_schema():
"""
The validation Schema provider
:return: the schema validation as a dict
"""
return {
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Config",
"description": "Stack deployment configuration",
"type": "object",
"properties": {
"region": {"type": "string"},
"tags": {
"type": "object",
"maxItems": 10
},
"layers": {
"type": "object",
"properties": {
"pillow": {
"type": "object",
"properties": {
"always_refresh": {"type": "boolean"},
"url": {"type": "string"},
"hash": {"type": "string"}
}
}
}
}
},
"required": [
"region"
],
"additionalProperties": False
}
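# Minimal self-check sketch (illustrative, not part of the original module):
# builds an in-memory configuration that satisfies _get_schema() and validates
# it. The values are assumptions for demonstration only.
if __name__ == "__main__":
    _example_config = {
        "region": "eu-west-1",
        "tags": {"project": "image-pipeline", "env": "dev"},
        "layers": {
            "pillow": {
                "always_refresh": False,
                "url": "https://example.com/pillow-layer.zip",
                "hash": "abc123",
            }
        },
    }
    validate(instance=_example_config, schema=_get_schema())
    print("example config is valid")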
|
StarcoderdataPython
|
1611478
|
<filename>genetic algorithm/conference building/hidden genes genetic algorithm/finlandia_talo_ga_stochastic.py<gh_stars>1-10
import numpy as np
from shapely.geometry import Polygon, MultiPolygon, LineString, MultiLineString, Point, LinearRing
from shapely.ops import polygonize, cascaded_union
from scipy.spatial.qhull import Delaunay
from crowddynamics.core.distance import distance_circle_line
from crowddynamics.core.sampling import triangle_area_cumsum, random_sample_triangle
from crowddynamics.core.vector2D import length
from crowddynamics.core.geometry import geom_to_linear_obstacles
from crowddynamics.simulation.agents import Circular, ThreeCircle, NO_TARGET, \
Agents, AgentGroup
from crowddynamics.simulation.field import Field
from crowddynamics.simulation.logic import Reset, InsideDomain, Integrator, \
Fluctuation, Adjusting, Navigation, ExitDetection, \
Orientation, AgentAgentInteractions, AgentObstacleInteractions, \
LeaderFollower, TargetReached
from crowddynamics.simulation.multiagent import MultiAgentSimulation
from shapely.geometry import Polygon
from shapely.geometry.linestring import LineString
from traitlets.traitlets import Enum, Int, default
class FinlandiaTalo2ndFloorField(Field):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def f(value, scale=10 / 1.5):
if value:
return tuple(map(lambda x: scale * x, value))
else:
return None
A = list(map(f, [
None,
(19.226, 4.194),
(19.444, 3.887),
(21.368, 1.179),
(1.567, 1.179),
(1.574, 2.682),
(1.565, 4.603),
(4.012, 5.296),
(2.736, 2.689),
]))
B = list(map(f, [
None,
(4.421, 3.862),
(3.881, 5.755),
(4.998, 6.024),
(5.209, 5.169),
(4.677, 5.041),
(4.787, 4.587),
(5.394, 5.168),
(4.858, 7.154),
(6.048, 7.667),
(5.993, 7.823),
(6.71, 8.026),
]))
C = list(map(f, [
None,
(6.916, 8.038),
(7.043, 8.08),
(7.291, 7.926),
(7.411, 7.45),
(7.669, 7.408),
(7.709, 7.224),
(8.026, 7.317),
(8.257, 6.494),
(8.141, 6.472),
None,
(8.387, 4.775),
(6.416, 4.501),
(6.372, 4.631),
(6.047, 4.587),
(6.336, 4.992),
(5.889, 4.955),
(5.549, 6.147),
]))
D = list(map(f, [
(),
(8.198, 6.705),
(10.513, 7.321),
(10.598, 7.06),
(10.705, 6.803),
(10.441, 6.747),
(10.806, 5.387),
(12.112, 5.329),
(8.915, 4.884),
(8.518, 6.328),
(9.929, 6.703),
(10.304, 5.281),
(11.936, 3.715),
(12.488, 8.787),
(15.002, 9.438),
(18.288, 4.784),
(18.634, 4.98),
(18.906, 4.63),
(19.6, 5.093),
(21.673, 2.116),
]))
E = list(map(f, [
(),
(17.693, 4.928),
(18.233, 4.09),
(16.799, 2.954),
(16.457, 3.582),
(15.73, 3.979),
(15.998, 3.024),
(14.23, 2.634),
(14.142, 3.571),
(13.579, 3.582),
(13.568, 3.064),
(12.311, 3.34),
(12.311, 3.88),
(8.859, 2.682),
(8.435, 2.625),
(8.266, 3.685),
(8.718, 3.741),
(8.333, 3.265),
(8.208, 3.21),
(8.267, 2.861),
(7.978, 2.827),
(7.816, 2.708),
(5.787, 5.313),
(6.217, 2.716),
(5.732, 2.734),
(4.432, 2.722),
(4.332, 2.923),
(4.24, 3.134),
(4.07, 3.656),
(2.375, 3.656),
(2.375, 3.233),
(3.675, 3.247),
(16.487, 1.687),
(18.101, 1.682),
(18.107, 1.179),
(18.734, 1.652),
(19.087, 3.491),
(6.295, 3.232),
(4.571, 3.225),
(4.444, 3.696),
]))
G = list(map(f, [
(),
(12.327, 4.117),
(12.356, 4.281),
(12.374, 4.428),
(12.4, 4.6),
(12.428, 4.747),
(12.456, 4.905),
(12.484, 5.059),
(12.509, 5.22),
(12.537, 5.396),
(12.558, 5.551),
(12.588, 5.718),
(12.616, 5.887),
(12.644, 6.035),
(12.66, 6.204),
(12.702, 6.358),
(12.715, 6.554),
(12.748, 6.719),
]))
H = list(map(f, [
(),
(12.996, 3.897),
(13.024, 4.053),
(13.044, 4.209),
(13.069, 4.362),
(13.06, 4.526),
(13.113, 4.679),
(13.119, 4.843),
(13.137, 4.995),
(13.177, 5.169),
(13.214, 5.308),
(13.239, 5.461),
(13.253, 5.62),
(13.297, 5.78),
(13.313, 5.936),
(13.364, 6.088),
(13.385, 6.241),
(13.4, 6.4),
]))
I = list(map(f, [
(),
(13.572, 3.769),
(13.595, 3.93),
(13.608, 4.092),
(13.613, 4.259),
(13.631, 4.412),
(13.626, 4.574),
(13.64, 4.74),
(13.649, 4.884),
(13.671, 5.046),
(13.676, 5.217),
(13.689, 5.374),
(13.703, 5.523),
(13.698, 5.671),
(13.716, 5.846),
(13.73, 6.004),
(13.743, 6.166),
(13.748, 6.323),
]))
J = list(map(f, [
(),
(16.789, 4.838),
(16.713, 4.98),
(16.666, 5.117),
(16.6, 5.282),
(16.506, 5.428),
(16.465, 5.592),
(16.36, 5.72),
(16.301, 5.89),
(16.205, 6.039),
(16.083, 6.12),
(16.044, 6.314),
(15.987, 6.454),
(15.895, 6.624),
(15.796, 6.734),
(15.692, 6.866),
(15.6, 7),
(15.516, 7.161),
]))
K = list(map(f, [
(),
(17.339, 5.382),
(17.263, 5.524),
(17.16, 5.672),
(17.067, 5.798),
(16.99, 5.941),
(16.888, 6.081),
(16.8, 6.2),
(16.703, 6.367),
(16.59, 6.484),
(16.495, 6.624),
(16.396, 6.761),
(16.31, 6.881),
(16.217, 7.027),
(16.113, 7.179),
(16.005, 7.31),
(15.898, 7.471),
(15.793, 7.635),
]))
O = list(map(f, [
(),
(5.152, 6.067),
(6.837, 7.333),
(7.07, 6.03),
(8.192, 6.119),
(12.288, 6.949),
(8.895, 1.179),
(12.027, 1.179),
(16.478, 1.179),
(3.672, 3.656),
(4.249, 4.467),
(17.815, 5.455),
(11.97, 4.027),
(14.846, 6.717),
(14.097, 6.472),
(12.699, 6.912),
(15.987, 8.042),
]))
U = list(map(f, [
(),
(14.169, 3.788),
(14.153, 3.954),
(14.159, 4.103),
(14.167, 4.264),
(14.162, 4.431),
(14.176, 4.573),
(14.177, 4.743),
(14.179, 4.894),
(14.176, 5.052),
(14.187, 5.239),
(14.2, 5.4),
(14.19, 5.543),
(14.192, 5.697),
(14.195, 5.848),
(14.195, 6.025),
(14.2, 6.168),
(14.2, 6.322),
]))
V = list(map(f, [
(),
(14.908, 3.879),
(14.855, 4.029),
(14.897, 4.216),
(14.83, 4.347),
(14.847, 4.523),
(14.763, 4.649),
(14.735, 4.807),
(14.745, 4.969),
(14.739, 5.133),
(14.737, 5.301),
(14.702, 5.456),
(14.656, 5.618),
(14.634, 5.769),
(14.594, 5.907),
(14.613, 6.079),
(14.599, 6.246),
(14.564, 6.397),
]))
W = list(map(f, [
(),
(15.676, 4.123),
(15.644, 4.269),
(15.588, 4.431),
(15.549, 4.576),
(15.496, 4.723),
(15.449, 4.865),
(15.399, 5.031),
(15.359, 5.176),
(15.297, 5.332),
(15.259, 5.484),
(15.203, 5.629),
(15.151, 5.78),
(15.119, 5.928),
(15.063, 6.087),
(15.009, 6.252),
(14.963, 6.386),
(14.914, 6.553),
]))
X = list(map(f, [
(),
(6.007, 7.828),
(5.869, 8.313),
(12.146, 9.886),
(12.447, 8.775),
(12.41, 8.381),
(12.308, 7.364),
(10.598, 7.06),
(10.552, 7.294),
(9.632, 7.087),
(9.575, 7.309),
(8.878, 7.138),
(8.926, 6.899),
(8.205, 6.707),
(8.028, 7.31),
(7.76, 7.316),
(7.462, 7.42),
(7.291, 7.926),
(7.046, 8.071),
(6.71, 8.026),
]))
Y = list(map(f, [
(),
(6.588, 8.493),
]))
Z = list(map(f, [
(),
(16.176, 4.36),
(16.099, 4.502),
(16.053, 4.639),
(15.986, 4.804),
(15.947, 4.949),
(15.876, 5.106),
(15.84, 5.303),
(15.758, 5.436),
(15.704, 5.573),
(15.662, 5.743),
(15.6, 5.876),
(15.559, 6.06),
(15.495, 6.244),
(15.421, 6.379),
(15.374, 6.527),
(15.316, 6.659),
(15.239, 6.822),
]))
A[3] = (21.368 * 10 / 1.5, 1.55 * 10 / 1.5)
A[4] = (1.567 * 10 / 1.5, 1.55 * 10 / 1.5)
# E[32] = ()
# E[33] = ()
E[34] = (18.107 * 10 / 1.5, 1.55 * 10 / 1.5)
E[35] = (18.734 * 10 / 1.5, 2.3 * 10 / 1.5)
O[6] = (8.895 * 10 / 1.5, 1.55 * 10 / 1.5)
O[7] = (12.027 * 10 / 1.5, 1.55 * 10 / 1.5)
O[8] = (16.478 * 10 / 1.5, 1.55 * 10 / 1.5)
rest_midpoint = (6 * 10 / 1.5, 1.55 * 10 / 1.5)
# New points for "Traffic Jam Puzzle"
rest1 = (4*10/1.5, 1.55*10/1.5)
rest2 = (8*10/1.5, 1.55*10/1.5)
obstacles = Polygon()
obstacles |= LineString(A[1:5 + 1] + [A[8]])
obstacles |= LineString(A[5:7 + 1])
obstacles |= LineString(B[1:6 + 1])
obstacles |= LineString(B[7:11 + 1])
# obstacles |= LineString(C[1:10] + C[11:14 + 1])
obstacles |= LineString(C[1:6]) # INCLUDE
obstacles |= LineString(C[7:10] + C[11:15]) # EXCLUDE?
obstacles |= LineString(C[15:19]) # EXCLUDE?
# obstacles |= LineString(D[1:7 + 1])
obstacles |= LineString(D[2:7 + 1]) # INCLUDE
obstacles |= LineString(D[8:11 + 1]) # INCLUDE
# obstacles |= LineString(D[12:19 + 1])
obstacles |= LineString([D[12]] + [X[5]]) # INCLUDE
obstacles |= LineString(D[13:19 + 1]) # INCLUDE
obstacles |= LineString(E[1:4 + 1])
obstacles |= LineString(E[5:8 + 1])
obstacles |= LineString(E[9:12 + 1])
obstacles |= LineString(E[13:16 + 1])
obstacles |= LineString(E[17:21 + 1] + [E[23]])
obstacles |= LineString(E[24:26 + 1])
obstacles |= LineString(E[27:31 + 1])
# obstacles |= LineString(E[32:34 + 1])
obstacles |= LineString(E[35:36 + 1])
obstacles |= LineString(E[35:36 + 1])
obstacles |= LineString(E[37:39 + 1])
obstacles |= LineString(X[1:4] + [D[13]]) # INCLUDE
# obstacles |= LineString(X[6:19]) # EXCLUDE?
obstacles |= LineString(D[1:3] + [X[6]]) # INCLUDE
obstacles |= LineString(X[9:12]) # INCLUDE
# New obstacles for "Traffic Jam Puzzle"
obstacles |= LineString([C[11]] + [E[16]])
obstacles |= LineString([E[39]] + [B[1]])
# These are the obstacles for the agents that start outside the Finlandia hall
obstacles_finlandia = Polygon()
# changes start
# obstacles |= LineString(C[1:10] + C[11:14 + 1])
obstacles_finlandia |= LineString(C[1:6]) # INCLUDE
obstacles_finlandia |= LineString(C[7:10] + C[11:15]) # EXCLUDE?
obstacles_finlandia |= LineString(C[15:19]) # EXCLUDE?
# obstacles |= LineString(D[1:7 + 1])
obstacles_finlandia |= LineString(D[2:7 + 1]) # INCLUDE
obstacles_finlandia |= LineString(D[8:11 + 1]) # INCLUDE
# obstacles_finlandia |= LineString(D[12:19 + 1])
obstacles_finlandia |= LineString([D[12]] + [X[5]]) # INCLUDE
obstacles_finlandia |= LineString(D[13:19 + 1]) # INCLUDE
obstacles_finlandia |= LineString(X[1:4] + [D[13]]) # INCLUDE
# obstacles |= LineString(X[6:19]) # EXCLUDE?
obstacles_finlandia |= LineString(D[1:3] + [X[6]]) # INCLUDE
obstacles_finlandia |= LineString(X[9:12]) # INCLUDE
# changes end
obstacles_finlandia |= LineString(A[1:5 + 1] + [A[8]])
obstacles_finlandia |= LineString(A[5:7 + 1])
obstacles_finlandia |= LineString(B[1:6 + 1])
obstacles_finlandia |= LineString(B[7:11 + 1])
# obstacles_finlandia |= LineString(D[1:7 + 1])
# obstacles_finlandia |= LineString(D[8:11 + 1])
# obstacles_finlandia |= LineString(D[12:19 + 1])
obstacles_finlandia |= LineString(E[1:4 + 1])
obstacles_finlandia |= LineString(E[5:8 + 1])
obstacles_finlandia |= LineString(E[9:12 + 1])
obstacles_finlandia |= LineString(E[13:16 + 1])
obstacles_finlandia |= LineString(E[17:21 + 1] + [E[23]])
obstacles_finlandia |= LineString(E[24:26 + 1])
obstacles_finlandia |= LineString(E[27:31 + 1])
obstacles_finlandia |= LineString(E[32:34 + 1])
obstacles_finlandia |= LineString(E[35:36 + 1])
obstacles_finlandia |= LineString(E[35:36 + 1])
obstacles_finlandia |= LineString(E[37:39 + 1])
obstacles_finlandia |= LineString([D[12]] + [E[11]] + [E[10]] + [E[7]] + [E[6]] + [E[3]] + [E[2]])
# Benchrows
# for i in range(1, 18):
# obstacles |= LineString([G[i], H[i], I[i]])
# obstacles |= LineString([U[i], V[i], W[i]])
# obstacles |= LineString([Z[i], J[i], K[i]])
finlandiahall = Polygon(
[O[12], E[12], E[9], E[8], E[5], E[1], O[11], O[16],
O[13], O[14], O[15], O[5]])
foyer = Polygon([B[6], C[12], E[15], E[21], E[23], E[38], E[39], B[1]])
helsinkihall = Polygon([O[4], C[11], C[12], C[13], C[15], C[16], C[17],
O[1], B[8], B[9], C[3], C[4], O[2], O[3]])
piazza_1 = Polygon(
[C[11], E[16], E[13], O[6], O[7], (77.5, 24.767), (77.5, 26.847), (79, 35.527), D[6],
D[11], D[8]])
piazza_2 = Polygon(
[O[7], O[8], E[32], E[3], E[6], E[7], E[10], E[11], D[12]])
piazza_3 = Polygon(
[O[8], A[3], A[2], A[1], D[17], E[2], E[3]])
restaurant = Polygon(
[A[4], A[5], A[8], E[25], E[24], rest_midpoint]
)
outer_bubblegum_finlandia = Polygon(
[D[12], (75, 15), E[3], E[2], D[15], D[14], D[13]]
)
inner_bubblegum_finlandia = Polygon(
[D[12], E[11], E[10], E[7], E[6], E[3], E[2], D[15], D[14], D[13]])
orchestra_foyer = Polygon([X[1], X[2], X[3], D[13], X[5], X[6], D[2], X[13], X[14], C[5], C[4],
C[3], C[2], C[1], X[19]])
# New spawn areas for "Traffic jam puzzle"
finlandia_spawn = Polygon([O[12], O[5], O[13], E[6], E[8], E[10], E[12]])
piazza_3_spawn = Polygon([D[17], E[2], E[3], E[32], E[33], E[36]])
piazza_1_spawn = Polygon([D[12], E[11], O[7], O[6], E[13]])
restaurant_spawn = Polygon([rest1, rest2, E[21], E[25]])
foyer_spawn = Polygon([E[15], C[11], C[12], B[1], E[38], E[37]])
helsinki_spawn = Polygon([C[4], C[6], C[9], C[11], C[15], C[17], B[9]])
orchestra_spawn = Polygon([X[3], X[4], X[5], X[6], X[8]])
exit1 = LineString([D[17], A[1]])
exit2 = LineString([D[8], D[11]])
exit3 = LineString([E[31], O[9]])
exit4 = LineString([O[10], B[6]])
exit5 = LineString([Y[1], X[19]])
exit6 = LineString([X[11], X[12]])
fex = np.array([
[11.936, 3.715],
[12.311, 3.34],
[13.568, 3.064],
[14.23, 2.634],
[15.998, 3.024],
[16.799, 2.954],
[18.288, 4.784],
[18.233, 4.09]])
fex = fex * 10 / 1.5
slopes = np.array([
(fex[1][1] - fex[0][1]) / (fex[1][0] - fex[0][0]),
(fex[3][1] - fex[2][1]) / (fex[3][0] - fex[2][0]),
(fex[5][1] - fex[4][1]) / (fex[5][0] - fex[4][0]),
(fex[7][1] - fex[6][1]) / (fex[7][0] - fex[6][0])
])
gradient_vectors = np.array([
[-1, -slopes[0]],
[-1, -slopes[1]],
[-1, -slopes[2]],
[-1, -slopes[3]]
])
norms = np.hypot([gradient_vectors[0][0], gradient_vectors[1][0], gradient_vectors[2][0],
gradient_vectors[3][0]], [gradient_vectors[0][1], gradient_vectors[1][1],
gradient_vectors[2][1], gradient_vectors[3][1]])
gradient_vectors = np.array([
[slopes[0] / norms[0], -1 / norms[0]],
[slopes[1] / norms[1], -1 / norms[1]],
[slopes[2] / norms[2], -1 / norms[2]],
[slopes[3] / norms[3], 1 / norms[3]]
])
dx = 0.2
fex = np.array([
[11.936 + dx * gradient_vectors[0][0], 3.715 + dx * gradient_vectors[0][1]],
[12.311 + dx * gradient_vectors[0][0], 3.34 + dx * gradient_vectors[0][1]],
[13.568 + dx * gradient_vectors[1][0], 3.064 + dx * gradient_vectors[1][1]],
[14.23 + dx * gradient_vectors[1][0], 2.634 + dx * gradient_vectors[1][1]],
[15.998 + 0.3 * dx * gradient_vectors[2][0], 3.024 + 0.3 * dx * gradient_vectors[2][1]],
[16.799 + 0.3 * dx * gradient_vectors[2][0], 2.954 + 0.3 * dx * gradient_vectors[2][1]],
[18.288 + dx * gradient_vectors[3][0], 4.784 + dx * gradient_vectors[3][1]],
[18.233 + dx * gradient_vectors[3][0], 4.09 + dx * gradient_vectors[3][1]]])
fex = fex * 10 / 1.5
fexit1 = LineString([fex[0], fex[1]])
fexit2 = LineString([fex[2], fex[3]])
fexit3 = LineString([fex[4], fex[5]])
fexit4 = LineString([fex[6], fex[7]])
# fexit1 = LineString([D[12], E[11]])
# fexit2 = LineString([E[10], E[7]])
# fexit3 = LineString([E[6], E[3]])
# fexit4 = LineString([D[15], E[2]])
# Spawns
# Guides can be spawned anywhere (finlandiahall, foyer, helsinkihall, piazza_1, piazza_2, piazza_3, restaurant,
# orchestra_foyer), and followers to the "spawn areas" (finlandia_spawn, piazza_3_spawn, piazza_1_spawn,
# restaurant_spawn, foyer_spawn, helsinki_spawn, orchestra_spawn).
spawns = [
finlandiahall,
foyer,
helsinkihall,
piazza_1,
piazza_2,
piazza_3,
restaurant,
orchestra_foyer,
finlandia_spawn,
piazza_3_spawn,
piazza_1_spawn,
restaurant_spawn,
foyer_spawn,
helsinki_spawn,
orchestra_spawn
]
# Targets (exits)
targets = [exit1, exit3, exit4, exit5, exit6]
#targets = [exit1, exit2, exit3, exit4, exit5, fexit1, fexit2, fexit3, fexit4]
#targets = [exit1, exit2, exit3, exit4, exit5, exit6, fexit1, fexit2, fexit3, fexit4]
#targets = [exit1, exit2, exit3, exit4, exit5, exit6]
self.obstacles = obstacles # obstacles
self.obstacles_finlandia = obstacles_finlandia # obstacles_finlandia
self.targets = targets
self.spawns = spawns
#self.domain_f = self.convex_hull()
#self.domain = self.domain_f.difference(finlandiahall)
self.domain = self.convex_hull()
self.finlandiahall_extended = outer_bubblegum_finlandia # this should be made as small as possible
self.finlandiahall = inner_bubblegum_finlandia
self.helsinkihall = helsinkihall
self.orchestra_foyer = orchestra_foyer
self.piazza_2 = piazza_2
self.piazza_3 = piazza_3
class FinlandiaTalo2ndFloor(MultiAgentSimulation):
agent_type = Enum(
default_value=Circular,
values=(Circular, ThreeCircle))
body_type = Enum(
default_value='adult',
values=('adult',))
def attributes(self, familiar, in_finlandia: bool = False, in_finlandia_extended: bool = False,
has_target: bool = True, is_follower: bool = True):
def wrapper():
target = familiar if has_target else NO_TARGET
orientation = np.random.uniform(-np.pi, np.pi)
d = dict(
target=target,
is_leader=not is_follower,
is_follower=is_follower,
body_type=self.body_type,
orientation=orientation,
velocity=np.zeros(2),
angular_velocity=0.0,
target_direction=np.zeros(2),
target_orientation=orientation,
familiar_exit=familiar,
in_finlandia_extended=in_finlandia_extended,
in_finlandia=in_finlandia,
in_orchestra = False,
in_helsinki = False,
in_piazza_2 = False,
in_piazza_3 = False
)
return d
return wrapper
def attributes_leader(self, fin_ext_iter, fin_iter, target_iter, has_target: bool = True,
is_follower: bool = False):
def wrapper():
target = next(target_iter)
in_finlandia_extended = next(fin_ext_iter)
in_finlandia = next(fin_iter)
orientation = np.random.uniform(-np.pi, np.pi)
d = dict(
target=target,
is_leader=not is_follower,
is_follower=is_follower,
body_type=self.body_type,
orientation=orientation,
velocity=np.zeros(2),
angular_velocity=0.0,
target_direction=np.zeros(2),
target_orientation=orientation,
familiar_exit=4,
in_finlandia_extended=in_finlandia_extended,
in_finlandia=in_finlandia,
in_orchestra = False,
in_helsinki = False,
in_piazza_2 = False,
in_piazza_3 = False)
return d
return wrapper
@default('logic')
def _default_logic(self):
return Reset(self) << \
TargetReached(self) << (
Integrator(self) << (
Fluctuation(self),
Adjusting(self) << (
Navigation(self) << ExitDetection(
self) << LeaderFollower(self),
Orientation(self)),
AgentAgentInteractions(self),
AgentObstacleInteractions(self)))
@default('field')
def _default_field(self):
return FinlandiaTalo2ndFloorField()
@default('agents')
def _default_agents(self):
agents = Agents(agent_type=self.agent_type)
return agents
|
StarcoderdataPython
|
3293378
|
<filename>nipy/core/image/image_spaces.py
""" Utilities for working with Images and common neuroimaging spaces
>>> import numpy as np
>>> from nipy.core.api import Image, vox2mni, img_rollaxis, xyz_affine, as_xyz_affable
Make a standard 4D xyzt image in MNI space.
First the data and affine:
>>> data = np.arange(24).reshape((1,2,3,4))
>>> affine = np.diag([2,3,4,1])
We can add the TR (==2.0) to make the full 5x5 affine we need
>>> img = Image(data, vox2mni(affine, 2.0))
>>> img.affine
array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 2., 0.],
[ 0., 0., 0., 0., 1.]])
In this case the neuroimaging 'xyz_affine' is just the 4x4 from the 5x5 in the image
>>> xyz_affine(img)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
However, if we roll time first in the image array, we can't any longer get an
xyz_affine that makes sense in relationship to the voxel data:
>>> img_t0 = img_rollaxis(img, 't')
>>> xyz_affine(img_t0)
Traceback (most recent call last):
...
AffineError: Dropped dimensions not orthogonal to xyz
But we can fix this:
>>> img_t0_affable = as_xyz_affable(img_t0)
>>> xyz_affine(img_t0_affable)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
It also works with nibabel images, which can only have xyz_affines:
>>> import nibabel as nib
>>> nimg = nib.Nifti1Image(data, affine)
>>> xyz_affine(nimg)
array([[2, 0, 0, 0],
[0, 3, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 1]])
"""
import numpy as np
from nibabel.orientations import io_orientation
from ..reference import spaces as rsp
def xyz_affine(img, name2xyz=None):
""" Return xyz affine from image `img` if possible, or raise error
Parameters
----------
img : ``Image`` instance or nibabel image
It has a ``coordmap`` or method ``get_affine``
name2xyz : None or mapping
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Not used for nibabel `img` input.
Returns
-------
xyz_aff : (4,4) array
voxel to X, Y, Z affine mapping
Raises
------
SpaceTypeError : if `img` does not have an affine coordinate map
AxesError : if not all of x, y, z recognized in `img` ``coordmap`` range
AffineError : if axes dropped from the affine contribute to x, y, z
coordinates
Examples
--------
>>> from nipy.core.api import vox2mni, Image
>>> arr = np.arange(24).reshape((2,3,4,1))
>>> img = Image(arr, vox2mni(np.diag([2,3,4,5,1])))
>>> img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='array', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x', 'mni-y', 'mni-z', 't'), name='mni', coord_dtype=float64),
affine=array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 5., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> xyz_affine(img)
array([[ 2., 0., 0., 0.],
[ 0., 3., 0., 0.],
[ 0., 0., 4., 0.],
[ 0., 0., 0., 1.]])
Nibabel images always have xyz affines
>>> import nibabel as nib
>>> nimg = nib.Nifti1Image(arr, np.diag([2,3,4,1]))
>>> xyz_affine(nimg)
array([[2, 0, 0, 0],
[0, 3, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 1]])
"""
try:
return img.get_affine()
except AttributeError:
return rsp.xyz_affine(img.coordmap, name2xyz)
def is_xyz_affable(img, name2xyz=None):
""" Return True if the image `img` has an xyz affine
Parameters
----------
img : ``Image`` or nibabel ``SpatialImage``
If ``Image`` test ``img.coordmap``. If a nibabel image, return True
name2xyz : None or mapping
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Not used for nibabel `img` input.
Returns
-------
tf : bool
True if `img` has an xyz affine, False otherwise
Examples
--------
>>> from nipy.core.api import vox2mni, Image, img_rollaxis
>>> arr = np.arange(24).reshape((2,3,4,1))
>>> img = Image(arr, vox2mni(np.diag([2,3,4,5,1])))
>>> img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('i', 'j', 'k', 'l'), name='array', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('mni-x', 'mni-y', 'mni-z', 't'), name='mni', coord_dtype=float64),
affine=array([[ 2., 0., 0., 0., 0.],
[ 0., 3., 0., 0., 0.],
[ 0., 0., 4., 0., 0.],
[ 0., 0., 0., 5., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> is_xyz_affable(img)
True
>>> time0_img = img_rollaxis(img, 't')
>>> time0_img.coordmap
AffineTransform(
function_domain=CoordinateSystem(coord_names=('l', 'i', 'j', 'k'), name='array', coord_dtype=float64),
function_range=CoordinateSystem(coord_names=('t', 'mni-x', 'mni-y', 'mni-z'), name='mni', coord_dtype=float64),
affine=array([[ 5., 0., 0., 0., 0.],
[ 0., 2., 0., 0., 0.],
[ 0., 0., 3., 0., 0.],
[ 0., 0., 0., 4., 0.],
[ 0., 0., 0., 0., 1.]])
)
>>> is_xyz_affable(time0_img)
False
Nibabel images always have xyz affines
>>> import nibabel as nib
>>> nimg = nib.Nifti1Image(arr, np.diag([2,3,4,1]))
>>> is_xyz_affable(nimg)
True
"""
try:
xyz_affine(img, name2xyz)
except rsp.SpaceError:
return False
return True
def as_xyz_affable(img, name2xyz=None):
""" Return version of `img` that has a valid xyz affine, or raise error
Parameters
----------
img : ``Image`` instance or nibabel image
It has a ``coordmap`` attribute (``Image``) or a ``get_affine`` method
(nibabel image object)
name2xyz : None or mapping
Object such that ``name2xyz[ax_name]`` returns 'x', or 'y' or 'z' or
raises a KeyError for a str ``ax_name``. None means use module default.
Not used for nibabel `img` input.
Returns
-------
reo_img : ``Image`` instance or nibabel image
Returns image of same type as `img` input. If necessary, `reo_img` has
its data and coordmap changed to allow it to return an xyz affine. If
`img` is already xyz affable we return the input unchanged (``img is
reo_img``).
Raises
------
SpaceTypeError : if `img` does not have an affine coordinate map
AxesError : if not all of x, y, z recognized in `img` ``coordmap`` range
AffineError : if axes dropped from the affine contribute to x, y, z
coordinates
"""
try:
aff = xyz_affine(img, name2xyz)
except rsp.AffineError:
pass
else:
return img
cmap = img.coordmap
order = rsp.xyz_order(cmap.function_range, name2xyz)
# Reorder reference to canonical order
reo_img = img.reordered_reference(order)
# Which input axes correspond?
ornt = io_orientation(reo_img.coordmap.affine)
desired_input_order = np.argsort(ornt[:,0])
reo_img = reo_img.reordered_axes(list(desired_input_order))
try:
aff = xyz_affine(reo_img, name2xyz)
except rsp.AffineError:
raise rsp.AffineError("Could not reorder so xyz coordinates did not "
"depend on the other axis coordinates")
return reo_img
|
StarcoderdataPython
|
33355
|
from __future__ import absolute_import
import unittest
import sys
from testutils import ADMIN_CLIENT
from testutils import harbor_server
from library.project import Project
from library.user import User
from library.repository import Repository
from library.repository import push_image_to_project
from library.registry import Registry
from library.artifact import Artifact
from library.tag_immutability import Tag_Immutability
from library.repository import push_special_image_to_project
class TestTagImmutability(unittest.TestCase):
@classmethod
def setUpClass(self):
self.url = ADMIN_CLIENT["endpoint"]
self.user_password = "<PASSWORD>"
self.project= Project()
self.user= User()
self.repo= Repository()
self.registry = Registry()
self.artifact = Artifact()
self.tag_immutability = Tag_Immutability()
self.project_id, self.project_name, self.user_id, self.user_name = [None] * 4
self.user_id, self.user_name = self.user.create_user(user_password = self.user_password, **ADMIN_CLIENT)
self.USER_CLIENT = dict(with_signature = True, with_immutable_status = True, endpoint = self.url, username = self.user_name, password = self.user_password)
        self.existing_rule = dict(selector_repository="rel*", selector_tag="v2.*")
self.project_id, self.project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
def check_tag_immutability(self, artifact, tag_name, status = True):
for tag in artifact.tags:
if tag.name == tag_name:
self.assertTrue(tag.immutable == status)
return
raise Exception("No tag {} found in artifact {}".format(tag, artifact))
def test_disability_of_rules(self):
"""
Test case:
Test Disability Of Rules
Test step and expected result:
1. Create a new project;
2. Push image A to the project with 2 tags A and B;
3. Create a disabled rule matched image A with tag A;
4. Both tags of image A should not be immutable;
5. Enable this rule;
6. image A with tag A should be immutable.
"""
image_a = dict(name="image_disability_a", tag1="latest", tag2="6.2.2")
#1. Create a new project;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A to the project with 2 tags;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
        #3. Create a disabled rule matching image A;
rule_id = self.tag_immutability.create_rule(project_id, disabled = True, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Both tags of image A should not be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_disability_of_rules] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = False)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Enable this rule;
self.tag_immutability.update_tag_immutability_policy_rule(project_id, rule_id, disabled = False, **self.USER_CLIENT)
#6. image A with tag A should be immutable.
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_disability_of_rules] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
def test_artifact_and_repo_is_undeletable(self):
"""
Test case:
            Test Artifact And Repo Is Undeletable
Test step and expected result:
1. Create a new project;
2. Push image A to the project with 2 tags A and B;
            3. Create an enabled rule matching image A with tag A;
4. Tag A should be immutable;
5. Artifact is undeletable;
6. Repository is undeletable.
"""
image_a = dict(name="image_repo_undeletable_a", tag1="latest", tag2="1.3.2")
#1. Create a new project;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A to the project with 2 tags A and B;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
        #3. Create an enabled rule matching image A with tag A;
self.tag_immutability.create_rule(project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_artifact_and_repo_is_undeletable] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Artifact is undeletable;
self.artifact.delete_artifact(project_name, image_a["name"], image_a["tag1"], expect_status_code = 412,expect_response_body = "configured as immutable, cannot be deleted", **self.USER_CLIENT)
#6. Repository is undeletable.
self.repo.delete_repoitory(project_name, image_a["name"], expect_status_code = 412, expect_response_body = "configured as immutable, cannot be deleted", **self.USER_CLIENT)
def test_tag_is_undeletable(self):
"""
Test case:
            Test Tag Is Undeletable
Test step and expected result:
1. Push image A to the project with 2 tags A and B;
            2. Create an enabled rule matching image A with tag A;
3. Tag A should be immutable;
4. Tag A is undeletable;
5. Tag B is deletable.
"""
image_a = dict(name="image_undeletable_a", tag1="latest", tag2="9.3.2")
#1. Push image A to the project with 2 tags A and B;
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
        #2. Create an enabled rule matching image A with tag A;
self.tag_immutability.create_rule(self.project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag2"])[0:2] + "*", **self.USER_CLIENT)
#3. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(self.project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_tag_is_undeletable] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = True)
#4. Tag A is undeletable;
self.artifact.delete_tag(self.project_name, image_a["name"], image_a["tag1"], image_a["tag2"], expect_status_code = 412, **self.USER_CLIENT)
#5. Tag B is deletable.
self.artifact.delete_tag(self.project_name, image_a["name"], image_a["tag1"], image_a["tag1"], **self.USER_CLIENT)
def test_image_is_unpushable(self):
"""
Test case:
Test Image is Unpushable
Test step and expected result:
1. Create a new project;
2. Push image A to the project with 2 tags A and B;
            3. Create an enabled rule matching image A with tag A;
4. Tag A should be immutable;
            5. Cannot push an image with the same image name and tag name.
"""
image_a = dict(name="image_unpushable_a", tag1="latest", tag2="1.3.2")
#1. Create a new project;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A to the project with 2 tags A and B;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
        #3. Create an enabled rule matching image A with tag A;
self.tag_immutability.create_rule(project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_image_is_unpushable] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Can not push image with the same image name and with the same tag name.
push_image_to_project(project_name, harbor_server, self.user_name, self.user_password, "<PASSWORD>", image_a["tag1"],
new_image = image_a["name"], expected_error_message = "configured as immutable")
def test_copy_disability(self):
"""
Test case:
Test Copy Disability
Test step and expected result:
1. Create 2 projects;
2. Push image A with tag A and B to project A, push image B which has the same image name and tag name to project B;
            3. Create an enabled rule matching image A with tag A;
4. Tag A should be immutable;
5. Can not copy artifact from project A to project B with the same repository name.
"""
image_a = dict(name="image_copy_disability_a", tag1="latest", tag2="1.3.2")
#1. Create 2 projects;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
_, project_name_src = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A with tag A and B to project A, push image B which has the same image name and tag name to project B;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
push_special_image_to_project(project_name_src, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
        #3. Create an enabled rule matching image A with tag A;
self.tag_immutability.create_rule(project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_copy_disability] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Can not copy artifact from project A to project B with the same repository name.
artifact_a_src = self.artifact.get_reference_info(project_name_src, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_copy_disability] - artifact_a_src:{}".format(artifact_a_src))
self.artifact.copy_artifact(project_name, image_a["name"], project_name_src+"/"+ image_a["name"] + "@" + artifact_a_src.digest, expect_status_code=412, expect_response_body = "configured as immutable, cannot be updated", **self.USER_CLIENT)
#def test_replication_disability(self):
# pass
def test_priority_of_rules(self):
"""
Test case:
            Test Priority Of Rules (an excluding rule will not affect a matching rule)
Test step and expected result:
1. Push image A, B and C, image A has only 1 tag named tag1;
2. Create a matching rule that matches image A and tag named tag2 which is not exist;
3. Create a excluding rule to exlude image A and B;
4. Add a tag named tag2 to image A, tag2 should be immutable;
5. Tag2 should be immutable;
            6. All tags in image B should not be immutable;
            7. All tags in image C should be immutable;
8. Disable all rules.
"""
image_a = dict(name="image_priority_a", tag1="latest", tag2="6.3.2")
image_b = dict(name="image_priority_b", tag1="latest", tag2="0.12.0")
image_c = dict(name="image_priority_c", tag1="latest", tag2="3.12.0")
#1. Push image A, B and C, image A has only 1 tag named tag1;
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"]])
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_b["name"], [image_b["tag1"],image_b["tag2"]])
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_c["name"], [image_c["tag1"],image_c["tag2"]])
#2. Create a matching rule that matches image A and tag named tag2 which is not exist;
rule_id_1 = self.tag_immutability.create_rule(self.project_id, selector_repository=image_a["name"], selector_tag=image_a["tag2"], **self.USER_CLIENT)
        #3. Create an excluding rule to exclude images A and B;
rule_id_2 = self.tag_immutability.create_rule(self.project_id, selector_repository_decoration = "repoExcludes",
selector_repository="{image_priority_a,image_priority_b}", selector_tag="**", **self.USER_CLIENT)
#4. Add a tag named tag2 to image A, tag2 should be immutable;
self.artifact.create_tag(self.project_name, image_a["name"], image_a["tag1"], image_a["tag2"], **self.USER_CLIENT)
#5. Tag2 should be immutable;
artifact_a = self.artifact.get_reference_info(self.project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_priority_of_rules] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = False)
        #6. All tags in image B should not be immutable;
artifact_b = self.artifact.get_reference_info(self.project_name, image_b["name"], image_b["tag2"], **self.USER_CLIENT)
print("[test_priority_of_rules] - artifact:{}".format(artifact_b))
self.assertTrue(artifact_b)
self.check_tag_immutability(artifact_b, image_b["tag2"], status = False)
self.check_tag_immutability(artifact_b, image_b["tag1"], status = False)
        #7. All tags in image C should be immutable;
artifact_c = self.artifact.get_reference_info(self.project_name, image_c["name"], image_c["tag2"], **self.USER_CLIENT)
print("[test_priority_of_rules] - artifact:{}".format(artifact_c))
self.assertTrue(artifact_c)
self.check_tag_immutability(artifact_c, image_c["tag2"], status = True)
self.check_tag_immutability(artifact_c, image_c["tag1"], status = True)
#8. Disable all rules.
self.tag_immutability.update_tag_immutability_policy_rule(self.project_id, rule_id_1, disabled = True, **self.USER_CLIENT)
self.tag_immutability.update_tag_immutability_policy_rule(self.project_id, rule_id_2, disabled = True, **self.USER_CLIENT)
    def test_add_existing_rule(self):
        """
        Test case:
            Test Add Existing Rule
        Test step and expected result:
            1. Create an immutability policy rule A;
            2. Fail to create rule B which has the same configuration as rule A.
        """
        self.tag_immutability.create_tag_immutability_policy_rule(self.project_id, **self.existing_rule, **self.USER_CLIENT)
        self.tag_immutability.create_tag_immutability_policy_rule(self.project_id, **self.existing_rule, expect_status_code = 409, **self.USER_CLIENT)
@classmethod
def tearDownClass(self):
print("Case completed")
if __name__ == '__main__':
suite = unittest.TestSuite(unittest.makeSuite(TestTagImmutability))
result = unittest.TextTestRunner(sys.stdout, verbosity=2, failfast=True).run(suite)
if not result.wasSuccessful():
raise Exception(r"Tag immutability test failed: {}".format(result))
|
StarcoderdataPython
|
3295389
|
<reponame>TheDevAtlas/IRIS
# <NAME> 2021 #
# Create Or Load Bot For Use #
# Imports For Data Display And Manipulation
import datetime as dt # The Date And Time
import numpy as np # Basic Functions
import pandas as pd # Data Manipulation And Translations
import matplotlib.pyplot as plt # Visualization
# Imports From The Web
import pandas_datareader as web # Yahoo Finance, World Bank, Ect
# Imports For Machine Learning
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
def createNewBot():
print("New Bot")
    # Create Bot From Attributes Given From botMain
# Save The Bot With A New Name To A File
def trainBot():
print("Train Bot")
# Load Bot Name Given From botMain
# Setup The Training Numbers Given From botMain
# Run The Training
# Save The Bot
def runBot():
print("Run Bot")
# Load Bot Name Given From botMain
# Setup The Training Numbers Given From botMain
# Run The Prediction
# Output Data To Text File
# Output Data To A Graph
|
StarcoderdataPython
|
1654492
|
<filename>cdk-layer-factory/functions/start_layer_creation.py
import boto3
import datetime
import os
ec2_client = boto3.client('ec2')
iam_client = boto3.client('iam')
ddb_client = boto3.client('dynamodb')
#ami_id = 'ami-0a8b4cd432b1c3063'
ami_id = 'ami-0ef2003049dd4c459'
instance_type = 't3.small'
key_name = 'key.pem'
subnet_id = 'subnet-c06751cf'
instance_profile_arn = os.environ['INSTANCE_PROFILE_ARN']
layer_dest_bucket = os.environ['LAYER_DEST_BUCKET']
def get_python_commands(runtimes, dependencies, layer_name, token, datetime_str):
init_script = ['#!/bin/bash','cd ~\n']
for dependency in dependencies:
init_script.append(f'echo "{dependency}" >> requirements.txt\n')
#layer_publish_command = f'aws lambda publish-layer-version --layer-name {layer_name}-layer-factory --description "{layer_name} created by Layer Factory" --zip-file fileb://archive.zip --compatible-runtimes'
esc_quote = r'\"'
for runtime in runtimes:
init_script.append(f'mkdir -p "python/lib/{runtime}/site-packages/"')
init_script.append(f'docker run -v "$PWD":/var/task "public.ecr.aws/sam/build-{runtime}" /bin/sh -c "pip install -r requirements.txt -t python/lib/{runtime}/site-packages/; exit"')
#layer_publish_command += f' "{runtime}"'
#layer_publish_command += ' --region "us-east-1"'
init_script_wrapup = [
'zip -r archive.zip python > /dev/null',
f'aws s3 cp archive.zip s3://{layer_dest_bucket}/{layer_name}-{datetime_str}.zip',
        f'aws s3 presign "s3://{layer_dest_bucket}/{layer_name}-{datetime_str}.zip" --region us-east-1 --expires-in 604800 >> presigned',
'export PRESIGNED_URL=$(cat presigned)',
#layer_publish_command,
# TODO: make region configurable
f'aws stepfunctions send-task-success --task-token "{token}" --task-output "{{{esc_quote}result{esc_quote}: {esc_quote}Success!{esc_quote}, {esc_quote}presigned_url{esc_quote}: {esc_quote}$PRESIGNED_URL{esc_quote}, {esc_quote}layer_name{esc_quote}: {esc_quote}{layer_name}{esc_quote}, {esc_quote}s3_key{esc_quote}: {esc_quote}{layer_name}-{datetime_str}.zip{esc_quote}}}" --region us-east-1',
'shutdown -h now'
]
init_script.extend(init_script_wrapup)
init_script = '\n\n'.join(init_script)
return init_script
# init_script = [
# '#!/bin/bash',
#'sleep 60\n'
# 'cd ~\n'
#'sudo yum update -y\n',
#'sudo yum install ec2-instance-connect -y',
#'sudo yum search docker',
#'sudo yum install docker -y',
#'sudo systemctl enable docker.service',
#'sudo systemctl start docker.service',
#'sudo systemctl status docker.service',
# ]
def lambda_handler(event, context):
token = event['token']
my_input = event['input']
now = datetime.datetime.now()
datetime_str = f'{now.year}-{now.month}-{now.day}-{now.hour}:{now.minute}:{now.second}'
# e.g. 'ulid-py'
dependencies = my_input['dependencies'].split(',')
layer_name = my_input['layer_name']
#colloquial_name = event['colloquial_name']
# e.g. '1.1.0'
runtimes = my_input['runtimes']
language = my_input['language']
#print(f'{language=}')
    if language == 'python':
        init_script = get_python_commands(runtimes, dependencies, layer_name, token, datetime_str)
    else:
        # Only Python layers are supported; fail fast instead of hitting a
        # NameError on init_script below.
        raise ValueError(f"Unsupported language: {language}")
response = ec2_client.run_instances(
BlockDeviceMappings=[
{
'DeviceName': '/dev/xvda',
'Ebs': {
'Encrypted': True,
'DeleteOnTermination': True,
'VolumeSize': 16,
'VolumeType': 'gp2'
},
},
],
ImageId=ami_id,
InstanceType=instance_type,
IamInstanceProfile={
'Arn': instance_profile_arn
},
#KeyName=key_name,
SubnetId=subnet_id,
MaxCount=1,
MinCount=1,
InstanceInitiatedShutdownBehavior='terminate',
UserData=init_script,
TagSpecifications=[
{
'ResourceType': 'instance',
'Tags': [
{
'Key': 'APPLICATION',
'Value': 'CDK_LAMBDA_LAYER_FACTORY'
},
]
},
],
)
instance = response['Instances'][0]
instance_id = instance['InstanceId']
return {"result": "success creating EC2 instance", "instance_id": instance_id}
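# Illustrative only: the event shape this handler expects, reconstructed from
# the field accesses above (the concrete values are made up).
# {
#     "token": "<step functions task token>",
#     "input": {
#         "layer_name": "my-layer",
#         "dependencies": "requests,ulid-py",      # comma-separated string
#         "runtimes": ["python3.8", "python3.9"],  # list of Lambda runtimes
#         "language": "python"
#     }
# }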
|
StarcoderdataPython
|
3282051
|
# coding:utf-8
from tasks.workers import app
from page_get import user as user_get
from db.seed_ids import get_seed_ids, get_seed_by_id, insert_seeds, set_seed_other_crawled
@app.task(ignore_result=True)
def crawl_follower_fans(uid):
seed = get_seed_by_id(uid)
if seed.other_crawled == 0:
rs = user_get.get_fans_or_followers_ids(uid, 1)
rs.extend(user_get.get_fans_or_followers_ids(uid, 2))
datas = set(rs)
        # If data already exists, just skip it
if datas:
insert_seeds(datas)
set_seed_other_crawled(uid)
@app.task(ignore_result=True)
def crawl_person_infos(uid):
"""
Crawl user info and their fans and followers
    Because of limits in weibo's backend, we can only crawl 5 pages of fans and followers.
    We also have no permission to view an enterprise user's followers and fans info.
:param uid: current user id
:return: None
"""
if not uid:
return
user = user_get.get_profile(uid)
    # If it's an enterprise user, just skip it
if user.verify_type == 2:
set_seed_other_crawled(uid)
return
# Crawl fans and followers
app.send_task('tasks.user.crawl_follower_fans', args=(uid,), queue='fans_followers',
routing_key='for_fans_followers')
@app.task(ignore_result=True)
def excute_user_task():
seeds = get_seed_ids()
if seeds:
for seed in seeds:
app.send_task('tasks.user.crawl_person_infos', args=(seed.uid,), queue='user_crawler',
routing_key='for_user_info')
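# Illustrative sketch only (not part of the original module): excute_user_task
# is the entry point, so a deployment would typically either fire it manually
# with .delay() or register it in a Celery beat schedule such as the one
# below. The task/queue names match the send_task calls above; the interval
# is an assumption.
#
#     excute_user_task.delay()
#
#     app.conf.beat_schedule = {
#         'crawl-seed-users': {
#             'task': 'tasks.user.excute_user_task',
#             'schedule': 3600.0,  # every hour (illustrative)
#         },
#     }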
|
StarcoderdataPython
|
1612359
|
<filename>PyPI/package_analysis.py
#!/usr/bin/env python3
"""Analysis of Python packages."""
import json
import logging
import os
import re
import shutil
import sys
import tarfile
from os import walk
import pymysql.cursors
if sys.version_info[0] == 2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s",
level=logging.DEBUG,
stream=sys.stdout,
)
def main(package_name, package_url, release_id=None):
"""
Parameters
----------
package_name : str
package_url : str
Path to a Python package.
    release_id : int, optional
"""
pkg_name = package_name
if release_id is None:
raise NotImplementedError("Look up the release id.")
filepaths, download_dir = download(package_url)
if download_dir is None:
return
with open("secret.json") as f:
mysql = json.load(f)
package_id = get_pkg_id_by_name(pkg_name, mysql)
if package_id is None:
logging.info("Package id of '%s' could not be determined", pkg_name)
sys.exit(1)
required_packages = get_requirements(filepaths, pkg_name)
imported_packages = get_imports(filepaths, pkg_name)
setup_packages = get_setup_packages(filepaths, pkg_name)
store_dependencies(
mysql,
package_id,
required_packages,
imported_packages,
setup_packages,
package_url,
release_id,
)
remove_unpacked(download_dir)
def store_dependencies(
mysql,
package_id,
required_packages,
imported_packages,
setup_packages,
package_url,
release_id,
):
"""
Parameters
----------
mysql : dict
MySQL database connection information
package_id : int
required_packages : list
imported_packages : list
setup_packages : list
package_url : str
release_id : int
"""
connection = pymysql.connect(
host=mysql["host"],
user=mysql["user"],
passwd=mysql["<PASSWORD>"],
db=mysql["db"],
cursorclass=pymysql.cursors.DictCursor,
charset="utf8",
)
insert_dependency_db(imported_packages, "imported", package_id, mysql, connection)
insert_dependency_db(
required_packages, "requirements.txt", package_id, mysql, connection
)
insert_dependency_db(setup_packages, "setup.py", package_id, mysql, connection)
# Store that the package was downloaded
# and analyzed
cursor = connection.cursor()
sql = "UPDATE `releases` SET `downloaded_bytes` = %s WHERE `id` = %s;"
pkg_name = os.path.basename(package_url)
target_dir = "pypipackages"
target = os.path.join(target_dir, pkg_name)
downloaded_bytes = os.path.getsize(target)
cursor.execute(sql, (downloaded_bytes, release_id))
connection.commit()
cursor.close()
connection.close()
def insert_dependency_db(imported_packages, req_type, package_id, mysql, connection):
"""
Parameters
----------
imported_packages : list
req_type : str
'setup.py', 'requirements.txt' or 'imported'
package_id : int
mysql : dict
credentials for the connection
connection : pymysql connection object
"""
cursor = connection.cursor()
for pkg, times in imported_packages.items():
package_info = {
"package": package_id,
"needs_package": get_pkg_id_by_name(pkg, mysql),
"times": times,
"req_type": req_type,
}
if package_info["needs_package"] is not None:
try:
                sql = (
                    "INSERT INTO `dependencies` "
                    "(`package`, `needs_package`, `req_type`, `times`) "
                    " VALUES (%s, %s, %s, %s);"
                )
                cursor.execute(
                    sql,
                    (
                        package_info["package"],
                        package_info["needs_package"],
                        package_info["req_type"],
                        package_info["times"],
                    ),
                )
connection.commit()
except pymysql.err.IntegrityError as e:
if "Duplicate entry" not in str(e):
logging.warning(e)
else:
# Packages which were imported, but not found on PyPI
# TODO: This still needs work. 21737 imports were not found on PyPI
# amongst them:
# mysqlDB
# mySQLdb
# MySQLdb
with open("not-found.csv", "a") as f:
f.write("%s\n" % pkg)
def get_pkg_id_by_name(pkg_name, mysql):
"""
Parameters
----------
pkg_name : str
mysql : dict
MySQL database connection information
Returns
-------
int or None
"""
connection = pymysql.connect(
host=mysql["host"],
user=mysql["user"],
passwd=mysql["<PASSWORD>"],
db=mysql["db"],
cursorclass=pymysql.cursors.DictCursor,
charset="utf8",
)
cursor = connection.cursor()
sql = "SELECT id FROM `packages` WHERE `name` = %s"
cursor.execute(sql, (pkg_name,))
id_number = cursor.fetchone()
if id_number is not None and "id" in id_number:
return id_number["id"]
else:
return None
def get_pkg_extension(package_url):
"""
Parameters
----------
package_url : str
Returns
-------
    str or None
File extension of the package given by url
"""
not_implemented_fileending = [".msi", ".rpm", ".deb", ".tgz", ".dmg"]
if package_url.endswith(".tar.gz"):
return ".tar.gz"
elif package_url.endswith(".tar.bz"):
return ".tar.bz"
elif package_url.endswith(".tar.bz2"):
return ".tar.bz2"
elif package_url.endswith(".whl"):
return ".whl"
elif package_url.endswith(".zip"):
return ".zip"
elif package_url.endswith(".egg"):
return ".egg"
elif package_url.endswith(".exe"):
        logging.info("Skip '%s' for safety reasons.", package_url)
return None
elif any(package_url.endswith(x) for x in not_implemented_fileending):
pass # TODO: Implement
else:
with open("todo-unknown-pkg-extension.csv", "a") as f:
f.write("%s\n" % package_url)
return None
def download(package_url):
"""
Parameters
----------
package_url : str
URL of a Python package
Returns
-------
tuple
(List of paths to all unpackaged files, folder where it got extracted)
"""
extension = get_pkg_extension(package_url)
if extension is None:
return ([], None)
file_ending_len = len(extension)
target_dir = "pypipackages"
if not os.path.exists(target_dir):
os.makedirs(target_dir)
pkg_name = os.path.basename(package_url)
target = os.path.join(target_dir, pkg_name)
if not os.path.exists(target):
try:
urlretrieve(package_url, target)
logging.info("Package '%s' downloaded.", pkg_name)
        except Exception:
return ([], None)
else:
pass
# logging.info("Package '%s' was already downloaded.", pkg_name)
# Unpack it
if not os.path.exists(target[:-file_ending_len]):
is_tarfile = (
package_url.endswith(".tar.gz")
or package_url.endswith(".tar.bz")
or package_url.endswith(".tar.bz2")
)
if is_tarfile:
try:
with tarfile.open(target) as tar:
tar.extractall(target[:-file_ending_len])
            except Exception:
# Something is wrong with the tar file
return ([], None)
elif (
package_url.endswith(".whl")
or package_url.endswith(".zip")
or package_url.endswith(".egg")
):
import zipfile
try:
with zipfile.ZipFile(target) as tar:
tar.extractall(target[:-file_ending_len])
            except Exception:
# Something is wrong with the zip file
return ([], None)
else:
raise NotImplementedError
filepaths = []
for (dirpath, dirnames, filenames) in walk(target[:-file_ending_len]):
filepaths.extend([os.path.join(dirpath, f) for f in filenames])
return (filepaths, target[:-file_ending_len])
def remove_unpacked(download_dir):
"""
Clean up the folders to prevent HDD of getting full.
"""
shutil.rmtree(download_dir)
def get_requirements(filepaths, pkg_name):
"""
Get a list of all "officially" set requirements.
Parameters
----------
filepaths : list
Paths to files of a package
pkg_name : str
Name of the currently parsed package.
Returns
-------
    dict
"Officially" set requirements
"""
imports = {}
requirements_file = [f for f in filepaths if f.endswith("requirements.txt")]
if len(requirements_file) > 0:
requirements_file = requirements_file[0]
# TODO: parse requirements.txt
else:
# logging.debug("Package '%s' has no requirements.txt.",
# pkg_name)
pass
return imports
def get_imports(filepaths, pkg_name):
"""
Get a list of all imported packages.
Parameters
----------
filepaths : list
Paths to files of a package
pkg_name : str
Name of the currently parsed package.
Returns
-------
dict
Names of packages which got imported and how often
"""
    # TODO: Not all python files end with .py. We lose some.
filepaths = [f for f in filepaths if f.endswith(".py")]
simple_pattern = re.compile(r"^\s*import\s+([a-zA-Z][a-zA-Z0-9_]*)", re.MULTILINE)
    from_pattern = re.compile(
        r"^\s*from\s+([a-zA-Z][a-zA-Z0-9_]*)\s+import", re.MULTILINE
    )
imports = {}
for filep in filepaths:
try:
with open(filep) as f:
content = f.read()
        except Exception:
            # There is something wrong with the file encoding, or the file
            # cannot be opened. Skip this file and keep going.
            continue
imported = simple_pattern.findall(content) + from_pattern.findall(content)
for import_pkg_name in imported:
if import_pkg_name in imports:
imports[import_pkg_name] += 1
else:
imports[import_pkg_name] = 1
return imports
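# Illustrative example of what the two patterns inside get_imports() match
# (the snippet below is made up):
#
#     src = "import os\nfrom collections import OrderedDict\n    import re"
#     simple_pattern.findall(src)  # -> ['os', 're']
#     from_pattern.findall(src)    # -> ['collections']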
def get_setup_packages(filepaths, pkg_name):
"""
    Get the packages declared in setup.py.
Parameters
----------
filepaths : list
Paths to files of a package
pkg_name : str
Name of the currently parsed package.
Returns
-------
dict
        Names of packages required by setup.py and how often they appear
"""
setup_py_file = [f for f in filepaths if f.endswith("setup.py")]
imports = {}
if len(setup_py_file) > 0:
setup_py_file = setup_py_file[0]
# logging.info(setup_py_file)
# TODO: parse setup.py
# can be dangerous
# look for 'install_requires' and 'dependency_links'
# ... may the force be with you
# RegEx is complicated:
# setup\(.*?(install_requires\s*=\s*\[.*?").*?\) <--- doesn't work,
# as you can have variables
else:
# logging.debug("Package '%s' has no setup.py.",
# pkg_name)
pass
return imports
def get_parser():
"""The parser object for this script."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(
description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--name", dest="name", help="name of the package", required=True
)
parser.add_argument(
"-p",
"--package_url",
dest="package_url",
help="url where the package is",
required=True,
)
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.name, args.package_url)
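# Illustrative invocation (the URL is made up):
#
#     python3 package_analysis.py --name requests \
#         -p https://files.pythonhosted.org/.../requests-2.28.1.tar.gz
#
# Note that main() is called here without a release_id, so as written the
# script raises NotImplementedError("Look up the release id.") until that
# lookup is implemented.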
|
StarcoderdataPython
|
4836520
|
#!/usr/bin/python
# coding: utf8
from tools import MsSQL
connection = MsSQL()
def get_color_dict():
"""
key is color_code and value is color_name
:return:
"""
color_dict = dict()
sql = "select color_code,color from joom_color where color_code is not Null"
with connection as con:
cur = con.cursor()
cur.execute(sql)
ret = cur.fetchall()
for row in ret:
color_dict[row[0]] = row[1]
return color_dict
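# Illustrative usage (assumes the MsSQL connection configured in tools is
# reachable; 'B001' is a made-up color code):
#
#     colors = get_color_dict()
#     print(colors.get('B001', 'unknown'))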
|
StarcoderdataPython
|