filename
| text
|
---|---|
the-stack_106_21160
|
import requests
import xml.etree.ElementTree as ET
URL = "http://www.perseus.tufts.edu/hopper/xmlmorph?lang=greek&lookup={0}"
def doRequest(w):
r = requests.get(URL.format(w))
xml = ET.fromstring(r.text)
forms = {}
for x in xml.iter('analysis'):
lemma = x.find('lemma').text
expanded = x.find('expandedForm').text
form = x.find('form').text
if form in forms:
if not(lemma in forms[form]):
forms[form].append(lemma)
else:
forms[form] = [lemma]
return forms
def get_unknown_words(ws):
out = {}
for w in ws:
print('getting: ' + w)
res = doRequest(w)
print("recieved data")
for k, v in res.items():
if k in out:
out[k] = list(set(out[k] + v))
else:
out[k] = v
return out
def format_output(data):
out = [x + "\t" + ",".join(y) for x,y in data.items()]
return "\n".join(out)
if __name__ == '__main__':
import sys
args = sys.argv[1:]
print("input file should be in beta codes, one word per line")
words = None
with open(args[0], 'r') as f:
words = map(lambda x: x.strip(), f.read().split("\n"))
output = format_output(get_unknown_words(words))
with open(args[1], 'w', encoding = 'utf-8') as f:
f.write(output)
print("done")
|
the-stack_106_21161
|
import requests
from urllib.parse import urljoin
BASE_URL = 'https://hangman-api.herokuapp.com'
def createGame():
try:
url = urljoin(BASE_URL, '/hangman')
response = requests.post(url)
print(response.json())
return response.json()
except Exception as err:
print(err)
def sendGuess(token, letter):
try:
url = urljoin(BASE_URL, f'/hangman?token={token}&letter={letter}')
response = requests.put(url)
print(response.json())
return response.json()
except Exception as err:
print(err)
def getAnswer(token):
try:
url = urljoin(BASE_URL, f'/hangman?token={token}')
response = requests.get(url)
print(response.json())
return response.json()
except Exception as err:
print(err)
def getHint(token):
try:
url = urljoin(BASE_URL, f'/hangman/hint?token={token}')
response = requests.get(url)
print(response.json())
return response.json()
except Exception as err:
print(err)
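# A minimal usage sketch of the four helpers above. This module never inspects the
# JSON payloads, so the 'token' field name below is an assumption, not something
# the code guarantees.
if __name__ == '__main__':
    game = createGame()                      # POST /hangman to start a new game
    if game and 'token' in game:             # 'token' key is assumed, see note above
        sendGuess(game['token'], 'e')        # PUT a guess for the letter 'e'
        getHint(token=game['token'])         # GET /hangman/hint
        getAnswer(token=game['token'])       # GET /hangman with the token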
|
the-stack_106_21163
|
import datetime
import logging
from collections import namedtuple
from decimal import Decimal
import sys
import re
import string
import argparse
import beancount.loader
import beancount.core
import time
import asyncio
import tempfile
try:
import requests
except ImportError:
requests = None
try:
import aiohttp
except ImportError:
aiohttp = None
assert requests or aiohttp, "Must have either the requests module installed or the aiohttp module installed."
API = 'https://api.youneedabudget.com/v1/budgets'
def make_budget(n):
c = n['currency_format']
n['currency_format'] = make_tuple('CurrencyFormat', c)
return make_tuple('Budget', n)
def make_transaction(n):
s = n['subtransactions']
sub = [make_tuple('Subtransaction', x) for x in s]
n['subtransactions'] = sub
return make_tuple('Transaction', n)
def make_tuple(name, d):
return namedtuple(name, d.keys())(*d.values())
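# For example (values are illustrative only):
#   make_tuple('Budget', {'id': 'abc', 'name': 'Family'})
#   -> Budget(id='abc', name='Family')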
# TODO: how does the default budget work?
# (“last-used” can be used to specify the last used budget and “default” can be used if default budget selection is enabled)
def budget_from_json(budget, json_budgets):
if len(json_budgets) > 1:
if not budget:
raise Exception('No budget specified.', [a['name'] for a in json_budgets])
else:
b = [a for a in json_budgets if a['name'] == budget]
if len(b) != 1:
raise Exception(f'Could not find any budget with name {budget}.')
b = b[0]
return make_budget(b)
else:
b = json_budgets[0]
return make_budget(b)
def get_budget(auth, budget=None):
response = requests.get(API, headers=auth)
response.raise_for_status()
d = response.json()
return budget_from_json(budget, d['data']['budgets'])
# Unlike the other YNAB fetchers, this returns the raw JSON instead of the
# converted namedtuples. Should we change this to do the same? Make this a
# generator?
def get_transactions(auth, budget_id, since=None, account_id=None):
if account_id:
if since:
logging.info(f'Only fetching transactions for account_id {account_id}.')
logging.info(f'Only fetching transactions since {since}.')
response = requests.get(f'{API}/{budget_id}/accounts/{account_id}/transactions?since_date={since}', headers=auth)
else:
logging.info(f'Only fetching transactions for account_id {account_id}.')
response = requests.get(f'{API}/{budget_id}/accounts/{account_id}/transactions', headers=auth)
else:
if since:
logging.info(f'Only fetching transactions since {since}.')
response = requests.get(f'{API}/{budget_id}/transactions?since_date={since}', headers=auth)
else:
response = requests.get(f'{API}/{budget_id}/transactions', headers=auth)
response.raise_for_status()
transactions = response.json()
# with open('txn.json', 'w+') as f:
# f.write(response.text)
return transactions['data']['transactions']
def build_account_mapping(entries):
mapping = {}
for entry in entries:
if isinstance(entry, beancount.core.data.Open):
if 'ynab-id' in entry.meta:
mapping[entry.meta['ynab-id']] = entry.account
return mapping
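# This expects beancount `open` directives carrying a `ynab-id` metadata key,
# e.g. (account name and UUID are illustrative only):
#
#   2020-01-01 open Assets:Checking
#     ynab-id: "3fd4a386-1d46-4ad5-a2d0-0123456789ab"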
def accounts_from_json(json_accounts):
result = {}
for a in json_accounts:
a['name'] = ynab_normalize(a['name'])
account = make_tuple('Account', a)
result[account.id] = account
return result
def get_ynab_accounts(auth, budget_id):
result = {}
response = requests.get(f'{API}/{budget_id}/accounts', headers=auth)
response.raise_for_status()
return accounts_from_json(response.json()['data']['accounts'])
def categories_from_json(json_categories):
category_result = {}
group_result = {}
# categories come as a nested structure with groups at the top
# and the actual categories underneath the group level
for g in json_categories:
g['name'] = ynab_normalize(g['name'])
group = make_tuple('CategoryGroup', g)
group_result[group.id] = group
for c in group.categories:
c['name'] = ynab_normalize(c['name'])
category = make_tuple('Category', c)
category_result[category.id] = category
return group_result, category_result
def get_ynab_categories(auth, budget_id):
response = requests.get(f'{API}/{budget_id}/categories', headers=auth)
response.raise_for_status()
return categories_from_json(response.json()['data']['category_groups'])
def ynab_normalize(name):
table = str.maketrans('', '', string.punctuation)
no_punctuation = name.translate(table)
no_spaces = no_punctuation.replace(' ', '-')
return no_spaces
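# For example, ynab_normalize('Inflow: To be Budgeted') returns
# 'Inflow-To-be-Budgeted': punctuation is stripped and spaces become hyphens.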
def fmt_ynab_category(id, groups, categories):
c = categories[id]
group_id = c.category_group_id
g = groups[group_id]
n = f'{g.name}:{c.name}'
return n
def list_ynab_ids(account_mapping, accounts, groups, categories):
def pretty_print(ids, formatter):
for item in sorted(ids.items(), key=lambda x: x[1]):
print(item[0], end=' ')
print(formatter(item[1]))
bean_account = account_mapping.get(item[0], '(none)')
print(' ' * 36, bean_account)
pretty_print(accounts, formatter=lambda x: x.name)
pretty_print(categories, formatter=lambda x: fmt_ynab_category(x.id, groups, categories))
def get_target_account(txn, adjustment_account):
# subtransactions don't have a payee_name attribute, so we do this roundabout
# check instead....
if (getattr(txn, 'payee_name', None) == 'Reconciliation Balance Adjustment'
and txn.memo == 'Entered automatically by YNAB'
and adjustment_account):
logging.info(f'Using {adjustment_account} for reconciliation balance adjustment on transaction {txn.id}.')
return adjustment_account
elif txn.category_id:
return to_bean(txn.category_id)
elif txn.transfer_account_id:
return to_bean(txn.transfer_account_id)
else:
# This can only happen with YNAB's Tracking accounts. We can't generate
# a valid beancount entry, so we generate an error message.
return '; FIXME. Error could only generate one leg from YNAB data.'
def get_ynab_data(token, budget_name, since, account_id):
logging.info('Using regular fetcher for YNAB.')
# BENCHMARK: benchmark vanilla vs. async
start_timing = time.time()
# to actually log in to YNAB we need to add this header to all requests.
auth_header = {'Authorization': f'Bearer {token}'}
logging.info('Fetching YNAB budget metadata.')
budget = get_budget(auth_header, budget=budget_name)
logging.info('Fetching YNAB account metadata.')
ynab_accounts = get_ynab_accounts(auth_header, budget.id)
logging.info('Fetching YNAB budget category metadata.')
ynab_category_groups, ynab_categories = get_ynab_categories(auth_header, budget.id)
logging.info('Fetching YNAB transactions.')
ynab_transactions = get_transactions(auth_header, budget.id, since=since, account_id=account_id)
# BENCHMARK: benchmark vanilla vs. async
end_timing = time.time()
logging.info(f'YNAB http requests took: {end_timing - start_timing}.')
return budget, ynab_accounts, ynab_category_groups, ynab_categories, ynab_transactions
def get_ynab_data_async(token, budget_name, since, account_id):
logging.info('Using asynchronous fetcher for YNAB.')
start_timing = time.time()
# to actually log in to YNAB we need to add this header to all requests.
auth_header = {'Authorization': f'Bearer {token}'}
async def fetch(url, session):
async with session.get(url, headers=auth_header) as response:
return await response.json()
async def run(r):
budget_id = None
tasks = []
async with aiohttp.ClientSession(raise_for_status=True) as session:
# We have to load the budget metadata first, to look up the ID
# for the budget name we are given
async with session.get(API, headers=auth_header) as response:
d = await response.json()
budget = budget_from_json(budget_name, d['data']['budgets'])
# Then we can do the next 3 in parallel.
task = asyncio.ensure_future(fetch(f'{API}/{budget.id}/accounts', session))
tasks.append(task)
task = asyncio.ensure_future(fetch(f'{API}/{budget.id}/categories', session))
tasks.append(task)
if account_id:
if since:
logging.info(f'Only fetching transactions for account_id {account_id}.')
logging.info(f'Only fetching transactions since {since}.')
task = asyncio.ensure_future(fetch(f'{API}/{budget.id}/accounts/{account_id}/transactions?since_date={since}', session))
else:
logging.info(f'Only fetching transactions for account_id {account_id}.')
task = asyncio.ensure_future(fetch(f'{API}/{budget.id}/accounts/{account_id}/transactions', session))
else:
if since:
logging.info(f'Only fetching transactions since {since}.')
task = asyncio.ensure_future(fetch(f'{API}/{budget.id}/transactions?since_date={since}', session))
else:
task = asyncio.ensure_future(fetch(f'{API}/{budget.id}/transactions', session))
tasks.append(task)
responses = await asyncio.gather(*tasks)
accounts = accounts_from_json(responses[0]['data']['accounts'])
category_groups, categories = categories_from_json(responses[1]['data']['category_groups'])
transactions = responses[2]['data']['transactions']
return budget, accounts, category_groups, categories, transactions
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(run(4))
budget, accounts, category_groups, categories, transactions = loop.run_until_complete(future)
# BENCHMARK: benchmark vanilla vs. async
end_timing = time.time()
logging.info(f'YNAB http requests took: {end_timing - start_timing}')
return budget, accounts, category_groups, categories, transactions
def get_existing_ynab_transaction_ids(entries):
seen = set()
for e in entries:
# We don't want to add Nones to the set
if isinstance(e, beancount.core.data.Transaction) and 'ynab-id' in e.meta:
seen.add(e.meta['ynab-id'])
return seen
class NegateAction(argparse.Action):
def __call__(self, parser, ns, values, option):
setattr(ns, self.dest, option[2:9] != 'disable')
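# `option` is the literal flag used on the command line, e.g. '--enable-async-fetch'
# or '--disable-async-fetch'; option[2:9] is therefore 'enable-' or 'disable', so the
# destination is set to True only for the --enable-* spelling.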
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Import from YNAB5 web app to beancount statements.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('bean', help='Path to the beancount file.', nargs='?', default=None)
parser.add_argument('--since', help='Format: YYYY-MM-DD; 2016-12-30. Only process transactions after this date. This will include transactions that occurred exactly on this date.')
parser.add_argument('--ynab-token', help='Your YNAB API token.', required=True)
parser.add_argument('--budget', help='Name of YNAB budget to use. Only needed if you have multiple budgets.')
parser.add_argument('--list-ynab-ids', action='store_true', default=False, help='Instead of running normally, simply list the YNAB ids for each budget category.')
parser.add_argument('--skip-starting-balances', action='store_true', default=False, help='Ignore any starting balance statements in YNAB.')
parser.add_argument('--debug', action='store_true', default=False, help='Print debugging logging to stderr.')
parser.add_argument('--verbose', action='store_true', default=False, help='Mildly verbose logging to stderr.')
parser.add_argument('--enable-async-fetch', '--disable-async-fetch', dest='async_fetch', action=NegateAction, default=(aiohttp is not None), nargs=0, help='Use aiohttp to fetch YNAB data in parallel.')
parser.add_argument('--balance-adjustment-account', help='Account to assign all automatically entered reconciliation balance adjustments.')
parser.add_argument('--account_id', help='Get transactions for only the specified account')
args = parser.parse_args()
if args.since:
args.since = datetime.datetime.strptime(args.since, "%Y-%m-%d").date()
if args.async_fetch and not aiohttp:
logging.error('Cannot specify --enable-async-fetch if aiohttp is not installed.')
sys.exit(1)
if not args.bean:
# Beancount-ynab5 requires a bean file to be passed on the CLI.
# It passes this file to beancount.loader.load_file and
# expects a 3-tuple returned, [entries,errors,options].
# Changing to accommodate no file is tricky
# The following provides a workaround.
# beancount.loader.load_file can handle an empty file, so this passes
# handling of the no-file problem to beancount
temp_bean_file = tempfile.NamedTemporaryFile()
args.bean = temp_bean_file.name
# structuring it this way means we can specify --verbose AND --debug and it will
# end up picking the most verbose (i.e. debug)
log_level = logging.WARN
if args.verbose:
log_level = logging.INFO
if args.debug:
log_level = logging.DEBUG
logging.basicConfig(format='%(asctime)-15s %(message)s', level=log_level)
logging.debug(f'Parsing beancount file {args.bean}')
beancount_entries, beancount_errors, beancount_options = beancount.loader.load_file(args.bean, log_errors=sys.stderr)
if beancount_errors:
sys.exit(1)
asset_prefix = beancount_options['name_assets']
liability_prefix = beancount_options['name_liabilities']
expense_prefix = beancount_options['name_expenses']
income_prefix = beancount_options['name_income']
logging.debug('Loading YNAB IDs for existing transactions in beancount')
seen_transactions = get_existing_ynab_transaction_ids(beancount_entries)
logging.debug('Loading YNAB account UUIDs from beancount file')
account_mapping = build_account_mapping(beancount_entries)
if args.async_fetch:
fetcher = get_ynab_data_async
else:
fetcher = get_ynab_data
budget, ynab_accounts, ynab_category_groups, ynab_categories, ynab_transactions = fetcher(args.ynab_token, args.budget, args.since, args.account_id)
if args.list_ynab_ids:
list_ynab_ids(account_mapping, ynab_accounts, ynab_category_groups, ynab_categories)
sys.exit(0)
# TODO: we can reuse this to make future fetches incremental. Where should we stash this?
# server_knowledge = ynab_transactions['server_knowledge']
# TODO: how do we get this from YNAB and compare against beancount?
commodity = budget.currency_format.iso_code
# all amounts are "milliunits" and need to be converted
def from_milli(n):
return Decimal(n)/1000
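# For example, from_milli(-4500) returns Decimal('-4.5') for an outflow that
# YNAB reports as -4500 milliunits.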
def fmt_memo(memo):
if memo:
return f'"{memo}"'
else:
return ''
r = [x.id for x in ynab_category_groups.values() if x.name == ynab_normalize('Internal Master Category')]
assert len(r) == 1
ynab_internal_master_category_id = r[0]
r = [x.id for x in ynab_categories.values() if x.name == 'Inflow-To-be-Budgeted' and x.category_group_id == ynab_internal_master_category_id]
assert len(r) == 1
inflows_category_id = r[0]
def to_bean(id):
if id in ynab_accounts and ynab_accounts[id].type in ['checking', 'savings', 'otherAsset', 'cash']:
bean_default = f'{asset_prefix}:{ynab_accounts[id].name}'
elif id in ynab_accounts and ynab_accounts[id].type in ['creditCard', 'lineOfCredit', 'otherLiability']:
bean_default = f'{liability_prefix}:{ynab_accounts[id].name}'
elif id == inflows_category_id:
# special case for the inflows category id
bean_default = f'{income_prefix}:{fmt_ynab_category(id, ynab_category_groups, ynab_categories)}'
elif id in ynab_categories:
bean_default = f'{expense_prefix}:{fmt_ynab_category(id, ynab_category_groups, ynab_categories)}'
else:
bean_default = id
return account_mapping.get(id, bean_default)
count = 0
# We only import transactions once they have been reconciled on YNAB. This hopefully removes
# the need to update things we've already downloaded. That is, we want to treat cleared transactions as immutable
# but uncleared transactions are still mutable.
# TODO: Is it necessary to skip deleted transactions here?
for t in (t for t in ynab_transactions if t['cleared'] == 'reconciled' and not t['deleted']):
t = make_transaction(t)
if args.skip_starting_balances:
# This will skip starting balances in budget accounts but not tracking accounts
if t.payee_name == 'Starting Balance' and t.category_id == inflows_category_id:
logging.debug(f'Skipping Starting Balance statement in budget account: {t.date} {to_bean(t.account_id)}')
continue
# We also want to skip starting balances in tracking accounts. Tracking
# accounts won't have a category id
if t.payee_name == 'Starting Balance' and not t.category_id:
logging.debug(f'Skipping Starting Balance statement in tracking account: {t.date} {to_bean(t.account_id)}')
continue
if not t.category_id and not t.transfer_account_id:
logging.warning(
f'Saw a transaction without a category or transfer account id.'
f' This means the resulting beancount output will be corrupted.'
f' Manually inspect the transaction and fix it.'
f' {t.date} {to_bean(t.account_id)} "{t.payee_name}" {from_milli(t.amount)}'
)
# Deduplication -- don't process transactions we've already seen
if t.id in seen_transactions:
logging.debug(f'Skipping duplicate transaction: {t.date} {t.payee_name}')
continue
if t.transfer_transaction_id in seen_transactions:
logging.debug(f'Skipping duplicate transfer transaction: {t.date} {t.payee_name}')
continue
count += 1
print(f'{t.date} * "{t.payee_name}" {fmt_memo(t.memo)}')
print(f' ynab-id: "{t.id}"')
# To avoid duplicate imports for transfers we need to account for
# both our id and the other leg of the transfer's id
seen_transactions.add(t.id)
if t.transfer_transaction_id: seen_transactions.add(t.transfer_transaction_id)
print(f' {to_bean(t.account_id):<50}{from_milli(t.amount):>10} {commodity}')
# Next check if we are looking at a split transaction or a normal one...
if t.subtransactions:
for sub in t.subtransactions:
# we have to reverse the sign on the amount of the subtransaction because YNAB's value
# is telling us "decrease the budget by this amount" but beancount wants us to say
# "increase our expenses by this amount"
print(f' {get_target_account(sub, args.balance_adjustment_account):<50}{-from_milli(sub.amount):>10} {commodity} ; {sub.memo}')
# We need to deduplicate any transfers that happen in a subtransaction...
if sub.transfer_transaction_id: seen_transactions.add(sub.transfer_transaction_id)
else:
print(f' {get_target_account(t, args.balance_adjustment_account)}')
print()
logging.info(f'Imported {count} new transactions.')
|
the-stack_106_21167
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.modules.events.models.persons import EventPerson
from indico.modules.users import User
from indico.util.user import principal_from_identifier
def create_event_person(event, create_untrusted_persons=False, **data):
"""Create an event person from data passed as kwargs."""
from marshmallow import EXCLUDE
from indico.modules.events.persons.schemas import EventPersonSchema
event_person = EventPerson(event=event, is_untrusted=create_untrusted_persons)
event_person.populate_from_dict(EventPersonSchema(unknown=EXCLUDE).load(data))
return event_person
def get_event_person_for_user(event, user, create_untrusted_persons=False):
"""Return the event person that links to a given User/Event (if any)."""
return EventPerson.for_user(user, event, is_untrusted=create_untrusted_persons)
def get_event_person(event, data, create_untrusted_persons=False, allow_external=False):
"""Get an EventPerson from dictionary data.
If there is already an event person in the same event and for the same user,
it will be returned. Matching is done with the e-mail.
"""
person_type = data.get('_type')
if person_type is None:
if data.get('email'):
email = data['email'].lower()
user = User.query.filter(~User.is_deleted, User.all_emails == email).first()
if user:
return get_event_person_for_user(event, user, create_untrusted_persons=create_untrusted_persons)
elif event:
person = event.persons.filter_by(email=email).first()
if person:
return person
# We have no way to identify an existing event person with the provided information
return create_event_person(event, create_untrusted_persons=create_untrusted_persons, **data)
elif person_type == 'Avatar':
principal = principal_from_identifier(data['identifier'], allow_external_users=allow_external)
return get_event_person_for_user(event, principal, create_untrusted_persons=create_untrusted_persons)
elif person_type == 'EventPerson':
return event.persons.filter_by(id=data['id']).one()
elif person_type == 'PersonLink':
return event.persons.filter_by(id=data['personId']).one()
else:
raise ValueError(f"Unknown person type '{person_type}'")
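# A sketch of the `data` shapes this dispatcher accepts (the field names come from
# the lookups above; the concrete values are illustrative only):
#   {'email': 'jane@example.com'}                 -> match an existing user / event person by e-mail
#   {'_type': 'Avatar', 'identifier': 'User:42'}  -> resolve a user principal
#   {'_type': 'EventPerson', 'id': 7}             -> existing event person by id
#   {'_type': 'PersonLink', 'personId': 7}        -> event person behind a person link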
|
the-stack_106_21168
|
#!/usr/bin/env python
__all__ = ['baomihua_download', 'baomihua_download_by_id']
from ..common import *
import urllib
def baomihua_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False, **kwargs):
html = get_html('http://play.baomihua.com/getvideourl.aspx?flvid=%s&devicetype=phone_app' % id)
host = r1(r'host=([^&]*)', html)
assert host
type = r1(r'videofiletype=([^&]*)', html)
assert type
vid = r1(r'&stream_name=([^&]*)', html)
assert vid
url = "http://%s/pomoho_video/%s.%s" % (host, vid, type)
_, ext, size = url_info(url)
print_gui_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge = merge)
def baomihua_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
html = get_html(url)
title = r1(r'<title>(.*)</title>', html)
assert title
id = r1(r'flvid\s*=\s*(\d+)', html)
assert id
baomihua_download_by_id(id, title, output_dir=output_dir, merge=merge, info_only=info_only)
site_info = "baomihua.com"
download = baomihua_download
download_playlist = playlist_not_supported('baomihua')
|
the-stack_106_21170
|
# coding: utf-8
"""Tasks for the ISO generation.
This module handles the creation of the final ISO, which involves:
- creating the ISO's root
- populating the ISO's tree
- creating the ISO
- computing the ISO's checksum
Overview:
┌───────────────┐
┌──────>│ images │────────┐
│ └───────────────┘ │
│ ┌───────────────┐ │
┌────────┐ ╱───>│ packaging │────╲ v
┌───────┐ │ │╱ └───────────────┘ ┌─────────┐ ┌──────────┐
│ mkdir │──────>│populate│ │ mkisofs │───>│ checksum │
└───────┘ │ │╲ ┌───────────────┐ └─────────┘ └──────────┘
└────────┘ ╲───>│ salt_tree │────╱ ^
│ └───────────────┘ │
│ ┌───────────────┐ │
├──────>│ iso_tree │────────┤
│ └───────────────┘ │
│ ┌───────────────┐ │
└──────>│ documentation │────────┘
└───────────────┘
"""
import datetime as dt
import socket
import subprocess
from pathlib import Path
from typing import Iterator, List, Tuple, Union
import doit # type: ignore
from buildchain import config
from buildchain import constants
from buildchain import coreutils
from buildchain import targets as helper
from buildchain import types
from buildchain import utils
from buildchain import versions
ISO_FILE : Path = config.BUILD_ROOT/'{}.iso'.format(config.PROJECT_NAME.lower())
FILE_TREES : Tuple[helper.FileTree, ...] = (
helper.FileTree(
basename='_iso_add_tree',
files=(
Path('examples/new-node.yaml'),
Path('examples/new-node_vagrant.yaml'),
Path('examples/prometheus-sparse.yaml'),
),
destination_directory=constants.ISO_ROOT,
task_dep=['_iso_mkdir_root']
),
helper.FileTree(
basename='_iso_add_tree',
files=(
Path('common.sh'),
Path('iso-manager.sh'),
Path('solutions.sh'),
helper.TemplateFile(
task_name='downgrade.sh',
source=constants.ROOT/'scripts'/'downgrade.sh.in',
destination=constants.ISO_ROOT/'downgrade.sh',
context={'VERSION': versions.VERSION},
file_dep=[versions.VERSION_FILE],
task_dep=['_iso_mkdir_root'],
),
helper.TemplateFile(
task_name='upgrade.sh',
source=constants.ROOT/'scripts'/'upgrade.sh.in',
destination=constants.ISO_ROOT/'upgrade.sh',
context={'VERSION': versions.VERSION},
file_dep=[versions.VERSION_FILE],
task_dep=['_iso_mkdir_root'],
),
helper.TemplateFile(
task_name='bootstrap.sh',
source=constants.ROOT/'scripts'/'bootstrap.sh.in',
destination=constants.ISO_ROOT/'bootstrap.sh',
context={
'VERSION': versions.VERSION,
'SALT_VERSION': versions.SALT_VERSION
},
file_dep=[versions.VERSION_FILE],
task_dep=['_iso_mkdir_root'],
),
helper.TemplateFile(
task_name='backup.sh',
source=constants.ROOT/'scripts'/'backup.sh.in',
destination=constants.ISO_ROOT/'backup.sh',
context={'VERSION': versions.VERSION},
file_dep=[versions.VERSION_FILE],
task_dep=['_iso_mkdir_root'],
),
helper.TemplateFile(
task_name='restore.sh',
source=constants.ROOT/'scripts'/'restore.sh.in',
destination=constants.ISO_ROOT/'restore.sh',
context={
'VERSION': versions.VERSION,
'SALT_VERSION': versions.SALT_VERSION
},
file_dep=[versions.VERSION_FILE],
task_dep=['_iso_mkdir_root'],
),
helper.SerializedData(
task_name='product.txt',
destination=constants.ISO_ROOT/'product.txt',
data={
'NAME': config.PROJECT_NAME,
'VERSION': versions.VERSION,
'SHORT_VERSION': versions.SHORT_VERSION,
'GIT': constants.GIT_REF or '',
'DEVELOPMENT_RELEASE':
'1' if versions.VERSION_SUFFIX == '-dev' else '0',
'BUILD_TIMESTAMP':
dt.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'BUILD_HOST': socket.gethostname(),
},
renderer=helper.Renderer.ENV,
file_dep=[versions.VERSION_FILE],
task_dep=['_iso_mkdir_root'],
# False because we include the build timestamp.
uptodate=[False],
),
),
destination_directory=constants.ISO_ROOT,
source_prefix=Path('scripts'),
task_dep=['_iso_mkdir_root']
)
)
def task_iso() -> types.TaskDict:
"""Build the MetalK8s image."""
return {
'actions': None,
'task_dep': [
'_iso_mkdir_root',
'populate_iso',
'_iso_build',
'_iso_digest',
],
}
def task__iso_mkdir_root() -> types.TaskDict:
"""Create the ISO root directory."""
return helper.Mkdir(
directory=constants.ISO_ROOT, task_dep=['_build_root']
).task
def task_populate_iso() -> types.TaskDict:
"""Populate the ISO_ROOT with required files."""
return {
'basename': 'populate_iso',
'actions': None,
'doc': 'Populate {} with required files.'.format(
utils.build_relpath(constants.ISO_ROOT)
),
# Aggregate here the tasks that put files into ISO_ROOT.
'task_dep': [
'_iso_mkdir_root',
'_iso_add_tree',
'images',
'salt_tree',
'packaging',
'documentation',
],
}
def task__iso_add_tree() -> Iterator[types.TaskDict]:
"""Deploy an ISO sub-tree"""
for file_tree in FILE_TREES:
yield from file_tree.execution_plan
@doit.create_after(executed='populate_iso') # type: ignore
def task__iso_build() -> types.TaskDict:
"""Create the ISO from the files in ISO_ROOT."""
def mkisofs() -> None:
"""Create an ISO file (delete on error)."""
cmd : List[Union[str, Path]] = [
config.ExtCommand.MKISOFS.value, '-output', ISO_FILE,
'-quiet',
'-rock',
'-joliet',
'-joliet-long',
'-full-iso9660-filenames',
'-volid', '{} {}'.format(config.PROJECT_NAME, versions.VERSION),
'--iso-level', '3',
'-gid', '0',
'-uid', '0',
'-input-charset', 'utf-8',
'-output-charset', 'utf-8',
constants.ISO_ROOT
]
try:
subprocess.run(cmd, check=True)
except:
utils.unlink_if_exist(ISO_FILE)
raise
doc = 'Create the ISO from the files in {}.'.format(
utils.build_relpath(constants.ISO_ROOT)
)
# Every file used for the ISO is a dependency.
depends = list(coreutils.ls_files_rec(constants.ISO_ROOT))
depends.append(versions.VERSION_FILE)
return {
'title': utils.title_with_target1('MKISOFS'),
'doc': doc,
'actions': [mkisofs],
'targets': [ISO_FILE],
'file_dep': depends,
'task_dep': ['check_for:mkisofs', '_build_root', '_iso_mkdir_root'],
'clean': True,
}
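# The mkisofs() closure above boils down to a command line roughly like the
# following (paths, project name and version are illustrative):
#
#   mkisofs -output build/<project>.iso -quiet -rock -joliet -joliet-long \
#     -full-iso9660-filenames -volid "<project> <version>" --iso-level 3 \
#     -gid 0 -uid 0 -input-charset utf-8 -output-charset utf-8 <iso-root>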
def task__iso_digest() -> types.TaskDict:
"""Compute the SHA256 digest of the ISO."""
return helper.Sha256Sum(
input_files=[ISO_FILE],
output_file=config.BUILD_ROOT/'SHA256SUM',
task_dep=['_iso_build']
).task
__all__ = utils.export_only_tasks(__name__)
|
the-stack_106_21172
|
"""
@Author : Ailitonia
@Date : 2021/06/01 22:28
@FileName : monitor.py
@Project : nonebot2_miya
@Description : Pixiv User Monitor
@GitHub : https://github.com/Ailitonia
@Software : PyCharm
"""
import asyncio
import random
from nonebot import logger, require, get_bots, get_driver
from nonebot.adapters.cqhttp import MessageSegment, Message
from omega_miya.database import DBSubscription, DBPixivUserArtwork
from omega_miya.utils.pixiv_utils import PixivUser, PixivIllust
from omega_miya.utils.omega_plugin_utils import MsgSender, PicEffector, PicEncoder, ProcessUtils
from .config import Config
__global_config = get_driver().config
plugin_config = Config(**__global_config.dict())
ENABLE_CHECK_POOL_MODE = plugin_config.enable_check_pool_mode
# Checking queue
CHECKING_POOL = []
# Enable the scheduled job that checks the status of Pixiv users' artworks
scheduler = require("nonebot_plugin_apscheduler").scheduler
# Scheduled job that refreshes the artist names stored in the database
@scheduler.scheduled_job(
'cron',
# year=None,
# month=None,
# day='*/1',
# week=None,
# day_of_week=None,
hour='1',
minute='15',
second='50',
# start_date=None,
# end_date=None,
# timezone=None,
id='pixiv_user_db_upgrade',
coalesce=True,
misfire_grace_time=60
)
async def dynamic_db_upgrade():
logger.debug('pixiv_user_db_upgrade: started upgrade subscription info')
sub_res = await DBSubscription.list_sub_by_type(sub_type=9)
for sub_id in sub_res.result:
sub = DBSubscription(sub_type=9, sub_id=sub_id)
user_info_result = await PixivUser(uid=int(sub_id)).get_info()
if user_info_result.error:
logger.error(f'pixiv_user_db_upgrade: 获取用户信息失败, uid: {sub_id}, error: {user_info_result.info}')
continue
user_name = user_info_result.result.get('name')
_res = await sub.add(up_name=user_name, live_info='Pixiv用户作品订阅')
if not _res.success():
logger.error(f'pixiv_user_db_upgrade: 更新用户信息失败, uid: {sub_id}, error: {_res.info}')
continue
logger.debug('pixiv_user_db_upgrade: upgrade subscription info completed')
# Scheduled job that checks Pixiv users' artworks
@scheduler.scheduled_job(
'cron',
# year=None,
# month=None,
# day='*/1',
# week=None,
# day_of_week=None,
# hour=None,
minute='*/5',
# second='*/30',
# start_date=None,
# end_date=None,
# timezone=None,
id='pixiv_user_artwork_monitor',
coalesce=True,
misfire_grace_time=30
)
async def pixiv_user_artwork_monitor():
logger.debug(f"pixiv_user_artwork_monitor: checking started")
# Get the list of currently connected bots
bots = [bot for bot_id, bot in get_bots().items()]
# Get all Pixiv user subscriptions from the subscription table
sub_res = await DBSubscription.list_sub_by_type(sub_type=9)
check_sub = [int(x) for x in sub_res.result]
if not check_sub:
logger.debug(f'pixiv_user_artwork_monitor: no dynamic subscription, ignore.')
return
# Register an async function used to check a single Pixiv user's artworks
async def check_user_artwork(user_id: int):
# Fetch the pixiv user's artwork lists
user_artwork_result = await PixivUser(uid=user_id).get_artworks_info()
if user_artwork_result.error:
logger.error(f'pixiv_user_artwork_monitor: 获取用户 {user_id} 作品失败, error: {user_artwork_result.info}')
return
all_artwork_list = user_artwork_result.result.get('illust_list')
manga_list = user_artwork_result.result.get('manga_list')
all_artwork_list.extend(manga_list)
# All artwork ids already recorded for this user
exist_artwork_result = await DBPixivUserArtwork.list_artwork_by_uid(uid=user_id)
if exist_artwork_result.error:
logger.error(f'pixiv_user_artwork_monitor: 获取用户 {user_id} 已有作品失败, error: {exist_artwork_result.info}')
return
exist_artwork_list = [int(x) for x in exist_artwork_result.result]
new_artwork = [pid for pid in all_artwork_list if pid not in exist_artwork_list]
subscription = DBSubscription(sub_type=9, sub_id=str(user_id))
for pid in new_artwork:
illust = PixivIllust(pid=pid)
illust_info_result = await illust.get_illust_data()
if illust_info_result.error:
logger.error(f'pixiv_user_artwork_monitor: 获取用户 {user_id} 作品 {pid} 信息失败, '
f'error: {illust_info_result.info}')
continue
uname = illust_info_result.result.get('uname')
title = illust_info_result.result.get('title')
is_r18 = illust_info_result.result.get('is_r18')
# Download the illustration
illust_info_msg_result = await illust.get_format_info_msg()
illust_pic_bytes_result = await illust.load_illust_pic()
if illust_pic_bytes_result.error or illust_info_msg_result.error:
logger.error(f'pixiv_user_artwork_monitor: 下载用户 {user_id} 作品 {pid} 失败, '
f'error: {illust_info_msg_result.info} // {illust_pic_bytes_result.info}.')
continue
if is_r18:
# Run through the image-processing module: blur before sending
blur_img_result = await PicEffector(image=illust_pic_bytes_result.result).gaussian_blur()
b64_img_result = await PicEncoder.bytes_to_file(
image=blur_img_result.result, folder_flag='pixiv_monitor')
img_seg = MessageSegment.image(b64_img_result.result)
else:
b64_img_result = await PicEncoder.bytes_to_file(
image=illust_pic_bytes_result.result, folder_flag='pixiv_monitor')
img_seg = MessageSegment.image(b64_img_result.result)
intro_msg = f'【Pixiv】{uname}发布了新的作品!\n\n'
info_msg = illust_info_msg_result.result
msg = Message(intro_msg).append(img_seg).append(info_msg)
# Push the message to subscribed groups and friends
for _bot in bots:
msg_sender = MsgSender(bot=_bot, log_flag='PixivUserArtworkNotice')
await msg_sender.safe_broadcast_groups_subscription(subscription=subscription, message=msg)
# await msg_sender.safe_broadcast_friends_subscription(subscription=subscription, message=msg)
# Write the artwork info to the database
pixiv_user_artwork = DBPixivUserArtwork(pid=pid, uid=user_id)
_res = await pixiv_user_artwork.add(uname=uname, title=title)
if _res.success():
logger.info(f'向数据库写入pixiv用户作品信息: {pid} 成功')
else:
logger.error(f'向数据库写入pixiv用户作品信息: {pid} 失败, error: {_res.info}')
# Checking-pool mode enabled
if ENABLE_CHECK_POOL_MODE:
global CHECKING_POOL
# If checking_pool is empty, the previous round has finished; refill it with a new round of uids
if not CHECKING_POOL:
CHECKING_POOL.extend(check_sub)
# Check how many uids are left in checking_pool
waiting_num = len(CHECKING_POOL)
# Default concurrency is 50 users per run, with a default check interval of 5 minutes
logger.debug(f'Pixiv user artwork checker pool mode debug info, Before checking_pool: {CHECKING_POOL}')
if waiting_num >= 50:
# Sample the users to check in this round
now_checking = random.sample(CHECKING_POOL, k=50)
# Update checking_pool
CHECKING_POOL = [x for x in CHECKING_POOL if x not in now_checking]
else:
now_checking = CHECKING_POOL.copy()
CHECKING_POOL.clear()
logger.debug(f'Pixiv user artwork checker pool mode debug info, After checking_pool: {CHECKING_POOL}')
logger.debug(f'Pixiv user artwork checker pool mode debug info, now_checking: {now_checking}')
# Check the users in now_checking (asynchronously)
tasks = []
for uid in now_checking:
tasks.append(check_user_artwork(user_id=uid))
try:
await asyncio.gather(*tasks)
logger.debug(f"pixiv_user_artwork_monitor: pool mode enable, checking completed, "
f"checked: {', '.join([str(x) for x in now_checking])}.")
except Exception as e:
logger.error(f'pixiv_user_artwork_monitor: error occurred in checking {repr(e)}')
# Checking-pool mode disabled
else:
# Check artworks of all artists in the subscription table (asynchronously)
tasks = []
for uid in check_sub:
tasks.append(check_user_artwork(user_id=uid))
try:
await asyncio.gather(*tasks)
logger.debug(f"pixiv_user_artwork_monitor: pool mode disable, checking completed, "
f"checked: {', '.join([str(x) for x in check_sub])}.")
except Exception as e:
logger.error(f'pixiv_user_artwork_monitor: error occurred in checking {repr(e)}')
# Used to refresh database info when a subscription is first added
async def init_new_add_sub(user_id: int):
# Pause the scheduler so update checks don't run mid-import
scheduler.pause()
try:
# Fetch the pixiv user's artwork lists
user_artwork_result = await PixivUser(uid=user_id).get_artworks_info()
if user_artwork_result.error:
logger.error(f'init_new_add_sub: 获取用户 {user_id} 作品失败, error: {user_artwork_result.info}')
all_artwork_list = user_artwork_result.result.get('illust_list')
manga_list = user_artwork_result.result.get('manga_list')
all_artwork_list.extend(manga_list)
async def _handle(pid_: int):
illust = PixivIllust(pid=pid_)
illust_info_result = await illust.get_illust_data()
if illust_info_result.error:
logger.error(f'init_new_add_sub: 获取用户 {user_id} 作品 {pid_} 信息失败, error: {illust_info_result.info}')
return
uname = illust_info_result.result.get('uname')
title = illust_info_result.result.get('title')
# Write the artwork info to the database
pixiv_user_artwork = DBPixivUserArtwork(pid=pid_, uid=user_id)
_res = await pixiv_user_artwork.add(uname=uname, title=title)
if _res.success():
logger.debug(f'向数据库写入pixiv用户作品信息: {pid_} 成功')
else:
logger.error(f'向数据库写入pixiv用户作品信息: {pid_} 失败, error: {_res.info}')
# Start the import
# Running everything concurrently would overwhelm the network, so split the work into fragments
tasks = [_handle(pid_=pid) for pid in all_artwork_list]
await ProcessUtils.fragment_process(tasks=tasks, fragment_size=50, log_flag='Init Pixiv User Illust')
logger.info(f'初始化pixiv用户 {user_id} 作品完成, 已将作品信息写入数据库.')
except Exception as e:
logger.error(f'初始化pixiv用户 {user_id} 作品发生错误, error: {repr(e)}.')
scheduler.resume()
__all__ = [
'scheduler',
'init_new_add_sub'
]
|
the-stack_106_21173
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from collections import OrderedDict, defaultdict
from fairseq import utils
from fairseq.data import (
BacktranslationDataset,
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
data_utils,
indexed_dataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.sequence_generator_rl import SequenceGenerator
from . import register_task
from .multilingual_translation import MultilingualTranslationTask
logger = logging.getLogger(__name__)
def _get_bt_dataset_key(lang_pair):
return "bt:" + lang_pair
def _get_denoising_dataset_key(lang_pair):
return "denoising:" + lang_pair
# ported from UnsupervisedMT
def parse_lambda_config(x):
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease
# to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000
# iterations, then will linearly increase to 1 until iteration 2000
"""
split = x.split(",")
if len(split) == 1:
return float(x), None
else:
split = [s.split(os.pathsep) for s in split]
assert all(len(s) == 2 for s in split)
assert all(k.isdigit() for k, _ in split)
assert all(
int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1)
)
return float(split[0][1]), [(int(k), float(v)) for k, v in split]
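# For example (assuming a POSIX os.pathsep of ':'):
#   parse_lambda_config("3")           -> (3.0, None)
#   parse_lambda_config("0:1,1000:0")  -> (1.0, [(0, 1.0), (1000, 0.0)])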
@register_task("semisupervised_translation")
class SemisupervisedTranslationTask(MultilingualTranslationTask):
"""A task for training multiple translation models simultaneously.
We iterate round-robin over batches from multiple language pairs, ordered
according to the `--lang-pairs` argument.
The training loop is roughly:
for i in range(len(epoch)):
for lang_pair in args.lang_pairs:
batch = next_batch_for_lang_pair(lang_pair)
loss = criterion(model_for_lang_pair(lang_pair), batch)
loss.backward()
optimizer.step()
In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
(e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
implements the `FairseqMultiModel` interface.
During inference it is required to specify a single `--source-lang` and
`--target-lang`, instead of `--lang-pairs`.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
MultilingualTranslationTask.add_args(parser)
parser.add_argument('--lambda-parallel-config', default="1.0", type=str, metavar='CONFIG',
help='cross-entropy reconstruction coefficient (parallel data). '
'use fixed weight during training if set to floating point number. '
'use piecewise linear function over number of updates to schedule the '
'weight with the format: w0:step0,w1:step1,...')
parser.add_argument('--lambda-denoising-config', default="0.0", type=str, metavar='CONFIG',
help='Cross-entropy reconstruction coefficient (denoising autoencoding)'
'use fixed weight during training if set to floating point number. '
'use piecewise linear function over number of updates to schedule the '
'weight with the format: w0:step0,w1:step1,...')
parser.add_argument('--lambda-otf-bt-config', default="0.0", type=str, metavar='CONFIG',
help='cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data)'
'use fixed weight during training if set to floating point number. '
'use piecewise linear function over number of updates to schedule the '
'weight with the format: w0:step0,w1:step1,...')
parser.add_argument('--bt-max-len-a', default=1.1, type=float, metavar='N',
help='generate back-translated sequences of maximum length ax + b, where x is the '
'source length')
parser.add_argument('--bt-max-len-b', default=10.0, type=float, metavar='N',
help='generate back-translated sequences of maximum length ax + b, where x is the '
'source length')
parser.add_argument('--bt-beam-size', default=1, type=int, metavar='N',
help='beam size used in beam search of online back-translation')
parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
help='maximum word shuffle distance for denoising autoencoding data generation')
parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
help='word dropout probability for denoising autoencoding data generation')
parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
help='word blanking probability for denoising autoencoding data generation')
# fmt: on
def __init__(self, args, dicts, training):
super().__init__(args, dicts, training)
self.lambda_parallel, self.lambda_parallel_steps = parse_lambda_config(
args.lambda_parallel_config
)
self.lambda_otf_bt, self.lambda_otf_bt_steps = parse_lambda_config(
args.lambda_otf_bt_config
)
self.lambda_denoising, self.lambda_denoising_steps = parse_lambda_config(
args.lambda_denoising_config
)
if self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None:
denoising_lang_pairs = [
"%s-%s" % (tgt, tgt)
for tgt in {lang_pair.split("-")[1] for lang_pair in args.lang_pairs}
]
self.model_lang_pairs = self.model_lang_pairs + denoising_lang_pairs
self.backtranslate_datasets = {}
self.backtranslators = {}
@classmethod
def setup_task(cls, args, **kwargs):
dicts, training = MultilingualTranslationTask.prepare(args, **kwargs)
return cls(args, dicts, training)
def load_dataset(self, split, epoch=1, **kwargs):
"""Load a dataset split."""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
def split_exists(split, src, tgt, lang):
if src is not None:
filename = os.path.join(
data_path, "{}.{}-{}.{}".format(split, src, tgt, lang)
)
else:
filename = os.path.join(
data_path, "{}.{}-None.{}".format(split, src, tgt)
)
return indexed_dataset.dataset_exists(filename, impl=self.args.dataset_impl)
def load_indexed_dataset(path, dictionary):
return data_utils.load_indexed_dataset(
path, dictionary, self.args.dataset_impl
)
# load parallel datasets
src_datasets, tgt_datasets = {}, {}
if (
self.lambda_parallel > 0.0
or self.lambda_parallel_steps is not None
or not split.startswith("train")
):
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split("-")
if split_exists(split, src, tgt, src):
prefix = os.path.join(
data_path, "{}.{}-{}.".format(split, src, tgt)
)
elif split_exists(split, tgt, src, src):
prefix = os.path.join(
data_path, "{}.{}-{}.".format(split, tgt, src)
)
else:
continue
src_datasets[lang_pair] = load_indexed_dataset(
prefix + src, self.dicts[src]
)
tgt_datasets[lang_pair] = load_indexed_dataset(
prefix + tgt, self.dicts[tgt]
)
logger.info(
"parallel-{} {} {} examples".format(
data_path, split, len(src_datasets[lang_pair])
)
)
if len(src_datasets) == 0:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
# back translation datasets
backtranslate_datasets = {}
if (
self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None
) and split.startswith("train"):
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split("-")
if not split_exists(split, tgt, None, tgt):
raise FileNotFoundError(
"Dataset not found: backtranslation {} ({})".format(
split, data_path
)
)
filename = os.path.join(
data_path, "{}.{}-None.{}".format(split, tgt, tgt)
)
dataset = load_indexed_dataset(filename, self.dicts[tgt])
lang_pair_dataset_tgt = LanguagePairDataset(
dataset,
dataset.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
)
lang_pair_dataset = LanguagePairDataset(
dataset,
dataset.sizes,
src_dict=self.dicts[src],
tgt=dataset,
tgt_sizes=dataset.sizes,
tgt_dict=self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
)
backtranslate_datasets[lang_pair] = BacktranslationDataset(
tgt_dataset=self.alter_dataset_langtok(
lang_pair_dataset_tgt,
src_eos=self.dicts[tgt].eos(),
src_lang=tgt,
tgt_lang=src,
),
backtranslation_fn=self.backtranslators[lang_pair],
src_dict=self.dicts[src],
tgt_dict=self.dicts[tgt],
output_collater=self.alter_dataset_langtok(
lang_pair_dataset=lang_pair_dataset,
src_eos=self.dicts[src].eos(),
src_lang=src,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
).collater,
)
logger.info(
"backtranslate-{}: {} {} {} examples".format(
tgt,
data_path,
split,
len(backtranslate_datasets[lang_pair]),
)
)
self.backtranslate_datasets[lang_pair] = backtranslate_datasets[
lang_pair
]
# denoising autoencoder
noising_datasets = {}
if (
self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None
) and split.startswith("train"):
for lang_pair in self.lang_pairs:
_, tgt = lang_pair.split("-")
if not split_exists(split, tgt, None, tgt):
continue
filename = os.path.join(
data_path, "{}.{}-None.{}".format(split, tgt, tgt)
)
tgt_dataset1 = load_indexed_dataset(filename, self.dicts[tgt])
tgt_dataset2 = load_indexed_dataset(filename, self.dicts[tgt])
noising_dataset = NoisingDataset(
tgt_dataset1,
self.dicts[tgt],
seed=1,
max_word_shuffle_distance=self.args.max_word_shuffle_distance,
word_dropout_prob=self.args.word_dropout_prob,
word_blanking_prob=self.args.word_blanking_prob,
)
noising_datasets[lang_pair] = self.alter_dataset_langtok(
LanguagePairDataset(
noising_dataset,
tgt_dataset1.sizes,
self.dicts[tgt],
tgt_dataset2,
tgt_dataset2.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
),
src_eos=self.dicts[tgt].eos(),
src_lang=tgt,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
)
logger.info(
"denoising-{}: {} {} {} examples".format(
tgt,
data_path,
split,
len(noising_datasets[lang_pair]),
)
)
def language_pair_dataset(lang_pair):
src, tgt = lang_pair.split("-")
src_dataset, tgt_dataset = src_datasets[lang_pair], tgt_datasets[lang_pair]
return self.alter_dataset_langtok(
LanguagePairDataset(
src_dataset,
src_dataset.sizes,
self.dicts[src],
tgt_dataset,
tgt_dataset.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
),
self.dicts[src].eos(),
src,
self.dicts[tgt].eos(),
tgt,
)
self.datasets[split] = RoundRobinZipDatasets(
OrderedDict(
[
(lang_pair, language_pair_dataset(lang_pair))
for lang_pair in src_datasets.keys()
]
+ [
(_get_bt_dataset_key(lang_pair), dataset)
for lang_pair, dataset in backtranslate_datasets.items()
]
+ [
(_get_denoising_dataset_key(lang_pair), dataset)
for lang_pair, dataset in noising_datasets.items()
]
),
eval_key=None
if self.training
else "%s-%s" % (self.args.source_lang, self.args.target_lang),
)
def build_model(self, args, from_checkpoint=False):
from fairseq import models
model = models.build_model(args, self, from_checkpoint)
if not isinstance(model, FairseqMultiModel):
raise ValueError(
"SemisupervisedTranslationTask requires a FairseqMultiModel architecture"
)
# create SequenceGenerator for each model that has backtranslation dependency on it
self.sequence_generators = {}
if (
self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None
) and self.training:
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split("-")
key = "{}-{}".format(tgt, src)
self.sequence_generators[key] = SequenceGenerator(
[model.models[key]],
tgt_dict=self.dicts[src],
beam_size=args.bt_beam_size,
max_len_a=args.bt_max_len_a,
max_len_b=args.bt_max_len_b,
)
decoder_lang_tok_idx = self.get_decoder_langtok(src)
def backtranslate_fn(
sample,
model=model.models[key],
bos_token=decoder_lang_tok_idx,
sequence_generator=self.sequence_generators[key],
):
return sequence_generator.generate(
[model],
sample,
bos_token=bos_token,
)
self.backtranslators[lang_pair] = backtranslate_fn
return model
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
if update_num > 0:
self.update_step(update_num)
agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float)
def forward_backward(model, samples, logging_output_key, weight):
nonlocal agg_loss, agg_sample_size, agg_logging_output
if samples is None or len(samples) == 0:
return
loss, sample_size, logging_output = criterion(model, samples)
if ignore_grad:
loss *= 0
else:
loss *= weight
optimizer.backward(loss)
agg_loss += loss.detach().item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[k] += logging_output[k]
agg_logging_output[logging_output_key] += logging_output[k]
if self.lambda_parallel > 0.0:
for lang_pair in self.lang_pairs:
forward_backward(
model.models[lang_pair],
sample[lang_pair],
lang_pair,
self.lambda_parallel,
)
if self.lambda_otf_bt > 0.0:
for lang_pair in self.lang_pairs:
sample_key = _get_bt_dataset_key(lang_pair)
forward_backward(
model.models[lang_pair],
sample[sample_key],
sample_key,
self.lambda_otf_bt,
)
if self.lambda_denoising > 0.0:
for lang_pair in self.lang_pairs:
_, tgt = lang_pair.split("-")
sample_key = _get_denoising_dataset_key(lang_pair)
forward_backward(
model.models["{0}-{0}".format(tgt)],
sample[sample_key],
sample_key,
self.lambda_denoising,
)
return agg_loss, agg_sample_size, agg_logging_output
def update_step(self, num_updates):
def lambda_step_func(config, n_iter):
"""
Update a lambda value according to its schedule configuration.
"""
ranges = [
i
for i in range(len(config) - 1)
if config[i][0] <= n_iter < config[i + 1][0]
]
if len(ranges) == 0:
assert n_iter >= config[-1][0]
return config[-1][1]
assert len(ranges) == 1
i = ranges[0]
x_a, y_a = config[i]
x_b, y_b = config[i + 1]
return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)
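# For example, with the schedule "0:0,1000:0,2000:1" (i.e. config =
# [(0, 0.0), (1000, 0.0), (2000, 1.0)]), lambda_step_func(config, 1500)
# returns 0.5: the weight is interpolated linearly between the two
# surrounding (step, weight) points.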
if self.lambda_parallel_steps is not None:
self.lambda_parallel = lambda_step_func(
self.lambda_parallel_steps, num_updates
)
if self.lambda_denoising_steps is not None:
self.lambda_denoising = lambda_step_func(
self.lambda_denoising_steps, num_updates
)
if self.lambda_otf_bt_steps is not None:
self.lambda_otf_bt = lambda_step_func(self.lambda_otf_bt_steps, num_updates)
|
the-stack_106_21176
|
"""
nodal_averaged_equivalent_elastic_strain
========================================
"""
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs, _modify_output_spec_with_one_type
from ansys.dpf.core.operators.specification import PinSpecification, Specification
"""Operators from mapdlOperatorsCore plugin, from "result" category
"""
class nodal_averaged_equivalent_elastic_strain(Operator):
"""Read nodal averaged equivalent elastic strain as averaged nodal result from rst file.
available inputs:
- time_scoping (Scoping, list) (optional)
- mesh_scoping (ScopingsContainer, Scoping, list) (optional)
- fields_container (FieldsContainer) (optional)
- streams_container (StreamsContainer, Stream) (optional)
- data_sources (DataSources)
- mesh (MeshedRegion) (optional)
available outputs:
- fields_container (FieldsContainer)
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
>>> # Make input connections
>>> my_time_scoping = dpf.Scoping()
>>> op.inputs.time_scoping.connect(my_time_scoping)
>>> my_mesh_scoping = dpf.ScopingsContainer()
>>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> my_streams_container = dpf.StreamsContainer()
>>> op.inputs.streams_container.connect(my_streams_container)
>>> my_data_sources = dpf.DataSources()
>>> op.inputs.data_sources.connect(my_data_sources)
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain(time_scoping=my_time_scoping,mesh_scoping=my_mesh_scoping,fields_container=my_fields_container,streams_container=my_streams_container,data_sources=my_data_sources,mesh=my_mesh)
>>> # Get output data
>>> result_fields_container = op.outputs.fields_container()"""
def __init__(self, time_scoping=None, mesh_scoping=None, fields_container=None, streams_container=None, data_sources=None, mesh=None, config=None, server=None):
super().__init__(name="mapdl::rst::NPEL_EQV", config = config, server = server)
self._inputs = InputsNodalAveragedEquivalentElasticStrain(self)
self._outputs = OutputsNodalAveragedEquivalentElasticStrain(self)
if time_scoping !=None:
self.inputs.time_scoping.connect(time_scoping)
if mesh_scoping !=None:
self.inputs.mesh_scoping.connect(mesh_scoping)
if fields_container !=None:
self.inputs.fields_container.connect(fields_container)
if streams_container !=None:
self.inputs.streams_container.connect(streams_container)
if data_sources !=None:
self.inputs.data_sources.connect(data_sources)
if mesh !=None:
self.inputs.mesh.connect(mesh)
@staticmethod
def _spec():
spec = Specification(description="""Read nodal averaged equivalent elastic strain as averaged nodal result from rst file.""",
map_input_pin_spec={
0 : PinSpecification(name = "time_scoping", type_names=["scoping","vector<int32>"], optional=True, document=""""""),
1 : PinSpecification(name = "mesh_scoping", type_names=["scopings_container","scoping","vector<int32>"], optional=True, document=""""""),
2 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=True, document="""FieldsContainer already allocated modified inplace"""),
3 : PinSpecification(name = "streams_container", type_names=["streams_container","stream"], optional=True, document="""Streams containing the result file."""),
4 : PinSpecification(name = "data_sources", type_names=["data_sources"], optional=False, document="""data sources containing the result file."""),
7 : PinSpecification(name = "mesh", type_names=["abstract_meshed_region"], optional=True, document="""""")},
map_output_pin_spec={
0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""FieldsContainer filled in""")})
return spec
@staticmethod
def default_config():
return Operator.default_config(name = "mapdl::rst::NPEL_EQV")
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsNodalAveragedEquivalentElasticStrain
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsNodalAveragedEquivalentElasticStrain
"""
return super().outputs
#internal name: mapdl::rst::NPEL_EQV
#scripting name: nodal_averaged_equivalent_elastic_strain
class InputsNodalAveragedEquivalentElasticStrain(_Inputs):
"""Intermediate class used to connect user inputs to nodal_averaged_equivalent_elastic_strain operator
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
>>> my_time_scoping = dpf.Scoping()
>>> op.inputs.time_scoping.connect(my_time_scoping)
>>> my_mesh_scoping = dpf.ScopingsContainer()
>>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> my_streams_container = dpf.StreamsContainer()
>>> op.inputs.streams_container.connect(my_streams_container)
>>> my_data_sources = dpf.DataSources()
>>> op.inputs.data_sources.connect(my_data_sources)
>>> my_mesh = dpf.MeshedRegion()
>>> op.inputs.mesh.connect(my_mesh)
"""
def __init__(self, op: Operator):
super().__init__(nodal_averaged_equivalent_elastic_strain._spec().inputs, op)
self._time_scoping = Input(nodal_averaged_equivalent_elastic_strain._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._time_scoping)
self._mesh_scoping = Input(nodal_averaged_equivalent_elastic_strain._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._mesh_scoping)
self._fields_container = Input(nodal_averaged_equivalent_elastic_strain._spec().input_pin(2), 2, op, -1)
self._inputs.append(self._fields_container)
self._streams_container = Input(nodal_averaged_equivalent_elastic_strain._spec().input_pin(3), 3, op, -1)
self._inputs.append(self._streams_container)
self._data_sources = Input(nodal_averaged_equivalent_elastic_strain._spec().input_pin(4), 4, op, -1)
self._inputs.append(self._data_sources)
self._mesh = Input(nodal_averaged_equivalent_elastic_strain._spec().input_pin(7), 7, op, -1)
self._inputs.append(self._mesh)
@property
def time_scoping(self):
"""Allows to connect time_scoping input to the operator
Parameters
----------
my_time_scoping : Scoping, list,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
>>> op.inputs.time_scoping.connect(my_time_scoping)
>>> #or
>>> op.inputs.time_scoping(my_time_scoping)
"""
return self._time_scoping
@property
def mesh_scoping(self):
"""Allows to connect mesh_scoping input to the operator
Parameters
----------
my_mesh_scoping : ScopingsContainer, Scoping, list,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
>>> op.inputs.mesh_scoping.connect(my_mesh_scoping)
>>> #or
>>> op.inputs.mesh_scoping(my_mesh_scoping)
"""
return self._mesh_scoping
@property
def fields_container(self):
"""Allows to connect fields_container input to the operator
- pindoc: FieldsContainer already allocated modified inplace
Parameters
----------
my_fields_container : FieldsContainer,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> #or
>>> op.inputs.fields_container(my_fields_container)
"""
return self._fields_container
@property
def streams_container(self):
"""Allows to connect streams_container input to the operator
- pindoc: Streams containing the result file.
Parameters
----------
my_streams_container : StreamsContainer, Stream,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
>>> op.inputs.streams_container.connect(my_streams_container)
>>> #or
>>> op.inputs.streams_container(my_streams_container)
"""
return self._streams_container
@property
def data_sources(self):
"""Allows to connect data_sources input to the operator
- pindoc: data sources containing the result file.
Parameters
----------
my_data_sources : DataSources,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
>>> op.inputs.data_sources.connect(my_data_sources)
>>> #or
>>> op.inputs.data_sources(my_data_sources)
"""
return self._data_sources
@property
def mesh(self):
"""Allows to connect mesh input to the operator
Parameters
----------
my_mesh : MeshedRegion,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
>>> op.inputs.mesh.connect(my_mesh)
>>> #or
>>> op.inputs.mesh(my_mesh)
"""
return self._mesh
class OutputsNodalAveragedEquivalentElasticStrain(_Outputs):
"""Intermediate class used to get outputs from nodal_averaged_equivalent_elastic_strain operator
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(self, op: Operator):
super().__init__(nodal_averaged_equivalent_elastic_strain._spec().outputs, op)
self._fields_container = Output(nodal_averaged_equivalent_elastic_strain._spec().output_pin(0), 0, op)
self._outputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to get fields_container output of the operator
- pindoc: FieldsContainer filled in
Returns
----------
my_fields_container : FieldsContainer,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
return self._fields_container
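# Minimal usage sketch (not part of the generated operator module): it simply
# mirrors the doctest snippets in the docstrings above. It assumes
# ansys-dpf-core is installed and that "model.rst" is a placeholder path to an
# MAPDL result file.
if __name__ == "__main__":
    from ansys.dpf import core as dpf
    op = dpf.operators.result.nodal_averaged_equivalent_elastic_strain()
    data_sources = dpf.DataSources("model.rst")  # placeholder result file (assumption)
    op.inputs.data_sources.connect(data_sources)
    # Evaluate the operator and fetch the output fields container.
    fields = op.outputs.fields_container()
    print(fields)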
|
the-stack_106_21178
|
'''
URL: https://leetcode.com/problems/find-peak-element
Time complexity: O(logn)
Space complexity: O(1)
'''
class Solution(object):
def findPeakElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
start = 0
end = len(nums) - 1
while start <= end:
mid = (start + end) // 2
lower = max(0, mid-1)
higher = min(len(nums)-1, mid+1)
if nums[lower] < nums[mid] > nums[higher]:
return mid
elif mid == 0 and nums[mid] > nums[higher]:
return mid
elif mid == len(nums) - 1 and nums[mid] > nums[lower]:
return mid
elif nums[lower] < nums[mid] < nums[higher]:
start = mid + 1
elif nums[lower] > nums[mid] > nums[higher]:
end = mid - 1
else:
start = mid + 1
return 0
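# Quick illustrative check (not part of the original solution): the inputs
# below are the standard examples from the problem statement; any index of a
# valid peak is an acceptable answer.
if __name__ == '__main__':
    s = Solution()
    print(s.findPeakElement([1, 2, 3, 1]))           # expected: 2
    print(s.findPeakElement([1, 2, 1, 3, 5, 6, 4]))  # expected: 1 or 5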
|
the-stack_106_21179
|
#!/usr/bin/env python
"""
Get twinpy strucuture
"""
import argparse
import numpy as np
from pymatgen.io.vasp import Poscar
from twinpy.properties.hexagonal import (get_hexagonal_lattice_from_a_c,
get_wyckoff_from_hcp)
from twinpy.interfaces.pymatgen import get_cell_from_pymatgen_structure
from twinpy.api_twinpy import Twinpy
from twinpy.file_io import write_poscar
# argparse
def get_argparse():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--structure', type=str,
help="'shear' or 'twinboundary'")
parser.add_argument('-r', '--shear_strain_ratio', type=float, default=0.,
help="shear strain ratio")
parser.add_argument('--twinmode', type=str,
help="twinmode")
    parser.add_argument('--twintype', type=int, default=None,
                        help="twintype; when specified, "
                             "twin boundary mode is invoked")
parser.add_argument('--xshift', type=float, default=0.,
help="x shift")
parser.add_argument('--yshift', type=float, default=0.,
help="y shift")
parser.add_argument('--dim', type=str, default='1 1 1',
help="dimension")
parser.add_argument('--layers', type=int,
help="layers for twin boundary structure")
parser.add_argument('--delta', type=float, default=0.,
help="delta")
parser.add_argument('--expansion_ratios', type=str, default='1 1 1',
help="expansion_ratios")
parser.add_argument('--no_make_tb_flat', action='store_true',
help="do not project atoms on the twin boundary")
parser.add_argument('-c', '--posfile', default=None,
help="POSCAR file")
parser.add_argument('--get_poscar', action='store_true',
help="get poscar")
parser.add_argument('--get_lattice', action='store_true',
help="get lattice not structure")
parser.add_argument('-o', '--output', default=None,
help="POSCAR filename")
parser.add_argument('--is_primitive', action='store_true',
help="get primitive shear structure")
parser.add_argument('--get_primitive_standardized', action='store_true',
help="get primitive standardized")
parser.add_argument('--get_conventional_standardized', action='store_true',
help="get conventional standardized")
parser.add_argument('--dump', action='store_true',
help="dump twinpy structure object to yaml")
parser.add_argument('--show_nearest_distance', action='store_true',
help="Show nearest atomic distance.")
arguments = parser.parse_args()
return arguments
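# Example invocation (illustrative only; the script name and flag values are
# placeholders, using flags defined above):
#   python get_twinpy_structure.py --structure shear --twinmode '10-12' \
#       -r 0.05 -c POSCAR --get_poscar -o shear_10-12.poscar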
def _get_output_name(structure, get_lattice, shear_strain_ratio, twinmode):
name = ''
if structure == 'shear':
if np.allclose(shear_strain_ratio, 0.):
name += 'parent'
else:
name += 'shear'
else:
name += 'tb'
name += '_%s' % twinmode
if get_lattice:
name += '_lat.poscar'
else:
name += '.poscar'
return name
def main(structure,
shear_strain_ratio,
twinmode,
twintype,
xshift,
yshift,
dim,
layers,
delta,
expansion_ratios,
no_make_tb_flat,
posfile,
get_poscar,
get_lattice,
output,
is_primitive,
get_primitive_standardized,
get_conventional_standardized,
dump,
show_nearest_distance,
):
move_atoms_into_unitcell = True
symprec = 1e-5
no_idealize = False
no_sort = True
get_sort_list = False
if posfile is None:
print("Warning:")
print(" POSCAR file did not specify")
print(" Set automatically, a=2.93, c=4.65, symbol='Ti', "
"wyckoff='c'")
lattice = get_hexagonal_lattice_from_a_c(a=2.93, c=4.65)
symbol = 'Ti'
wyckoff = 'c'
else:
poscar = Poscar.from_file(posfile)
pmgstructure = poscar.structure
cell = get_cell_from_pymatgen_structure(pmgstructure)
lattice = cell[0]
symbol = cell[2][0]
wyckoff = get_wyckoff_from_hcp(cell)
twinpy = Twinpy(lattice=lattice,
twinmode=twinmode,
symbol=symbol,
wyckoff=wyckoff)
if get_poscar:
if output is None:
output = _get_output_name(structure=structure,
get_lattice=get_lattice,
shear_strain_ratio=shear_strain_ratio,
twinmode=twinmode)
if structure == 'shear':
twinpy.set_shear(xshift=xshift,
yshift=yshift,
dim=dim,
shear_strain_ratio=shear_strain_ratio,
expansion_ratios=expansion_ratios,
is_primitive=is_primitive)
std = twinpy.get_shear_standardize(
get_lattice=get_lattice,
move_atoms_into_unitcell=move_atoms_into_unitcell,
)
else:
make_tb_flat = not no_make_tb_flat
twinpy.set_twinboundary(twintype=twintype,
xshift=xshift,
yshift=yshift,
layers=layers,
delta=delta,
shear_strain_ratio=shear_strain_ratio,
expansion_ratios=expansion_ratios,
make_tb_flat=make_tb_flat)
std = twinpy.get_twinboundary_standardize(
get_lattice=get_lattice,
move_atoms_into_unitcell=move_atoms_into_unitcell,
)
if show_nearest_distance:
from twinpy.structure.twinboundary \
import plot_nearest_atomic_distance_of_twinboundary
plot_nearest_atomic_distance_of_twinboundary(
lattice=lattice,
symbol=symbol,
twinmode=twinmode,
layers=layers,
wyckoff=wyckoff,
delta=delta,
twintype=twintype,
shear_strain_ratio=shear_strain_ratio,
expansion_ratios=expansion_ratios,
make_tb_flat=make_tb_flat,
)
if get_primitive_standardized:
to_primitive = True
elif get_conventional_standardized:
to_primitive = False
else:
to_primitive = None
if to_primitive is None:
out_cell = std.cell
else:
out_cell = std.get_standardized_cell(
to_primitive=to_primitive,
no_idealize=no_idealize,
symprec=symprec,
no_sort=no_sort,
get_sort_list=get_sort_list,
)
if output is not None:
write_poscar(cell=out_cell,
filename=output)
if dump:
twinpy.dump_yaml()
if __name__ == '__main__':
args = get_argparse()
dimension = list(map(int, args.dim.split()))
expand = list(map(float, args.expansion_ratios.split()))
assert args.structure in ['shear', 'twinboundary'], \
"structure must be 'shear' or 'twinboundary'"
main(structure=args.structure,
shear_strain_ratio=args.shear_strain_ratio,
twinmode=args.twinmode,
twintype=args.twintype,
xshift=args.xshift,
yshift=args.yshift,
dim=dimension,
layers=args.layers,
delta=args.delta,
expansion_ratios=expand,
no_make_tb_flat=args.no_make_tb_flat,
posfile=args.posfile,
get_poscar=args.get_poscar,
get_lattice=args.get_lattice,
output=args.output,
is_primitive=args.is_primitive,
get_primitive_standardized=args.get_primitive_standardized,
get_conventional_standardized=args.get_conventional_standardized,
dump=args.dump,
show_nearest_distance=args.show_nearest_distance,
)
|
the-stack_106_21180
|
# Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for OpenStack Cinder DotHill driver."""
from lxml import etree
import mock
import requests
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.volume.drivers.dothill import dothill_client as dothill
from cinder.volume.drivers.dothill import dothill_common
from cinder.volume.drivers.dothill import dothill_fc
from cinder.volume.drivers.dothill import dothill_iscsi
from cinder.zonemanager import utils as fczm_utils
session_key = '12a1626754554a21d85040760c81b'
resp_login = '''<RESPONSE><OBJECT basetype="status" name="status" oid="1">
<PROPERTY name="response-type">success</PROPERTY>
<PROPERTY name="response-type-numeric">0</PROPERTY>
<PROPERTY name="response">12a1626754554a21d85040760c81b</PROPERTY>
<PROPERTY name="return-code">1</PROPERTY></OBJECT></RESPONSE>'''
resp_badlogin = '''<RESPONSE><OBJECT basetype="status" name="status" oid="1">
<PROPERTY name="response-type">error</PROPERTY>
<PROPERTY name="response-type-numeric">1</PROPERTY>
<PROPERTY name="response">Authentication failure</PROPERTY>
<PROPERTY name="return-code">1</PROPERTY></OBJECT></RESPONSE>'''
response_ok = '''<RESPONSE><OBJECT basetype="status" name="status" oid="1">
<PROPERTY name="response">some data</PROPERTY>
<PROPERTY name="return-code">0</PROPERTY>
</OBJECT></RESPONSE>'''
response_not_ok = '''<RESPONSE><OBJECT basetype="status" name="status" oid="1">
<PROPERTY name="response">Error Message</PROPERTY>
<PROPERTY name="return-code">1</PROPERTY>
</OBJECT></RESPONSE>'''
response_stats_linear = '''<RESPONSE><OBJECT basetype="virtual-disks">
<PROPERTY name="size-numeric">3863830528</PROPERTY>
<PROPERTY name="freespace-numeric">3863830528</PROPERTY>
</OBJECT></RESPONSE>'''
response_stats_virtual = '''<RESPONSE><OBJECT basetype="pools">
<PROPERTY name="total-size-numeric">3863830528</PROPERTY>
<PROPERTY name="total-avail-numeric">3863830528</PROPERTY>
</OBJECT></RESPONSE>'''
response_no_lun = '''<RESPONSE></RESPONSE>'''
response_lun = '''<RESPONSE><OBJECT basetype="host-view-mappings">
<PROPERTY name="lun">1</PROPERTY></OBJECT>
<OBJECT basetype="host-view-mappings">
<PROPERTY name="lun">4</PROPERTY></OBJECT></RESPONSE>'''
response_ports = '''<RESPONSE>
<OBJECT basetype="port">
<PROPERTY name="port-type">FC</PROPERTY>
<PROPERTY name="target-id">id1</PROPERTY>
<PROPERTY name="status">Disconnected</PROPERTY></OBJECT>
<OBJECT basetype="port">
<PROPERTY name="port-type">FC</PROPERTY>
<PROPERTY name="target-id">id2</PROPERTY>
<PROPERTY name="status">Up</PROPERTY></OBJECT>
<OBJECT basetype="port">
<PROPERTY name="port-type">iSCSI</PROPERTY>
<PROPERTY name="target-id">id3</PROPERTY>
<PROPERTY name="%(ip)s" >10.0.0.10</PROPERTY>
<PROPERTY name="status">Disconnected</PROPERTY></OBJECT>
<OBJECT basetype="port">
<PROPERTY name="port-type">iSCSI</PROPERTY>
<PROPERTY name="target-id">id4</PROPERTY>
<PROPERTY name="%(ip)s" >10.0.0.11</PROPERTY>
<PROPERTY name="status">Up</PROPERTY></OBJECT>
<OBJECT basetype="port">
<PROPERTY name="port-type">iSCSI</PROPERTY>
<PROPERTY name="target-id">id5</PROPERTY>
<PROPERTY name="%(ip)s" >10.0.0.12</PROPERTY>
<PROPERTY name="status">Up</PROPERTY></OBJECT>
</RESPONSE>'''
response_ports_linear = response_ports % {'ip': 'primary-ip-address'}
response_ports_virtual = response_ports % {'ip': 'ip-address'}
invalid_xml = '''<RESPONSE></RESPONSE>'''
malformed_xml = '''<RESPONSE>'''
fake_xml = '''<fakexml></fakexml>'''
stats_low_space = {'free_capacity_gb': 10, 'total_capacity_gb': 100}
stats_large_space = {'free_capacity_gb': 90, 'total_capacity_gb': 100}
vol_id = 'fceec30e-98bc-4ce5-85ff-d7309cc17cc2'
test_volume = {'id': vol_id, 'name_id': None,
'display_name': 'test volume', 'name': 'volume', 'size': 10}
test_retype_volume = {'attach_status': fields.VolumeAttachStatus.DETACHED,
'id': vol_id, 'name_id': None,
'display_name': 'test volume', 'name': 'volume',
'size': 10}
test_host = {'capabilities': {'location_info':
'DotHillVolumeDriver:xxxxx:dg02:A'}}
test_snap = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'volume': {'name_id': None},
'volume_id': vol_id,
'display_name': 'test volume', 'name': 'volume', 'size': 10}
encoded_volid = 'v_O7DDpi8TOWF_9cwnMF'
encoded_snapid = 's_O7DDpi8TOWF_9cwnMF'
dest_volume = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'source_volid': vol_id,
'display_name': 'test volume', 'name': 'volume', 'size': 10}
attached_volume = {'id': vol_id,
'display_name': 'test volume', 'name': 'volume',
'size': 10, 'status': 'in-use',
'attach_status': fields.VolumeAttachStatus.ATTACHED}
attaching_volume = {'id': vol_id,
'display_name': 'test volume', 'name': 'volume',
'size': 10, 'status': 'attaching',
'attach_status': fields.VolumeAttachStatus.ATTACHED}
detached_volume = {'id': vol_id, 'name_id': None,
'display_name': 'test volume', 'name': 'volume',
'size': 10, 'status': 'available',
'attach_status': 'detached'}
connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': ["111111111111111", "111111111111112"],
'wwnns': ["211111111111111", "211111111111112"],
'host': 'fakehost'}
invalid_connector = {'ip': '10.0.0.2',
'initiator': '',
'wwpns': [],
'wwnns': [],
'host': 'fakehost'}
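# The canned XML payloads and dictionaries above stand in for DotHill
# management-API responses and typical Cinder volume/snapshot/connector
# objects; the test cases below patch the HTTP layer and feed these fixtures
# through the client, common and driver classes.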
class TestDotHillClient(test.TestCase):
def setUp(self):
super(TestDotHillClient, self).setUp()
self.login = 'manage'
self.passwd = '!manage'
self.ip = '10.0.0.1'
self.protocol = 'http'
self.ssl_verify = False
self.client = dothill.DotHillClient(self.ip, self.login, self.passwd,
self.protocol, self.ssl_verify)
@mock.patch('requests.get')
def test_login(self, mock_requests_get):
m = mock.Mock()
m.text.encode.side_effect = [resp_login]
mock_requests_get.return_value = m
self.client.login()
self.assertEqual(session_key, self.client._session_key)
m.text.encode.side_effect = [resp_badlogin]
self.assertRaises(exception.DotHillAuthenticationError,
self.client.login)
def test_build_request_url(self):
url = self.client._build_request_url('/path')
self.assertEqual('http://10.0.0.1/api/path', url)
url = self.client._build_request_url('/path', arg1='val1')
self.assertEqual('http://10.0.0.1/api/path/arg1/val1', url)
url = self.client._build_request_url('/path', arg_1='val1')
self.assertEqual('http://10.0.0.1/api/path/arg-1/val1', url)
url = self.client._build_request_url('/path', 'arg1')
self.assertEqual('http://10.0.0.1/api/path/arg1', url)
url = self.client._build_request_url('/path', 'arg1', arg2='val2')
self.assertEqual('http://10.0.0.1/api/path/arg2/val2/arg1', url)
url = self.client._build_request_url('/path', 'arg1', 'arg3',
arg2='val2')
self.assertEqual('http://10.0.0.1/api/path/arg2/val2/arg1/arg3', url)
@mock.patch('requests.get')
def test_request(self, mock_requests_get):
self.client._session_key = session_key
m = mock.Mock()
m.text.encode.side_effect = [response_ok, malformed_xml,
requests.exceptions.
RequestException("error")]
mock_requests_get.return_value = m
ret = self.client._request('/path')
        self.assertIsInstance(ret, etree._Element)
self.assertRaises(exception.DotHillConnectionError,
self.client._request,
'/path')
self.assertRaises(exception.DotHillConnectionError,
self.client._request,
'/path')
def test_assert_response_ok(self):
ok_tree = etree.XML(response_ok)
not_ok_tree = etree.XML(response_not_ok)
invalid_tree = etree.XML(invalid_xml)
ret = self.client._assert_response_ok(ok_tree)
self.assertIsNone(ret)
self.assertRaises(exception.DotHillRequestError,
self.client._assert_response_ok,
not_ok_tree)
self.assertRaises(exception.DotHillRequestError,
self.client._assert_response_ok, invalid_tree)
@mock.patch.object(dothill.DotHillClient, '_request')
def test_backend_exists(self, mock_request):
mock_request.side_effect = [exception.DotHillRequestError,
fake_xml]
self.assertFalse(self.client.backend_exists('backend_name',
'linear'))
self.assertTrue(self.client.backend_exists('backend_name',
'linear'))
@mock.patch.object(dothill.DotHillClient, '_request')
def test_backend_stats(self, mock_request):
stats = {'free_capacity_gb': 1979,
'total_capacity_gb': 1979}
linear = etree.XML(response_stats_linear)
virtual = etree.XML(response_stats_virtual)
mock_request.side_effect = [linear, virtual]
self.assertEqual(stats, self.client.backend_stats('OpenStack',
'linear'))
self.assertEqual(stats, self.client.backend_stats('A',
'virtual'))
@mock.patch.object(dothill.DotHillClient, '_request')
def test_get_lun(self, mock_request):
mock_request.side_effect = [etree.XML(response_no_lun),
etree.XML(response_lun)]
ret = self.client._get_first_available_lun_for_host("fakehost")
self.assertEqual(1, ret)
ret = self.client._get_first_available_lun_for_host("fakehost")
self.assertEqual(2, ret)
@mock.patch.object(dothill.DotHillClient, '_request')
def test_get_ports(self, mock_request):
mock_request.side_effect = [etree.XML(response_ports)]
ret = self.client.get_active_target_ports()
self.assertEqual([{'port-type': 'FC',
'target-id': 'id2',
'status': 'Up'},
{'port-type': 'iSCSI',
'target-id': 'id4',
'status': 'Up'},
{'port-type': 'iSCSI',
'target-id': 'id5',
'status': 'Up'}], ret)
@mock.patch.object(dothill.DotHillClient, '_request')
def test_get_fc_ports(self, mock_request):
mock_request.side_effect = [etree.XML(response_ports)]
ret = self.client.get_active_fc_target_ports()
self.assertEqual(['id2'], ret)
@mock.patch.object(dothill.DotHillClient, '_request')
def test_get_iscsi_iqns(self, mock_request):
mock_request.side_effect = [etree.XML(response_ports)]
ret = self.client.get_active_iscsi_target_iqns()
self.assertEqual(['id4', 'id5'], ret)
@mock.patch.object(dothill.DotHillClient, '_request')
def test_get_iscsi_portals(self, mock_request):
portals = {'10.0.0.12': 'Up', '10.0.0.11': 'Up'}
mock_request.side_effect = [etree.XML(response_ports_linear),
etree.XML(response_ports_virtual)]
ret = self.client.get_active_iscsi_target_portals()
self.assertEqual(portals, ret)
ret = self.client.get_active_iscsi_target_portals()
self.assertEqual(portals, ret)
class FakeConfiguration1(object):
dothill_backend_name = 'OpenStack'
dothill_backend_type = 'linear'
san_ip = '10.0.0.1'
san_login = 'manage'
san_password = '!manage'
dothill_api_protocol = 'http'
def safe_get(self, key):
return 'fakevalue'
class FakeConfiguration2(FakeConfiguration1):
dothill_iscsi_ips = ['10.0.0.11']
use_chap_auth = None
class TestFCDotHillCommon(test.TestCase):
def setUp(self):
super(TestFCDotHillCommon, self).setUp()
self.config = FakeConfiguration1()
self.common = dothill_common.DotHillCommon(self.config)
self.common.client_login = mock.MagicMock()
self.common.client_logout = mock.MagicMock()
self.common.serialNumber = "xxxxx"
self.common.owner = "A"
self.connector_element = "wwpns"
@mock.patch.object(dothill.DotHillClient, 'get_serial_number')
@mock.patch.object(dothill.DotHillClient, 'get_owner_info')
@mock.patch.object(dothill.DotHillClient, 'backend_exists')
def test_do_setup(self, mock_backend_exists,
mock_owner_info, mock_serial_number):
mock_backend_exists.side_effect = [False, True]
mock_owner_info.return_value = "A"
mock_serial_number.return_value = "xxxxx"
self.assertRaises(exception.DotHillInvalidBackend,
self.common.do_setup, None)
self.assertIsNone(self.common.do_setup(None))
mock_backend_exists.assert_called_with(self.common.backend_name,
self.common.backend_type)
mock_owner_info.assert_called_with(self.common.backend_name,
self.common.backend_type)
def test_vol_name(self):
self.assertEqual(encoded_volid, self.common._get_vol_name(vol_id))
self.assertEqual(encoded_snapid, self.common._get_snap_name(vol_id))
def test_check_flags(self):
class FakeOptions(object):
def __init__(self, d):
for k, v in d.items():
self.__dict__[k] = v
options = FakeOptions({'opt1': 'val1', 'opt2': 'val2'})
required_flags = ['opt1', 'opt2']
ret = self.common.check_flags(options, required_flags)
self.assertIsNone(ret)
options = FakeOptions({'opt1': 'val1', 'opt2': 'val2'})
required_flags = ['opt1', 'opt2', 'opt3']
self.assertRaises(exception.Invalid, self.common.check_flags,
options, required_flags)
def test_assert_connector_ok(self):
self.assertRaises(exception.InvalidInput,
self.common._assert_connector_ok, invalid_connector,
self.connector_element)
self.assertIsNone(self.common._assert_connector_ok(
connector,
self.connector_element))
@mock.patch.object(dothill.DotHillClient, 'backend_stats')
def test_update_volume_stats(self, mock_stats):
mock_stats.side_effect = [exception.DotHillRequestError,
stats_large_space]
self.assertRaises(exception.Invalid, self.common._update_volume_stats)
mock_stats.assert_called_with(self.common.backend_name,
self.common.backend_type)
ret = self.common._update_volume_stats()
self.assertIsNone(ret)
self.assertEqual({'driver_version': self.common.VERSION,
'pools': [{'QoS_support': False,
'free_capacity_gb': 90,
'location_info':
'DotHillVolumeDriver:xxxxx:OpenStack:A',
'pool_name': 'OpenStack',
'total_capacity_gb': 100}],
'storage_protocol': None,
'vendor_name': 'DotHill',
'volume_backend_name': None}, self.common.stats)
@mock.patch.object(dothill.DotHillClient, 'create_volume')
def test_create_volume(self, mock_create):
mock_create.side_effect = [exception.DotHillRequestError, None]
self.assertRaises(exception.Invalid, self.common.create_volume,
test_volume)
ret = self.common.create_volume(test_volume)
self.assertIsNone(ret)
mock_create.assert_called_with(encoded_volid,
"%sGB" % test_volume['size'],
self.common.backend_name,
self.common.backend_type)
@mock.patch.object(dothill.DotHillClient, 'delete_volume')
def test_delete_volume(self, mock_delete):
not_found_e = exception.DotHillRequestError(
'The volume was not found on this system.')
mock_delete.side_effect = [not_found_e, exception.DotHillRequestError,
None]
self.assertIsNone(self.common.delete_volume(test_volume))
self.assertRaises(exception.Invalid, self.common.delete_volume,
test_volume)
self.assertIsNone(self.common.delete_volume(test_volume))
mock_delete.assert_called_with(encoded_volid)
@mock.patch.object(dothill.DotHillClient, 'copy_volume')
@mock.patch.object(dothill.DotHillClient, 'backend_stats')
def test_create_cloned_volume(self, mock_stats, mock_copy):
mock_stats.side_effect = [stats_low_space, stats_large_space,
stats_large_space]
self.assertRaises(exception.DotHillNotEnoughSpace,
self.common.create_cloned_volume,
dest_volume, detached_volume)
self.assertFalse(mock_copy.called)
mock_copy.side_effect = [exception.DotHillRequestError, None]
self.assertRaises(exception.Invalid,
self.common.create_cloned_volume,
dest_volume, detached_volume)
ret = self.common.create_cloned_volume(dest_volume, detached_volume)
self.assertIsNone(ret)
mock_copy.assert_called_with(encoded_volid,
'vqqqqqqqqqqqqqqqqqqq',
self.common.backend_name,
self.common.backend_type)
@mock.patch.object(dothill.DotHillClient, 'copy_volume')
@mock.patch.object(dothill.DotHillClient, 'backend_stats')
def test_create_volume_from_snapshot(self, mock_stats, mock_copy):
mock_stats.side_effect = [stats_low_space, stats_large_space,
stats_large_space]
self.assertRaises(exception.DotHillNotEnoughSpace,
self.common.create_volume_from_snapshot,
dest_volume, test_snap)
mock_copy.side_effect = [exception.DotHillRequestError, None]
self.assertRaises(exception.Invalid,
self.common.create_volume_from_snapshot,
dest_volume, test_snap)
ret = self.common.create_volume_from_snapshot(dest_volume, test_snap)
self.assertIsNone(ret)
mock_copy.assert_called_with('sqqqqqqqqqqqqqqqqqqq',
'vqqqqqqqqqqqqqqqqqqq',
self.common.backend_name,
self.common.backend_type)
@mock.patch.object(dothill.DotHillClient, 'extend_volume')
def test_extend_volume(self, mock_extend):
mock_extend.side_effect = [exception.DotHillRequestError, None]
self.assertRaises(exception.Invalid, self.common.extend_volume,
test_volume, 20)
ret = self.common.extend_volume(test_volume, 20)
self.assertIsNone(ret)
mock_extend.assert_called_with(encoded_volid, '10GB')
@mock.patch.object(dothill.DotHillClient, 'create_snapshot')
def test_create_snapshot(self, mock_create):
mock_create.side_effect = [exception.DotHillRequestError, None]
self.assertRaises(exception.Invalid, self.common.create_snapshot,
test_snap)
ret = self.common.create_snapshot(test_snap)
self.assertIsNone(ret)
mock_create.assert_called_with(encoded_volid, 'sqqqqqqqqqqqqqqqqqqq')
@mock.patch.object(dothill.DotHillClient, 'delete_snapshot')
def test_delete_snapshot(self, mock_delete):
not_found_e = exception.DotHillRequestError(
'The volume was not found on this system.')
mock_delete.side_effect = [not_found_e, exception.DotHillRequestError,
None]
self.assertIsNone(self.common.delete_snapshot(test_snap))
self.assertRaises(exception.Invalid, self.common.delete_snapshot,
test_snap)
self.assertIsNone(self.common.delete_snapshot(test_snap))
mock_delete.assert_called_with('sqqqqqqqqqqqqqqqqqqq')
@mock.patch.object(dothill.DotHillClient, 'map_volume')
def test_map_volume(self, mock_map):
mock_map.side_effect = [exception.DotHillRequestError, 10]
self.assertRaises(exception.Invalid, self.common.map_volume,
test_volume, connector, self.connector_element)
lun = self.common.map_volume(test_volume, connector,
self.connector_element)
self.assertEqual(10, lun)
mock_map.assert_called_with(encoded_volid,
connector, self.connector_element)
@mock.patch.object(dothill.DotHillClient, 'unmap_volume')
def test_unmap_volume(self, mock_unmap):
mock_unmap.side_effect = [exception.DotHillRequestError, None]
self.assertRaises(exception.Invalid, self.common.unmap_volume,
test_volume, connector, self.connector_element)
ret = self.common.unmap_volume(test_volume, connector,
self.connector_element)
self.assertIsNone(ret)
mock_unmap.assert_called_with(encoded_volid, connector,
self.connector_element)
@mock.patch.object(dothill.DotHillClient, 'copy_volume')
@mock.patch.object(dothill.DotHillClient, 'delete_volume')
@mock.patch.object(dothill.DotHillClient, 'modify_volume_name')
def test_retype(self, mock_modify, mock_delete, mock_copy):
mock_copy.side_effect = [exception.DotHillRequestError, None]
self.assertRaises(exception.Invalid, self.common.migrate_volume,
test_retype_volume, test_host)
ret = self.common.migrate_volume(test_retype_volume, test_host)
self.assertEqual((True, None), ret)
ret = self.common.migrate_volume(test_retype_volume,
{'capabilities': {}})
self.assertEqual((False, None), ret)
@mock.patch.object(dothill_common.DotHillCommon, '_get_vol_name')
@mock.patch.object(dothill.DotHillClient, 'modify_volume_name')
def test_manage_existing(self, mock_modify, mock_volume):
existing_ref = {'source-name': 'xxxx'}
mock_modify.side_effect = [exception.DotHillRequestError, None]
self.assertRaises(exception.Invalid, self.common.manage_existing,
test_volume, existing_ref)
ret = self.common.manage_existing(test_volume, existing_ref)
self.assertIsNone(ret)
@mock.patch.object(dothill.DotHillClient, 'get_volume_size')
def test_manage_existing_get_size(self, mock_volume):
existing_ref = {'source-name': 'xxxx'}
mock_volume.side_effect = [exception.DotHillRequestError, 1]
self.assertRaises(exception.Invalid,
self.common.manage_existing_get_size,
None, existing_ref)
ret = self.common.manage_existing_get_size(None, existing_ref)
self.assertEqual(1, ret)
class TestISCSIDotHillCommon(TestFCDotHillCommon):
def setUp(self):
super(TestISCSIDotHillCommon, self).setUp()
self.connector_element = 'initiator'
class TestDotHillFC(test.TestCase):
@mock.patch.object(dothill_common.DotHillCommon, 'do_setup')
def setUp(self, mock_setup):
super(TestDotHillFC, self).setUp()
self.vendor_name = 'DotHill'
mock_setup.return_value = True
def fake_init(self, *args, **kwargs):
super(dothill_fc.DotHillFCDriver, self).__init__()
self.common = None
self.configuration = FakeConfiguration1()
self.lookup_service = fczm_utils.create_lookup_service()
dothill_fc.DotHillFCDriver.__init__ = fake_init
self.driver = dothill_fc.DotHillFCDriver()
self.driver.do_setup(None)
def _test_with_mock(self, mock, method, args, expected=None):
func = getattr(self.driver, method)
mock.side_effect = [exception.Invalid(), None]
self.assertRaises(exception.Invalid, func, *args)
self.assertEqual(expected, func(*args))
@mock.patch.object(dothill_common.DotHillCommon, 'create_volume')
def test_create_volume(self, mock_create):
self._test_with_mock(mock_create, 'create_volume', [None])
@mock.patch.object(dothill_common.DotHillCommon,
'create_cloned_volume')
def test_create_cloned_volume(self, mock_create):
self._test_with_mock(mock_create, 'create_cloned_volume', [None, None])
@mock.patch.object(dothill_common.DotHillCommon,
'create_volume_from_snapshot')
def test_create_volume_from_snapshot(self, mock_create):
self._test_with_mock(mock_create, 'create_volume_from_snapshot',
[None, None])
@mock.patch.object(dothill_common.DotHillCommon, 'delete_volume')
def test_delete_volume(self, mock_delete):
self._test_with_mock(mock_delete, 'delete_volume', [None])
@mock.patch.object(dothill_common.DotHillCommon, 'create_snapshot')
def test_create_snapshot(self, mock_create):
self._test_with_mock(mock_create, 'create_snapshot', [None])
@mock.patch.object(dothill_common.DotHillCommon, 'delete_snapshot')
def test_delete_snapshot(self, mock_delete):
self._test_with_mock(mock_delete, 'delete_snapshot', [None])
@mock.patch.object(dothill_common.DotHillCommon, 'extend_volume')
def test_extend_volume(self, mock_extend):
self._test_with_mock(mock_extend, 'extend_volume', [None, 10])
@mock.patch.object(dothill_common.DotHillCommon, 'client_logout')
@mock.patch.object(dothill_common.DotHillCommon,
'get_active_fc_target_ports')
@mock.patch.object(dothill_common.DotHillCommon, 'map_volume')
@mock.patch.object(dothill_common.DotHillCommon, 'client_login')
def test_initialize_connection(self, mock_login, mock_map, mock_ports,
mock_logout):
mock_login.return_value = None
mock_logout.return_value = None
mock_map.side_effect = [exception.Invalid, 1]
mock_ports.side_effect = [['id1']]
self.assertRaises(exception.Invalid,
self.driver.initialize_connection, test_volume,
connector)
mock_map.assert_called_with(test_volume, connector, 'wwpns')
ret = self.driver.initialize_connection(test_volume, connector)
self.assertEqual({'driver_volume_type': 'fibre_channel',
'data': {'initiator_target_map': {
'111111111111111': ['id1'],
'111111111111112': ['id1']},
'target_wwn': ['id1'],
'target_lun': 1,
'target_discovered': True}}, ret)
@mock.patch.object(dothill_common.DotHillCommon, 'unmap_volume')
@mock.patch.object(dothill.DotHillClient, 'list_luns_for_host')
def test_terminate_connection(self, mock_list, mock_unmap):
mock_unmap.side_effect = [exception.Invalid, 1]
mock_list.side_effect = ['yes']
actual = {'driver_volume_type': 'fibre_channel', 'data': {}}
self.assertRaises(exception.Invalid,
self.driver.terminate_connection, test_volume,
connector)
mock_unmap.assert_called_with(test_volume, connector, 'wwpns')
ret = self.driver.terminate_connection(test_volume, connector)
self.assertEqual(actual, ret)
@mock.patch.object(dothill_common.DotHillCommon, 'get_volume_stats')
def test_get_volume_stats(self, mock_stats):
stats = {'storage_protocol': None,
'driver_version': self.driver.VERSION,
'volume_backend_name': None,
'vendor_name': self.vendor_name,
'pools': [{'free_capacity_gb': 90,
'reserved_percentage': 0,
'total_capacity_gb': 100,
'QoS_support': False,
'location_info': 'xx:xx:xx:xx',
'pool_name': 'x'}]}
mock_stats.side_effect = [exception.Invalid, stats, stats]
self.assertRaises(exception.Invalid, self.driver.get_volume_stats,
False)
ret = self.driver.get_volume_stats(False)
self.assertEqual(stats, ret)
ret = self.driver.get_volume_stats(True)
self.assertEqual(stats, ret)
mock_stats.assert_called_with(True)
@mock.patch.object(dothill_common.DotHillCommon, 'retype')
def test_retype(self, mock_retype):
mock_retype.side_effect = [exception.Invalid, True, False]
args = [None, None, None, None, None]
self.assertRaises(exception.Invalid, self.driver.retype, *args)
self.assertTrue(self.driver.retype(*args))
self.assertFalse(self.driver.retype(*args))
@mock.patch.object(dothill_common.DotHillCommon, 'manage_existing')
def test_manage_existing(self, mock_manage_existing):
self._test_with_mock(mock_manage_existing, 'manage_existing',
[None, None])
@mock.patch.object(dothill_common.DotHillCommon,
'manage_existing_get_size')
def test_manage_size(self, mock_manage_size):
mock_manage_size.side_effect = [exception.Invalid, 1]
self.assertRaises(exception.Invalid,
self.driver.manage_existing_get_size,
None, None)
self.assertEqual(1, self.driver.manage_existing_get_size(None, None))
class TestDotHillISCSI(TestDotHillFC):
@mock.patch.object(dothill_common.DotHillCommon, 'do_setup')
def setUp(self, mock_setup):
super(TestDotHillISCSI, self).setUp()
self.vendor_name = 'DotHill'
mock_setup.return_value = True
def fake_init(self, *args, **kwargs):
super(dothill_iscsi.DotHillISCSIDriver, self).__init__()
self.common = None
self.configuration = FakeConfiguration2()
self.iscsi_ips = ['10.0.0.11']
dothill_iscsi.DotHillISCSIDriver.__init__ = fake_init
self.driver = dothill_iscsi.DotHillISCSIDriver()
self.driver.do_setup(None)
@mock.patch.object(dothill_common.DotHillCommon, 'client_logout')
@mock.patch.object(dothill_common.DotHillCommon,
'get_active_iscsi_target_portals')
@mock.patch.object(dothill_common.DotHillCommon,
'get_active_iscsi_target_iqns')
@mock.patch.object(dothill_common.DotHillCommon, 'map_volume')
@mock.patch.object(dothill_common.DotHillCommon, 'client_login')
def test_initialize_connection(self, mock_login, mock_map, mock_iqns,
mock_portals, mock_logout):
mock_login.return_value = None
mock_logout.return_value = None
mock_map.side_effect = [exception.Invalid, 1]
self.driver.iscsi_ips = ['10.0.0.11']
self.driver.initialize_iscsi_ports()
mock_iqns.side_effect = [['id2']]
mock_portals.return_value = {'10.0.0.11': 'Up', '10.0.0.12': 'Up'}
self.assertRaises(exception.Invalid,
self.driver.initialize_connection, test_volume,
connector)
mock_map.assert_called_with(test_volume, connector, 'initiator')
ret = self.driver.initialize_connection(test_volume, connector)
self.assertEqual({'driver_volume_type': 'iscsi',
'data': {'target_iqn': 'id2',
'target_lun': 1,
'target_discovered': True,
'target_portal': '10.0.0.11:3260'}}, ret)
@mock.patch.object(dothill_common.DotHillCommon, 'unmap_volume')
def test_terminate_connection(self, mock_unmap):
mock_unmap.side_effect = [exception.Invalid, 1]
self.assertRaises(exception.Invalid,
self.driver.terminate_connection, test_volume,
connector)
mock_unmap.assert_called_with(test_volume, connector, 'initiator')
ret = self.driver.terminate_connection(test_volume, connector)
self.assertIsNone(ret)
|
the-stack_106_21182
|
"""
Derived from henchbot.py script: https://github.com/henchbot/mybinder.org-upgrades/blob/master/henchbot.py
"""
from yaml import safe_load as load
import requests
import subprocess
import os
import shutil
import time
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
GL_BOT_NAME = os.environ['GL_BOT_NAME'].strip()
GL_BOT_EMAIL = os.environ['GL_BOT_EMAIL'].strip()
GL_BOT_TOKEN = os.environ['GL_BOT_TOKEN'].strip()
# https://docs.gitlab.com/ee/api/#personal-access-tokens
GL_API_AUTHORIZATION_HEADER = {"Private-Token": GL_BOT_TOKEN}
GL_API_URL = f"https://git.gesis.org/api/v4/"
GL_ORG_NAME = os.environ.get("GL_ORG_NAME", "ilcm")
GL_REPO_NAME = os.environ.get("GL_REPO_NAME", "orc")
GL_REPO_URL = f"https://oauth2:{GL_BOT_TOKEN}@git.gesis.org/{GL_ORG_NAME}/{GL_REPO_NAME}"
GH_ORG_NAME = os.environ.get("GH_ORG_NAME", "gesiscss")
GH_REPO_NAME = os.environ.get("GH_REPO_NAME", "orc")
GH_REPO_RAW_URL = f"https://raw.githubusercontent.com/{GH_ORG_NAME}/{GH_REPO_NAME}/master/"
BHUB_RAW_URL = "https://raw.githubusercontent.com/jupyterhub/binderhub/"
MYBINDER_REPO_URL = f"https://github.com/jupyterhub/mybinder.org-deploy/"
MYBINDER_REPO_RAW_URL = f"https://raw.githubusercontent.com/jupyterhub/mybinder.org-deploy/"
class Bot:
"""
Class for a bot that determines whether an upgrade is necessary
for GESIS Binder depending on if repo2docker and BinderHub is updated on mybinder.org.
If an upgrade is needed, it will checkout into <repo>_bump branch,
edit files and create a PR.
"""
def __init__(self):
"""
Start by getting the latest commits of repo2docker, BinderHub and Jupyterhub in mybinder.org
and the live ones in GESIS Binder.
"""
self.commit_info = {'binderhub': {}, 'repo2docker': {}, 'jupyterhub': {}}
self.get_new_commits()
self.gitlab_project_id = None
self.branch_name = None
def set_gitlab_project_id(self, repo_name):
projects = requests.get(f'{GL_API_URL}projects?search={repo_name}',
headers=GL_API_AUTHORIZATION_HEADER).json()
for project in projects:
if project['name'] == repo_name:
self.gitlab_project_id = project['id']
break
def check_existing_prs(self, repo):
"""
        Check if there are open PRs created by the bot
"""
# https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-requests
prs = requests.get(GL_API_URL + 'merge_requests?state=opened',
headers=GL_API_AUTHORIZATION_HEADER).json()
for pr in prs:
if repo in pr['title'].lower():
pr_latest = pr['title'].split('...')[-1].strip()
if pr_latest == self.commit_info[repo]['latest']:
# same update, pr is not merged yet
return None
return {'iid': pr['iid']} # iid is unique only in scope of a single project
return False
def check_branch_exists(self):
# https://docs.gitlab.com/ee/api/branches.html#list-repository-branches
        branches = requests.get(f'{GL_API_URL}projects/{self.gitlab_project_id}/repository/branches',
headers=GL_API_AUTHORIZATION_HEADER).json()
return self.branch_name in [b['name'] for b in branches]
def delete_old_branch_if_exists(self):
if self.check_branch_exists():
# https://docs.gitlab.com/ee/api/branches.html#delete-repository-branch
            res = requests.delete(f'{GL_API_URL}projects/{self.gitlab_project_id}/repository/branches/{self.branch_name}',
headers=GL_API_AUTHORIZATION_HEADER)
assert res.status_code == 204
def edit_repo2docker_files(self, repo):
"""
Update the SHA to latest for r2d
"""
fname = 'gesisbinder/gesisbinder/values.yaml'
with open(fname, 'r', encoding='utf8') as f:
values_yaml = f.read()
updated_yaml = values_yaml.replace(
"jupyter/repo2docker:{}".format(self.commit_info[repo]['live']),
"jupyter/repo2docker:{}".format(self.commit_info[repo]['latest'])
)
with open(fname, 'w', encoding='utf8') as f:
f.write(updated_yaml)
return [fname]
def edit_binderhub_files(self, repo):
"""
Update the SHA to latest for bhub
"""
fname = 'gesisbinder/gesisbinder/requirements.yaml'
with open(fname, 'r', encoding='utf8') as f:
requirements_yaml = f.read()
updated_yaml = requirements_yaml.replace(
"version: {}".format(self.commit_info[repo]['live']),
"version: {}".format(self.commit_info[repo]['latest']),
1
)
with open(fname, 'w', encoding='utf8') as f:
f.write(updated_yaml)
return [fname]
def edit_files(self, repo):
"""
Controlling method to update file for the repo
"""
if repo == 'repo2docker':
return self.edit_repo2docker_files(repo)
elif repo == 'binderhub':
return self.edit_binderhub_files(repo)
def add_commit_push(self, files_changed, repo):
"""
After making change, add, commit and push to fork
"""
for f in files_changed:
subprocess.check_call(['git', 'add', f])
if repo == 'repo2docker':
commit_message = 'repo2docker: https://github.com/jupyter/repo2docker/compare/{}...{}'.format(
self.commit_info['repo2docker']['live'].split('.dirty')[0].split('.')[-1][1:],
self.commit_info['repo2docker']['latest'].split('.dirty')[0].split('.')[-1][1:])
elif repo == 'binderhub':
commit_message = 'binderhub: https://github.com/jupyterhub/binderhub/compare/{}...{}'.format(
self.commit_info['binderhub']['live'].split('-')[-1].split('.')[-1],
self.commit_info['binderhub']['latest'].split('-')[-1].split('.')[-1])
subprocess.check_call(['git', 'config', 'user.name', GL_BOT_NAME])
subprocess.check_call(['git', 'config', 'user.email', GL_BOT_EMAIL])
subprocess.check_call(['git', 'commit', '-m', commit_message])
if self.check_branch_exists():
# there is an open PR for this repo, so update it
subprocess.check_call(['git', 'push', '-f', GL_REPO_URL, self.branch_name])
else:
subprocess.check_call(['git', 'push', GL_REPO_URL, self.branch_name])
def get_associated_prs(self, compare_url):
"""
Gets all PRs from dependency repo associated with the upgrade
"""
repo_api = compare_url.replace('github.com', 'api.github.com/repos')
res = requests.get(repo_api).json()
if 'commits' not in res or not res['commits']:
logging.error("Compare url returns no commits but there must be commits. "
"Something must be wrong with compare url.")
commit_shas = [x['sha'] for x in res['commits']]
pr_api = repo_api.split('/compare/')[0] + '/pulls/'
associated_prs = ['Associated PRs:']
for sha in commit_shas[::-1]:
res = requests.get('https://api.github.com/search/issues?q=sha:{}'.format(sha)).json()
if 'items' in res:
for i in res['items']:
formatted = '- {} [#{}]({})'.format(i['title'], i['number'], i['html_url'])
repo_owner = i['repository_url'].split('/')[-2]
try:
merged_at = requests.get(pr_api + str(i['number'])).json()['merged_at']
except KeyError:
continue
if formatted not in associated_prs and repo_owner.startswith('jupyter') and merged_at:
associated_prs.append(formatted)
time.sleep(3)
return associated_prs
def make_pr_body(self, repo):
"""
Formats a text body for the PR
"""
if repo == 'repo2docker':
compare_url = 'https://github.com/jupyter/repo2docker/compare/{}...{}'.format(
self.commit_info['repo2docker']['live'].split('.dirty')[0].split('.')[-1][1:],
self.commit_info['repo2docker']['latest'].split('.dirty')[0].split('.')[-1][1:])
elif repo == 'binderhub':
compare_url = 'https://github.com/jupyterhub/binderhub/compare/{}...{}'.format(
self.commit_info['binderhub']['live'].split('-')[-1].split('.')[-1],
self.commit_info['binderhub']['latest'].split('-')[-1].split('.')[-1])
logging.info('compare url: {}'.format(compare_url))
associated_prs = self.get_associated_prs(compare_url)
body = '\n'.join(
[f'This is a {repo} version bump. See the link below for a diff of new changes:\n',
compare_url + ' \n'] + associated_prs
)
return body
def create_update_pr(self, repo, existing_pr):
"""
Makes the PR from all components
"""
body = self.make_pr_body(repo)
title = f"{repo}: {self.commit_info[repo]['live']}...{self.commit_info[repo]['latest']}"
params = {'source_branch': self.branch_name, 'target_branch': 'master',
'title': title, 'description': f'{body}'}
if existing_pr:
# update title and description of existing PR
# https://docs.gitlab.com/ee/api/merge_requests.html#update-mr
res = requests.put(f"{GL_API_URL}projects/{self.gitlab_project_id}/merge_requests/{existing_pr['iid']}",
params=params, headers=GL_API_AUTHORIZATION_HEADER)
else:
# https://docs.gitlab.com/ee/api/merge_requests.html#create-mr
res = requests.post(f"{GL_API_URL}projects/{self.gitlab_project_id}/merge_requests",
params=params, headers=GL_API_AUTHORIZATION_HEADER)
logging.info(f"PR done: {title}")
def update_repos(self, repos):
"""
Main method to check/create upgrades
"""
for repo in repos:
self.branch_name = repo + '_bump'
if self.commit_info[repo]['live'] != self.commit_info[repo]['latest']:
logging.info(f"{repo}:{self.commit_info[repo]['live']}-->{self.commit_info[repo]['latest']}")
self.set_gitlab_project_id(GL_REPO_NAME)
existing_pr = self.check_existing_prs(repo)
if existing_pr is None:
# there is a PR with same update, it is not merged yet
continue
elif existing_pr is False:
# no PR exists for this repo
self.delete_old_branch_if_exists()
subprocess.check_call(['git', 'clone', f'{GL_REPO_URL}.git'])
os.chdir(GL_REPO_NAME)
subprocess.check_call(['git', 'checkout', '-b', self.branch_name])
files_changed = self.edit_files(repo)
self.add_commit_push(files_changed, repo)
os.chdir('..')
shutil.rmtree(GL_REPO_NAME)
self.create_update_pr(repo, existing_pr)
else:
logging.info(f"{repo}: already up-to-date")
def get_repo2docker_live(self):
"""
Get the live r2d SHA from GESIS Notebooks
"""
# Load master repo2docker
url_helm_chart = f"{GH_REPO_RAW_URL}gesisbinder/gesisbinder/values.yaml"
helm_chart = requests.get(url_helm_chart)
helm_chart = load(helm_chart.text)
r2d_live = helm_chart['binderhub']['config']['BinderHub']['build_image'].split(':')[-1]
self.commit_info['repo2docker']['live'] = r2d_live
def get_binderhub_live(self):
"""
        Get the live BinderHub SHA from GESIS Notebooks
"""
# Load master requirements
url_requirements = f"{GH_REPO_RAW_URL}gesisbinder/gesisbinder/requirements.yaml"
requirements = load(requests.get(url_requirements).text)
binderhub_dep = [ii for ii in requirements['dependencies'] if ii['name'] == 'binderhub'][0]
bhub_live = binderhub_dep['version'].strip()
self.commit_info['binderhub']['live'] = bhub_live
def get_jupyterhub_live(self):
"""
Get the live JupyterHub SHA from BinderHub repo
"""
url_binderhub_requirements = f"{BHUB_RAW_URL}{self.commit_info['binderhub']['live'].split('-')[-1].split('.')[-1]}" \
f"/helm-chart/binderhub/requirements.yaml"
requirements = load(requests.get(url_binderhub_requirements).text)
jupyterhub_dep = [ii for ii in requirements['dependencies'] if ii['name'] == 'jupyterhub'][0]
jhub_live = jupyterhub_dep['version'].strip()
self.commit_info['jupyterhub']['live'] = jhub_live
def get_repo2docker_latest(self):
"""
Get the latest r2d SHA from mybinder.org
"""
# Load master repo2docker
url_helm_chart = f"{MYBINDER_REPO_RAW_URL}master/mybinder/values.yaml"
helm_chart = requests.get(url_helm_chart)
helm_chart = load(helm_chart.text)
r2d_latest = helm_chart['binderhub']['config']['BinderHub']['build_image'].split(':')[-1]
self.commit_info['repo2docker']['latest'] = r2d_latest
def get_binderhub_latest(self):
"""
Get the latest BinderHub SHA from mybinder.org
"""
# Load master requirements
url_requirements = f"{MYBINDER_REPO_RAW_URL}master/mybinder/requirements.yaml"
requirements = load(requests.get(url_requirements).text)
binderhub_dep = [ii for ii in requirements['dependencies'] if ii['name'] == 'binderhub'][0]
bhub_latest = binderhub_dep['version'].strip()
self.commit_info['binderhub']['latest'] = bhub_latest
def get_jupyterhub_latest(self):
"""
        Get the latest JupyterHub SHA from the BinderHub repo
"""
url_binderhub_requirements = f"{BHUB_RAW_URL}{self.commit_info['binderhub']['latest'].split('-')[-1].split('.')[-1]}/helm-chart/binderhub/requirements.yaml"
requirements = load(requests.get(url_binderhub_requirements).text)
jupyterhub_dep = [ii for ii in requirements['dependencies'] if ii['name'] == 'jupyterhub'][0]
jhub_latest = jupyterhub_dep['version'].strip()
self.commit_info['jupyterhub']['latest'] = jhub_latest
def get_new_commits(self):
"""
Main controlling method to get commit SHAs
"""
# logging.info('Fetching latest commit SHAs for repo2docker, BinderHub and JupyterHub that deployed on GESIS Binder')
self.get_repo2docker_live()
self.get_binderhub_live()
self.get_jupyterhub_live()
# logging.info('Fetching latest commit SHAs for repo2docker, BinderHub and JupyterHub that deployed on mybinder.org')
self.get_repo2docker_latest()
self.get_binderhub_latest()
self.get_jupyterhub_latest()
logging.info(self.commit_info)
if __name__ == '__main__':
b = Bot()
b.update_repos(['repo2docker', 'binderhub'])
|
the-stack_106_21183
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from functools import partial
from test_utils import check_batch
from test_utils import compare_pipelines
from test_utils import RandomDataIterator
from test_utils import ConstantDataIterator
from test_utils import get_dali_extra_path
import os
import librosa as librosa
import math
class SpectrogramPipeline(Pipeline):
def __init__(self, device, batch_size, iterator, nfft, window_length, window_step,
window=None, num_threads=1, device_id=0):
super(SpectrogramPipeline, self).__init__(batch_size, num_threads, device_id)
self.device = device
self.iterator = iterator
self.inputs = ops.ExternalSource()
window_fn = window(window_length).tolist() if window is not None else None
self.fft = ops.Spectrogram(device = self.device,
nfft = nfft,
window_length = window_length,
window_step = window_step,
window_fn = window_fn,
power = 2)
def define_graph(self):
self.data = self.inputs()
out = self.data.gpu() if self.device == 'gpu' else self.data
out = self.fft(out)
return out
def iter_setup(self):
data = self.iterator.next()
# randomly insert extra axis (channels?)
r = np.random.randint(-1, 2)
if r == 0:
data = [x[np.newaxis,:] for x in data]
elif r == 1:
data = [x[:, np.newaxis] for x in data]
self.feed_input(self.data, data)
def hann_win(n):
hann = np.ones([n], dtype=np.float32)
a = (2.0 * math.pi / n)
for t in range(n):
phase = a * (t + 0.5)
hann[t] = 0.5 * (1.0 - math.cos(phase))
return hann
def cos_win(n):
phase = (np.arange(n) + 0.5) * (math.pi / n)
return np.sin(phase).astype(np.float32)
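# Hand-computed sanity values for the window helpers above (illustrative, not
# part of the original test suite): hann_win(4) ~ [0.1464, 0.8536, 0.8536,
# 0.1464] and cos_win(4) ~ [0.3827, 0.9239, 0.9239, 0.3827].
def _check_window_helpers():
    assert np.allclose(hann_win(4), [0.14645, 0.85355, 0.85355, 0.14645], atol=1e-4)
    assert np.allclose(cos_win(4), [0.38268, 0.92388, 0.92388, 0.38268], atol=1e-4)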
def spectrogram_func_librosa(nfft, win_len, win_step, window, input_data):
# Squeeze to 1d
if len(input_data.shape) > 1:
input_data = np.squeeze(input_data)
if window is None:
window = hann_win
out = np.abs(librosa.stft(y=input_data, n_fft=nfft or win_len,
win_length=win_len, hop_length=win_step, window=window))**2
# Alternative way to calculate the spectrogram:
# out, _ = librosa.core.spectrum._spectrogram(
# y=input_data, n_fft=nfft, hop_length=win_step, window=hann_win, power=2)
return out
class SpectrogramPythonPipeline(Pipeline):
def __init__(self, device, batch_size, iterator, nfft, window_length, window_step, window=None,
num_threads=1, device_id=0, spectrogram_func=spectrogram_func_librosa):
super(SpectrogramPythonPipeline, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.device = "cpu"
self.iterator = iterator
self.inputs = ops.ExternalSource()
function = partial(spectrogram_func, nfft, window_length, window_step, window)
self.spectrogram = ops.PythonFunction(function=function, output_layouts=["ft"])
def define_graph(self):
self.data = self.inputs()
out = self.spectrogram(self.data)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data)
def check_operator_spectrogram_vs_python(device, batch_size, input_shape,
nfft, window_length, window_step):
eii1 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
eii2 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
compare_pipelines(
SpectrogramPipeline(device, batch_size, iter(eii1), nfft=nfft, window=None,
window_length=window_length, window_step=window_step),
SpectrogramPythonPipeline(device, batch_size, iter(eii2), window=None, nfft=nfft,
window_length=window_length, window_step=window_step),
batch_size=batch_size, N_iterations=5, eps=1e-04)
def test_operator_spectrogram_vs_python():
for device in ['cpu']:
for batch_size in [3]:
for nfft, window_length, window_step, shape in [(256, 256, 128, (1, 4096)),
(256, 256, 128, (4096,)),
(256, 256, 128, (4096, 1)),
(256, 256, 128, (1, 1, 4096, 1)),
(16, 16, 8, (1, 1000)),
(10, 10, 5, (1, 1000)),
(None, 10, 5, (1, 1000)),
]:
yield check_operator_spectrogram_vs_python, device, batch_size, shape, \
nfft, window_length, window_step
def check_operator_spectrogram_vs_python_wave_1d(device, batch_size, input_length,
nfft, window_length, window_step, window):
f = 4000 # [Hz]
sr = 44100 # [Hz]
x = np.arange(input_length, dtype=np.float32)
y = np.sin(2 * np.pi * f * x / sr)
data1 = ConstantDataIterator(batch_size, y, dtype=np.float32)
data2 = ConstantDataIterator(batch_size, y, dtype=np.float32)
compare_pipelines(
SpectrogramPipeline(device, batch_size, iter(data1), nfft=nfft,
window_length=window_length, window_step=window_step, window=window),
SpectrogramPythonPipeline(device, batch_size, iter(data2),
nfft=nfft, window_length=window_length, window_step=window_step,
window=window),
batch_size=batch_size, N_iterations=5, eps=1e-04)
def test_operator_spectrogram_vs_python_wave():
for device in ['cpu', 'gpu']:
for window in [None, hann_win, cos_win]:
for batch_size in [3]:
for nfft, window_length, window_step, length in [(256, 256, 128, 4096),
(128, 100, 61, 1000),
(10, 10, 5, 1000),
]:
yield check_operator_spectrogram_vs_python_wave_1d, device, batch_size, \
length, nfft, window_length, window_step, window
if __name__ == "__main__":
    # Run the wave-based checks directly when the file is executed as a script,
    # instead of executing them as a side effect of importing the module.
    for test in test_operator_spectrogram_vs_python_wave():
        test[0](*test[1:])
dali_extra = get_dali_extra_path()
audio_files = os.path.join(dali_extra, "db", "audio")
class AudioSpectrogramPipeline(Pipeline):
def __init__(self, device, batch_size, nfft, window_length, window_step,
num_threads=1, device_id=0, layout="ft"):
super(AudioSpectrogramPipeline, self).__init__(batch_size, num_threads, device_id)
self.input = ops.readers.File(device="cpu", file_root=audio_files)
self.decode = ops.decoders.Audio(device="cpu", dtype=types.FLOAT, downmix=True)
self.fft = ops.Spectrogram(device=device,
nfft=nfft,
window_length=window_length,
window_step=window_step,
power=2,
layout=layout)
def define_graph(self):
read, _ = self.input()
audio, rate = self.decode(read)
if self.fft.device == "gpu":
audio = audio.gpu()
spec = self.fft(audio)
return spec
class AudioSpectrogramPythonPipeline(Pipeline):
def __init__(self, batch_size, nfft, window_length, window_step,
num_threads=1, device_id=0, spectrogram_func=spectrogram_func_librosa,
layout="ft"):
super(AudioSpectrogramPythonPipeline, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.input = ops.readers.File(device="cpu", file_root=audio_files)
self.decode = ops.decoders.Audio(device="cpu", dtype=types.FLOAT, downmix=True)
function = partial(spectrogram_func, nfft, window_length, window_step, None)
self.spectrogram = ops.PythonFunction(function=function, output_layouts=["ft"])
self.layout = layout
def define_graph(self):
read, _ = self.input()
audio, rate = self.decode(read)
out = self.spectrogram(audio)
if self.layout == "tf":
out = dali.fn.transpose(out, perm=[1,0], transpose_layout=True)
return out
def check_operator_decoder_and_spectrogram_vs_python(device, batch_size, nfft, window_length, window_step, layout):
compare_pipelines(
AudioSpectrogramPipeline(device=device, batch_size=batch_size,
nfft=nfft, window_length=window_length, window_step=window_step, layout=layout),
AudioSpectrogramPythonPipeline(batch_size, nfft=nfft,
window_length=window_length, window_step=window_step, layout=layout),
batch_size=batch_size, N_iterations=5, eps=1e-04)
def test_operator_decoder_and_spectrogram():
for device in ["cpu", "gpu"]:
for layout in ["tf", "ft"]:
for batch_size in [3]:
for nfft, window_length, window_step, shape in [(256, 256, 128, (1, 4096)),
(256, 256, 128, (4096,)),
(256, 256, 128, (4096, 1)),
(256, 256, 128, (1, 1, 4096, 1)),
(16, 16, 8, (1, 1000)),
(10, 10, 5, (1, 1000)),
]:
yield check_operator_decoder_and_spectrogram_vs_python, device, batch_size, \
nfft, window_length, window_step, layout
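# Hedged note (not part of the original tests): when running outside a nose-style
# runner, the remaining generator test can be driven manually, mirroring the loop
# already used for test_operator_spectrogram_vs_python_wave above.
if __name__ == "__main__":
    for test in test_operator_decoder_and_spectrogram():
        test[0](*test[1:])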
|
the-stack_106_21188
|
from numpy import arcsin, exp
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : SlotM10
A SlotM10 object
Returns
-------
point_dict: dict
A dict of the slot coordinates
"""
Rbo = self.get_Rbo()
point_dict = dict()
    # alpha is the angle to rotate Z0 so that ||Z1, Z4|| = W0 (slot opening width)
alpha = float(arcsin(self.W0 / (2 * Rbo)))
Z1 = Rbo * exp(-1j * alpha)
if self.is_outwards():
Z2 = Z1 + self.H0
else: # inward slot
Z2 = Z1 - self.H0
ZM1 = Z2.real - 1j * self.Wmag / 2
if self.is_outwards():
ZM2 = ZM1 - self.Hmag
else: # inward slot
ZM2 = ZM1 + self.Hmag
point_dict["Z1"] = Z1
point_dict["Z2"] = Z2
point_dict["ZM1"] = ZM1
point_dict["ZM2"] = ZM2
# symetry
point_dict["Z3"] = Z2.conjugate()
point_dict["Z4"] = Z1.conjugate()
point_dict["ZM3"] = ZM2.conjugate()
point_dict["ZM4"] = ZM1.conjugate()
return point_dict
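# Hedged check (not from the original module): the alpha/Z1 construction above
# guarantees that the slot opening ||Z1, Z4|| equals W0. The Rbo/W0 values below
# are hypothetical and only illustrate the geometry.
if __name__ == "__main__":
    _Rbo, _W0 = 0.05, 0.01  # assumed bore radius / slot opening width [m]
    _alpha = float(arcsin(_W0 / (2 * _Rbo)))
    _Z1 = _Rbo * exp(-1j * _alpha)
    assert abs(abs(_Z1 - _Z1.conjugate()) - _W0) < 1e-12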
|
the-stack_106_21189
|
"""
Sensor for Inter RAO cabinet.
Retrieves indications regarding current state of accounts.
"""
import logging
import re
from datetime import date, datetime
from enum import IntEnum
from typing import (
Any,
ClassVar,
Dict,
Final,
Hashable,
Mapping,
Optional,
TypeVar,
Union,
)
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SERVICE,
CONF_DESCRIPTION,
STATE_LOCKED,
STATE_OK,
STATE_PROBLEM,
STATE_UNKNOWN,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import slugify
from custom_components.lkcomu_interrao._base import (
LkcomuInterRAOEntity,
SupportedServicesType,
make_common_async_setup_entry,
)
from custom_components.lkcomu_interrao._encoders import invoice_to_attrs, payment_to_attrs
from custom_components.lkcomu_interrao._util import with_auto_auth
from custom_components.lkcomu_interrao.const import (
ATTR_ACCOUNT_CODE,
ATTR_ACCOUNT_ID,
ATTR_ADDRESS,
ATTR_BENEFITS,
ATTR_CALL_PARAMS,
ATTR_CHARGED,
ATTR_COMMENT,
ATTR_DESCRIPTION,
ATTR_END,
ATTR_FULL_NAME,
ATTR_IGNORE_INDICATIONS,
ATTR_IGNORE_PERIOD,
ATTR_INCREMENTAL,
ATTR_INDICATIONS,
ATTR_INITIAL,
ATTR_INSTALL_DATE,
ATTR_INSURANCE,
ATTR_INVOICE_ID,
ATTR_LAST_INDICATIONS_DATE,
ATTR_LIVING_AREA,
ATTR_METER_CATEGORY,
ATTR_METER_CODE,
ATTR_METER_MODEL,
ATTR_MODEL,
ATTR_PAID,
ATTR_PENALTY,
ATTR_PERIOD,
ATTR_PREVIOUS,
ATTR_PROVIDER_NAME,
ATTR_PROVIDER_TYPE,
ATTR_REASON,
ATTR_REMAINING_DAYS,
ATTR_RESULT,
ATTR_SERVICE_NAME,
ATTR_SERVICE_TYPE,
ATTR_START,
ATTR_STATUS,
ATTR_SUBMIT_PERIOD_ACTIVE,
ATTR_SUBMIT_PERIOD_END,
ATTR_SUBMIT_PERIOD_START,
ATTR_SUCCESS,
ATTR_SUM,
ATTR_TOTAL,
ATTR_TOTAL_AREA,
CONF_ACCOUNTS,
CONF_DEV_PRESENTATION,
CONF_LAST_INVOICE,
CONF_LOGOS,
CONF_METERS,
DATA_PROVIDER_LOGOS,
DOMAIN,
FORMAT_VAR_ID,
FORMAT_VAR_TYPE_EN,
FORMAT_VAR_TYPE_RU,
)
from inter_rao_energosbyt.exceptions import EnergosbytException
from inter_rao_energosbyt.interfaces import (
AbstractAccountWithBalance,
AbstractAccountWithInvoices,
AbstractAccountWithMeters,
AbstractAccountWithPayments,
AbstractBalance,
AbstractCalculatableMeter,
AbstractInvoice,
AbstractMeter,
AbstractSubmittableMeter,
Account,
)
from inter_rao_energosbyt.presets.byt import AccountWithBytInfo, BytInfoSingle
from inter_rao_energosbyt.util import process_start_end_arguments
_LOGGER = logging.getLogger(__name__)
RE_HTML_TAGS = re.compile(r"<[^<]+?>")
RE_MULTI_SPACES = re.compile(r"\s{2,}")
INDICATIONS_MAPPING_SCHEMA = vol.Schema(
{
vol.Required(vol.Match(r"t\d+")): cv.positive_float,
}
)
INDICATIONS_SEQUENCE_SCHEMA = vol.All(
vol.Any(vol.All(cv.positive_float, cv.ensure_list), [cv.positive_float]),
lambda x: dict(map(lambda y: ("t" + str(y[0]), y[1]), enumerate(x, start=1))),
)
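# Illustrative note (not part of the original integration): this schema turns a
# plain list of readings into the zone mapping used downstream, e.g.
# INDICATIONS_SEQUENCE_SCHEMA([123.4, 56.7]) -> {"t1": 123.4, "t2": 56.7}.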
CALCULATE_PUSH_INDICATIONS_SCHEMA = vol.All(
cv.deprecated("notification"),
cv.make_entity_service_schema({
vol.Required(ATTR_INDICATIONS): vol.Any(
vol.All(
cv.string, lambda x: list(map(str.strip, x.split(","))), INDICATIONS_SEQUENCE_SCHEMA
),
INDICATIONS_MAPPING_SCHEMA,
INDICATIONS_SEQUENCE_SCHEMA,
),
vol.Optional(ATTR_IGNORE_PERIOD, default=False): cv.boolean,
vol.Optional(ATTR_IGNORE_INDICATIONS, default=False): cv.boolean,
vol.Optional(ATTR_INCREMENTAL, default=False): cv.boolean,
vol.Optional("notification", default=None): lambda x: x,
})
)
SERVICE_PUSH_INDICATIONS: Final = "push_indications"
SERVICE_PUSH_INDICATIONS_SCHEMA: Final = CALCULATE_PUSH_INDICATIONS_SCHEMA
SERVICE_CALCULATE_INDICATIONS: Final = "calculate_indications"
SERVICE_CALCULATE_INDICATIONS_SCHEMA: Final = CALCULATE_PUSH_INDICATIONS_SCHEMA
_SERVICE_SCHEMA_BASE_DATED: Final = {
vol.Optional(ATTR_START, default=None): vol.Any(vol.Equal(None), cv.datetime),
vol.Optional(ATTR_END, default=None): vol.Any(vol.Equal(None), cv.datetime),
}
FEATURE_PUSH_INDICATIONS: Final = 1
FEATURE_CALCULATE_INDICATIONS: Final = FEATURE_PUSH_INDICATIONS * 2
FEATURE_GET_PAYMENTS: Final = FEATURE_CALCULATE_INDICATIONS * 2
FEATURE_GET_INVOICES: Final = FEATURE_GET_PAYMENTS * 2
SERVICE_SET_DESCRIPTION: Final = "set_description"
SERVICE_GET_PAYMENTS: Final = "get_payments"
SERVICE_GET_INVOICES: Final = "get_invoices"
_TLkcomuInterRAOEntity = TypeVar("_TLkcomuInterRAOEntity", bound=LkcomuInterRAOEntity)
def get_supported_features(from_services: SupportedServicesType, for_object: Any) -> int:
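    """Accumulate the feature bitmask for ``for_object``.

    Every ``(class, feature)`` key whose class matches ``for_object`` via
    ``isinstance`` contributes its feature flag; the bare ``None`` key that
    holds common services carries no flag and is skipped.
    """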
features = 0
for type_feature, services in from_services.items():
if type_feature is None:
continue
check_cls, feature = type_feature
if isinstance(for_object, check_cls):
features |= feature
return features
class LkcomuAccount(LkcomuInterRAOEntity[Account]):
"""The class for this sensor"""
config_key: ClassVar[str] = CONF_ACCOUNTS
_supported_services: ClassVar[SupportedServicesType] = {
None: {
"set_description": {
vol.Optional(CONF_DESCRIPTION): vol.Any(vol.Equal(None), cv.string),
},
},
(AbstractAccountWithInvoices, FEATURE_GET_INVOICES): {
"get_invoices": _SERVICE_SCHEMA_BASE_DATED,
},
(AbstractAccountWithPayments, FEATURE_GET_PAYMENTS): {
"get_payments": _SERVICE_SCHEMA_BASE_DATED,
},
}
def __init__(self, *args, balance: Optional[AbstractBalance] = None, **kwargs) -> None:
        super().__init__(*args, **kwargs)
self._balance = balance
        self.entity_id: Optional[str] = "sensor." + slugify(
f"{self.account_provider_code or 'unknown'}_{self._account.code}_account"
)
@property
def entity_picture(self) -> Optional[str]:
if not self._account_config[CONF_LOGOS]:
return None
logos = self.hass.data.get(DATA_PROVIDER_LOGOS)
if not logos:
return None
account_provider_code = self.account_provider_code
if account_provider_code is None:
return None
provider_logo = logos.get(account_provider_code)
if isinstance(provider_logo, str):
return provider_logo
return None
@property
def code(self) -> str:
return self._account.code
@property
def device_class(self) -> Optional[str]:
return DOMAIN + "_account"
@property
def unique_id(self) -> str:
"""Return the unique ID of the sensor"""
acc = self._account
return f"{acc.api.__class__.__name__}_account_{acc.id}"
@property
def state(self) -> Union[str, float]:
if self._account.is_locked:
return STATE_PROBLEM
balance = self._balance
if balance is not None:
if self._account_config[CONF_DEV_PRESENTATION]:
return ("-" if (balance.balance or 0.0) < 0.0 else "") + "#####.###"
return round(balance.balance or 0.0, 2) # fixes -0.0 issues
return STATE_UNKNOWN
@property
def icon(self) -> str:
return "mdi:lightning-bolt-circle"
@property
def unit_of_measurement(self) -> Optional[str]:
return "руб."
@property
def sensor_related_attributes(self) -> Optional[Mapping[str, Any]]:
account = self._account
service_type_value = account.service_type
service_type = (
service_type_value.name.lower()
if isinstance(service_type_value, IntEnum)
else str(service_type_value)
)
provider_type_value = account.provider_type
provider_type = (
provider_type_value.name.lower()
if isinstance(provider_type_value, IntEnum)
else str(provider_type_value)
)
attributes = {
ATTR_ADDRESS: account.address,
ATTR_DESCRIPTION: account.description,
ATTR_PROVIDER_TYPE: provider_type,
ATTR_PROVIDER_NAME: account.provider_name,
ATTR_SERVICE_TYPE: service_type,
ATTR_SERVICE_NAME: account.service_name,
}
if account.is_locked:
attributes[ATTR_STATUS] = STATE_LOCKED
attributes[ATTR_REASON] = account.lock_reason
else:
attributes[ATTR_STATUS] = STATE_OK
if isinstance(account, AccountWithBytInfo):
info = account.info
if info:
attributes.update(
{
ATTR_FULL_NAME: info.full_name,
ATTR_LIVING_AREA: info.living_area,
ATTR_TOTAL_AREA: info.total_area,
ATTR_METER_CATEGORY: info.meter_category,
ATTR_METER_CODE: info.meter_code,
}
)
zones = account.info.zones
if zones is not None:
for zone_id, zone_def in zones.items():
attrs = ("name", "description", "tariff")
for prefix in ("", "within_"):
values = tuple(getattr(zone_def, prefix + attr) for attr in attrs)
if any(values):
attributes.update(
zip(
map(lambda x: f"zone_{zone_id}_{prefix}{x}", attrs),
values,
)
)
if isinstance(info, BytInfoSingle):
attributes[ATTR_METER_MODEL] = info.meter_model
self._handle_dev_presentation(
attributes,
(),
(
ATTR_DESCRIPTION,
ATTR_FULL_NAME,
ATTR_ADDRESS,
ATTR_LIVING_AREA,
ATTR_TOTAL_AREA,
ATTR_METER_MODEL,
ATTR_METER_CODE,
),
)
return attributes
@property
def name_format_values(self) -> Mapping[str, Any]:
"""Return the name of the sensor"""
account = self._account
return {
FORMAT_VAR_ID: str(account.id),
FORMAT_VAR_TYPE_EN: "account",
FORMAT_VAR_TYPE_RU: "лицевой счёт",
}
#################################################################################
# Functional implementation of inherent class
#################################################################################
@classmethod
async def async_refresh_accounts(
cls,
entities: Dict[Hashable, _TLkcomuInterRAOEntity],
account: "Account",
config_entry: ConfigEntry,
account_config: ConfigType,
):
entity_key = account.id
try:
entity = entities[entity_key]
except KeyError:
entity = cls(account, account_config)
entities[entity_key] = entity
return [entity]
else:
if entity.enabled:
entity.async_schedule_update_ha_state(force_refresh=True)
async def async_update_internal(self) -> None:
await self._account.async_update_related()
account = self._account
if isinstance(account, AbstractAccountWithBalance):
self._balance = await account.async_get_balance()
if isinstance(account, AccountWithBytInfo):
await account.async_update_info()
self.register_supported_services(account)
#################################################################################
# Services callbacks
#################################################################################
@property
def supported_features(self) -> int:
return get_supported_features(
self._supported_services,
self._account,
)
async def async_service_get_payments(self, **call_data):
account = self._account
_LOGGER.info(self.log_prefix + "Begin handling payments retrieval")
if not isinstance(account, AbstractAccountWithPayments):
raise ValueError("account does not support payments retrieval")
dt_start: Optional["datetime"] = call_data[ATTR_START]
dt_end: Optional["datetime"] = call_data[ATTR_END]
dt_start, dt_end = process_start_end_arguments(dt_start, dt_end)
results = []
event_data = {
ATTR_ACCOUNT_CODE: account.code,
ATTR_ACCOUNT_ID: account.id,
ATTR_SUCCESS: False,
ATTR_START: dt_start.isoformat(),
ATTR_END: dt_end.isoformat(),
ATTR_RESULT: results,
ATTR_COMMENT: None,
ATTR_SUM: 0.0,
}
try:
payments = await with_auto_auth(
account.api,
account.async_get_payments,
dt_start,
dt_end,
)
for payment in payments:
event_data[ATTR_SUM] += payment.amount
results.append(payment_to_attrs(payment))
except BaseException as e:
event_data[ATTR_COMMENT] = "Unknown error: %r" % e
_LOGGER.exception(event_data[ATTR_COMMENT])
raise
else:
event_data[ATTR_SUCCESS] = True
finally:
self.hass.bus.async_fire(
event_type=DOMAIN + "_" + SERVICE_GET_PAYMENTS,
event_data=event_data,
)
_LOGGER.info(self.log_prefix + "Finish handling payments retrieval")
async def async_service_get_invoices(self, **call_data):
account = self._account
_LOGGER.info(self.log_prefix + "Begin handling invoices retrieval")
if not isinstance(account, AbstractAccountWithInvoices):
raise ValueError("account does not support invoices retrieval")
dt_start: Optional["datetime"] = call_data[ATTR_START]
dt_end: Optional["datetime"] = call_data[ATTR_END]
dt_start, dt_end = process_start_end_arguments(dt_start, dt_end)
results = []
event_data = {
ATTR_ACCOUNT_CODE: account.code,
ATTR_ACCOUNT_ID: account.id,
ATTR_SUCCESS: False,
ATTR_START: dt_start.isoformat(),
ATTR_END: dt_end.isoformat(),
ATTR_RESULT: results,
ATTR_COMMENT: None,
ATTR_SUM: 0.0,
}
try:
invoices = await with_auto_auth(
account.api,
account.async_get_invoices,
dt_start,
dt_end,
)
for invoice in invoices:
event_data[ATTR_SUM] += invoice.total
results.append(invoice_to_attrs(invoice))
except BaseException as e:
event_data[ATTR_COMMENT] = "Unknown error: %r" % e
_LOGGER.exception(event_data[ATTR_COMMENT])
raise
else:
event_data[ATTR_SUCCESS] = True
finally:
self.hass.bus.async_fire(
event_type=DOMAIN + "_" + SERVICE_GET_INVOICES,
event_data=event_data,
)
_LOGGER.info(self.log_prefix + "Finish handling invoices retrieval")
async def async_service_set_description(self, **call_data):
account = self._account
_LOGGER.info(self.log_prefix + "Begin handling description setting")
event_data = {
ATTR_ACCOUNT_CODE: account.code,
ATTR_ACCOUNT_ID: account.id,
ATTR_SUCCESS: False,
ATTR_DESCRIPTION: call_data.get(CONF_DESCRIPTION),
ATTR_PREVIOUS: account.description,
}
try:
await with_auto_auth(
account.api,
account.async_set_description,
description=event_data[ATTR_DESCRIPTION],
update=False,
)
except EnergosbytException as e:
event_data[ATTR_COMMENT] = "Error: %s" % e
raise
except Exception as e:
event_data[ATTR_COMMENT] = "Unknown error: %s" % e
_LOGGER.exception("Unknown error: %s", e)
raise
else:
event_data[ATTR_COMMENT] = "Successful calculation"
event_data[ATTR_SUCCESS] = True
self.async_schedule_update_ha_state(force_refresh=True)
finally:
self.hass.bus.async_fire(
event_type=DOMAIN + "_" + SERVICE_SET_DESCRIPTION,
event_data=event_data,
)
_LOGGER.info(self.log_prefix + "End handling indications calculation")
class LkcomuMeter(LkcomuInterRAOEntity[AbstractAccountWithMeters]):
"""The class for this sensor"""
config_key: ClassVar[str] = CONF_METERS
_supported_services: ClassVar[SupportedServicesType] = {
(AbstractSubmittableMeter, FEATURE_PUSH_INDICATIONS): {
"push_indications": SERVICE_PUSH_INDICATIONS_SCHEMA,
},
(AbstractCalculatableMeter, FEATURE_CALCULATE_INDICATIONS): {
"calculate_indications": SERVICE_CALCULATE_INDICATIONS_SCHEMA,
},
}
def __init__(self, *args, meter: AbstractMeter, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._meter = meter
        self.entity_id: Optional[str] = "sensor." + slugify(
f"{self.account_provider_code or 'unknown'}_{self._account.code}_meter_{self.code}"
)
#################################################################################
# Implementation base of inherent class
#################################################################################
@classmethod
async def async_refresh_accounts(
cls,
entities: Dict[Hashable, Optional[_TLkcomuInterRAOEntity]],
account: "Account",
config_entry: ConfigEntry,
account_config: ConfigType,
):
new_meter_entities = []
if isinstance(account, AbstractAccountWithMeters):
meters = await account.async_get_meters()
for meter_id, meter in meters.items():
entity_key = (account.id, meter_id)
try:
entity = entities[entity_key]
except KeyError:
entity = cls(
account,
account_config,
meter=meter,
)
entities[entity_key] = entity
new_meter_entities.append(entity)
else:
if entity.enabled:
entity.async_schedule_update_ha_state(force_refresh=True)
return new_meter_entities if new_meter_entities else None
async def async_update_internal(self) -> None:
meters = await self._account.async_get_meters()
meter = meters.get(self._meter.id)
if meter is None:
self.hass.async_create_task(self.async_remove())
else:
self.register_supported_services(meter)
self._meter = meter
#################################################################################
# Data-oriented implementation of inherent class
#################################################################################
@property
def code(self) -> str:
return self._meter.code
@property
def unique_id(self) -> str:
"""Return the unique ID of the sensor"""
met = self._meter
acc = met.account
return f"{acc.api.__class__.__name__}_meter_{acc.id}_{met.id}"
@property
def state(self) -> str:
return self._meter.status or STATE_OK
@property
def icon(self):
return "mdi:counter"
@property
def device_class(self) -> Optional[str]:
return DOMAIN + "_meter"
@property
def supported_features(self) -> int:
meter = self._meter
return (
isinstance(meter, AbstractSubmittableMeter) * FEATURE_PUSH_INDICATIONS
| isinstance(meter, AbstractCalculatableMeter) * FEATURE_CALCULATE_INDICATIONS
)
@property
def sensor_related_attributes(self) -> Optional[Mapping[str, Any]]:
met = self._meter
attributes = {
ATTR_METER_CODE: met.code,
ATTR_ACCOUNT_CODE: met.account.code,
}
# Meter model attribute
model = met.model
if model:
attributes[ATTR_MODEL] = model
# Installation date attribute
install_date = met.installation_date
if install_date:
attributes[ATTR_INSTALL_DATE] = install_date.isoformat()
# Submission periods attributes
is_submittable = False
if isinstance(met, AbstractSubmittableMeter):
is_submittable = True # this weird hack calms my IDE
# noinspection PyUnresolvedReferences
today = date.today()
start_date, end_date = met.submission_period
attributes[ATTR_SUBMIT_PERIOD_START] = start_date.isoformat()
attributes[ATTR_SUBMIT_PERIOD_END] = end_date.isoformat()
attributes[ATTR_SUBMIT_PERIOD_ACTIVE] = start_date <= today <= end_date
            if today >= end_date:
                remaining_days = 0
            elif today >= start_date:
                remaining_days = (end_date - today).days
            else:
                remaining_days = (start_date - today).days
attributes[ATTR_REMAINING_DAYS] = remaining_days
last_indications_date = met.last_indications_date
attributes[ATTR_LAST_INDICATIONS_DATE] = (
None if last_indications_date is None else last_indications_date.isoformat()
)
# Add zone information
for zone_id, zone_def in met.zones.items():
iterator = [
("name", zone_def.name),
("last_indication", zone_def.last_indication or 0.0),
("today_indication", zone_def.today_indication),
]
if is_submittable:
submitted_indication = zone_def.today_indication
if submitted_indication is None and last_indications_date is not None:
# noinspection PyUnboundLocalVariable
if start_date <= last_indications_date <= end_date:
submitted_indication = zone_def.last_indication or 0.0
iterator.append(("period_indication", submitted_indication))
for attribute, value in iterator:
attributes[f"zone_{zone_id}_{attribute}"] = value
self._handle_dev_presentation(
attributes,
(),
(
ATTR_METER_CODE,
ATTR_INSTALL_DATE,
ATTR_LAST_INDICATIONS_DATE,
*filter(lambda x: x.endswith("_indication"), attributes.keys()),
),
)
return attributes
@property
def name_format_values(self) -> Mapping[str, Any]:
meter = self._meter
return {
FORMAT_VAR_ID: meter.id or "<unknown>",
FORMAT_VAR_TYPE_EN: "meter",
FORMAT_VAR_TYPE_RU: "счётчик",
}
#################################################################################
# Additional functionality
#################################################################################
def _fire_callback_event(
self, call_data: Mapping[str, Any], event_data: Mapping[str, Any], event_id: str, title: str
):
meter = self._meter
hass = self.hass
comment = event_data.get(ATTR_COMMENT)
if comment is not None:
message = str(comment)
comment = "Response comment: " + str(comment)
else:
comment = "Response comment not provided"
message = comment
_LOGGER.log(
logging.INFO if event_data.get(ATTR_SUCCESS) else logging.ERROR,
RE_MULTI_SPACES.sub(" ", RE_HTML_TAGS.sub("", comment)),
)
meter_code = meter.code
event_data = {
ATTR_ENTITY_ID: self.entity_id,
ATTR_METER_CODE: meter_code,
ATTR_CALL_PARAMS: dict(call_data),
ATTR_SUCCESS: False,
ATTR_INDICATIONS: None,
ATTR_COMMENT: None,
**event_data,
}
_LOGGER.debug("Firing event '%s' with post_fields: %s" % (event_id, event_data))
hass.bus.async_fire(event_type=event_id, event_data=event_data)
def _get_real_indications(self, call_data: Mapping) -> Mapping[str, Union[int, float]]:
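        # Resolve the raw call indications against the meter's zones. With
        # ATTR_INCREMENTAL set, each submitted value is added on top of the zone's
        # current reading (today's indication, falling back to the last one), e.g.
        # {"t1": 5} on a zone last read at 100 resolves to {"t1": 105}.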
indications: Mapping[str, Union[int, float]] = call_data[ATTR_INDICATIONS]
meter_zones = self._meter.zones
for zone_id, new_value in indications.items():
if zone_id not in meter_zones:
raise ValueError(f"meter zone {zone_id} does not exist")
if call_data[ATTR_INCREMENTAL]:
return {
zone_id: (
(
meter_zones[zone_id].today_indication
or meter_zones[zone_id].last_indication
or 0
)
+ new_value
)
for zone_id, new_value in indications.items()
}
return indications
async def async_service_push_indications(self, **call_data):
"""
Push indications entity service.
:param call_data: Parameters for service call
:return:
"""
_LOGGER.info(self.log_prefix + "Begin handling indications submission")
meter = self._meter
if meter is None:
raise Exception("Meter is unavailable")
meter_code = meter.code
if not isinstance(meter, AbstractSubmittableMeter):
raise Exception("Meter '%s' does not support indications submission" % (meter_code,))
else:
event_data = {}
try:
indications = self._get_real_indications(call_data)
event_data[ATTR_INDICATIONS] = dict(indications)
await with_auto_auth(
meter.account.api,
meter.async_submit_indications,
**indications,
ignore_periods=call_data[ATTR_IGNORE_PERIOD],
ignore_values=call_data[ATTR_IGNORE_INDICATIONS],
)
except EnergosbytException as e:
event_data[ATTR_COMMENT] = "API error: %s" % e
raise
except BaseException as e:
event_data[ATTR_COMMENT] = "Unknown error: %r" % e
_LOGGER.error(event_data[ATTR_COMMENT])
raise
else:
event_data[ATTR_COMMENT] = "Indications submitted successfully"
event_data[ATTR_SUCCESS] = True
self.async_schedule_update_ha_state(force_refresh=True)
finally:
self._fire_callback_event(
call_data,
event_data,
DOMAIN + "_" + SERVICE_PUSH_INDICATIONS,
"Передача показаний",
)
_LOGGER.info(self.log_prefix + "End handling indications submission")
async def async_service_calculate_indications(self, **call_data):
meter = self._meter
if meter is None:
raise Exception("Meter is unavailable")
meter_code = meter.code
_LOGGER.info(self.log_prefix + "Begin handling indications calculation")
if not isinstance(meter, AbstractCalculatableMeter):
raise Exception("Meter '%s' does not support indications calculation" % (meter_code,))
event_data = {ATTR_CHARGED: None, ATTR_SUCCESS: False}
try:
indications = self._get_real_indications(call_data)
event_data[ATTR_INDICATIONS] = dict(indications)
calculation = await with_auto_auth(
meter.account.api,
meter.async_calculate_indications,
**indications,
ignore_periods=call_data[ATTR_IGNORE_PERIOD],
ignore_values=call_data[ATTR_IGNORE_INDICATIONS],
)
except EnergosbytException as e:
event_data[ATTR_COMMENT] = "Error: %s" % e
raise
except BaseException as e:
event_data[ATTR_COMMENT] = "Unknown error: %r" % e
_LOGGER.exception(event_data[ATTR_COMMENT])
raise
else:
event_data[ATTR_CHARGED] = float(calculation)
event_data[ATTR_COMMENT] = "Successful calculation"
event_data[ATTR_SUCCESS] = True
self.async_schedule_update_ha_state(force_refresh=True)
finally:
self._fire_callback_event(
call_data,
event_data,
DOMAIN + "_" + SERVICE_CALCULATE_INDICATIONS,
"Подсчёт показаний",
)
_LOGGER.info(self.log_prefix + "End handling indications calculation")
class LkcomuLastInvoice(LkcomuInterRAOEntity[AbstractAccountWithInvoices]):
config_key = CONF_LAST_INVOICE
def __init__(self, *args, last_invoice: Optional["AbstractInvoice"] = None, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._last_invoice = last_invoice
self.entity_id: Optional[str] = "sensor." + slugify(
f"{self.account_provider_code or 'unknown'}_{self._account.code}_last_invoice"
)
@property
def code(self) -> str:
return self._account.code
@property
def device_class(self) -> Optional[str]:
return DOMAIN + "_invoice"
@property
def unique_id(self) -> str:
"""Return the unique ID of the sensor"""
acc = self._account
return f"{acc.api.__class__.__name__}_lastinvoice_{acc.id}"
@property
def state(self) -> Union[float, str]:
invoice = self._last_invoice
if invoice:
if self._account_config[CONF_DEV_PRESENTATION]:
return ("-" if (invoice.total or 0.0) < 0.0 else "") + "#####.###"
return round(invoice.total or 0.0, 2)
return STATE_UNKNOWN
@property
def icon(self) -> str:
return "mdi:receipt"
@property
    def unit_of_measurement(self) -> Optional[str]:
return "руб." if self._last_invoice else None
@property
def sensor_related_attributes(self):
invoice = self._last_invoice
if invoice:
attributes = invoice_to_attrs(invoice)
self._handle_dev_presentation(
attributes,
(ATTR_PERIOD, ATTR_INVOICE_ID),
(
ATTR_TOTAL,
ATTR_PAID,
ATTR_INITIAL,
ATTR_CHARGED,
ATTR_INSURANCE,
ATTR_BENEFITS,
ATTR_PENALTY,
ATTR_SERVICE,
),
)
return attributes
return {}
@property
def name_format_values(self) -> Mapping[str, Any]:
invoice = self._last_invoice
return {
FORMAT_VAR_ID: invoice.id if invoice else "<?>",
FORMAT_VAR_TYPE_EN: "last invoice",
FORMAT_VAR_TYPE_RU: "последняя квитанция",
}
@classmethod
async def async_refresh_accounts(
cls,
entities: Dict[Hashable, _TLkcomuInterRAOEntity],
account: "Account",
config_entry: ConfigEntry,
account_config: ConfigType,
):
entity_key = account.id
if isinstance(account, AbstractAccountWithInvoices):
try:
entity = entities[entity_key]
except KeyError:
entity = cls(account, account_config)
entities[entity_key] = entity
return [entity]
else:
if entity.enabled:
await entity.async_update_ha_state(force_refresh=True)
return None
async def async_update_internal(self) -> None:
self._last_invoice = await self._account.async_get_last_invoice()
async_setup_entry = make_common_async_setup_entry(
LkcomuAccount,
LkcomuLastInvoice,
LkcomuMeter,
)
|
the-stack_106_21190
|
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url):
if browser == "firefox":
self.wd = webdriver.Firefox(
firefox_binary="C:\\Program Files\\Mozilla Firefox\\firefox.exe"
)
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unknown browser {}".format(browser))
self.base_url = base_url
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
def open_addressbook(self):
self.wd.get(self.base_url)
def destroy(self):
self.wd.quit()
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
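# Hedged usage sketch (not part of the original fixture): a minimal standalone
# lifecycle for this wrapper. The base_url below is hypothetical.
if __name__ == "__main__":
    app = Application(browser="chrome", base_url="http://localhost/addressbook/")
    try:
        app.open_addressbook()
        print("session valid:", app.is_valid())
    finally:
        app.destroy()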
|
the-stack_106_21191
|
# FSM Simulation
edges = {(1, 'a') : 2,
(2, 'a') : 2,
(2, '1') : 3,
(3, '1') : 3}
accepting = [3]
def fsmsim(string, current, edges, accepting):
if string == "":
return current in accepting
else:
letter = string[0]
currentState = (current, letter)
if currentState in edges:
nextState = edges[currentState]
return fsmsim(string[1:], nextState, edges, accepting)
else:
return False
# QUIZ: You fill this out!
# Is there a valid edge?
# If so, take it.
# If not, return False.
# Hint: recursion.
print (fsmsim("aaa111",1,edges,accepting))
# >>> True
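# Additional illustrative checks (not in the original snippet): strings that
# fall off the edge table or stop in a non-accepting state are rejected.
print (fsmsim("a1a",1,edges,accepting))
# >>> False (no edge for (3, 'a'))
print (fsmsim("aa",1,edges,accepting))
# >>> False (state 2 is not accepting)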
|
the-stack_106_21192
|
from __future__ import division
import logging
import numpy as np
from smqtk.algorithms.nn_index.lsh.functors import LshFunctor
from smqtk.representation.descriptor_element import elements_to_matrix
from smqtk.utils.bin_utils import report_progress
class SimpleRPFunctor (LshFunctor):
"""
This class is meant purely as a baseline comparison for other
LshFunctors and NNIndex plugins. It is not meant to be used in
production, as it is unlikely to produce a quality index.
"""
@classmethod
def is_usable(cls):
return True
def __init__(self, bit_length=8, normalize=None, random_seed=None):
super(SimpleRPFunctor, self).__init__()
self.bit_length = bit_length
self.normalize = normalize
self.random_seed = random_seed
# Model components
self.rps = None
self.mean_vec = None
def _norm_vector(self, v):
"""
Class standard array normalization. Normalized along max dimension
(a=0 for a 1D array, a=1 for a 2D array, etc.).
:param v: Vector to normalize
:type v: numpy.ndarray
:return: Returns the normalized version of input array ``v``.
:rtype: numpy.ndarray
"""
vm = v - self.mean_vec
if self.normalize is None:
# Normalization off
return vm
n = np.linalg.norm(vm, ord=self.normalize, axis=-1, keepdims=True)
with np.errstate(divide='ignore'):
return np.nan_to_num(vm/n)
def get_config(self):
return {
"bit_length": self.bit_length,
"normalize": self.normalize,
"random_seed": self.random_seed,
}
def has_model(self):
return self.mean_vec is not None
def fit(self, descriptors, use_multiprocessing=True):
"""
Fit the ITQ model given the input set of descriptors
:param descriptors: Iterable of ``DescriptorElement`` vectors to fit
the model to.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement]
:param use_multiprocessing: If multiprocessing should be used, as
opposed to threading, for collecting descriptor vectors from the
provided iterable.
:type use_multiprocessing: bool
:raises RuntimeError: There is already a model loaded
:return: Matrix hash codes for provided descriptors in order.
:rtype: numpy.ndarray[bool]
"""
if self.has_model():
raise RuntimeError("Model components have already been loaded.")
dbg_report_interval = None
if self.get_logger().getEffectiveLevel() <= logging.DEBUG:
dbg_report_interval = 1.0 # seconds
if not hasattr(descriptors, "__len__"):
self._log.info("Creating sequence from iterable")
descriptors_l = []
rs = [0]*7
for d in descriptors:
descriptors_l.append(d)
report_progress(self._log.debug, rs, dbg_report_interval)
descriptors = descriptors_l
self._log.info("Creating matrix of descriptors for fitting")
x = elements_to_matrix(
descriptors, report_interval=dbg_report_interval,
use_multiprocessing=use_multiprocessing)
self._log.debug("descriptor matrix shape: %s", x.shape)
n, dim = x.shape
self._log.debug("Generating random projections")
np.random.seed(self.random_seed)
self.rps = np.random.randn(dim, self.bit_length)
self._log.debug("Info normalizing descriptors with norm type: %s",
self.normalize)
return self.get_hash(x)
def get_hash(self, descriptor):
if self.rps is None:
raise RuntimeError("Random projection model not constructed. Call "
"`fit` first!")
b = (self._norm_vector(descriptor).dot(self.rps) >= 0.0)
return b.squeeze()
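# Hedged sketch (assumption, not part of the module): the hashing step on its own
# -- sign of random projections after mean-centering -- can be exercised without a
# DescriptorElement corpus by filling the model attributes in manually.
if __name__ == "__main__":
    functor = SimpleRPFunctor(bit_length=8, normalize=2, random_seed=0)
    np.random.seed(0)
    functor.rps = np.random.randn(16, functor.bit_length)  # assume 16-d descriptors
    functor.mean_vec = np.zeros(16)
    code = functor.get_hash(np.random.randn(16))
    print(code.shape, code.dtype)  # (8,) bool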
|
the-stack_106_21194
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import falcon_server_upgrade
from resource_management import *
from resource_management.libraries.functions.version import *
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_PROPERTIES
from falcon import falcon
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
class FalconServer(Script):
def configure(self, env):
import params
env.set_params(params)
falcon('server', action='config')
def start(self, env, rolling_restart=False):
import params
env.set_params(params)
self.configure(env)
falcon('server', action='start')
def stop(self, env, rolling_restart=False):
import params
env.set_params(params)
falcon('server', action='stop')
# if performing an upgrade, backup some directories after stopping falcon
if rolling_restart:
falcon_server_upgrade.post_stop_backup()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class FalconServerLinux(FalconServer):
def get_stack_to_component(self):
return {"HDP": "falcon-server"}
def install(self, env):
import params
self.install_packages(env)
env.set_params(params)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.server_pid_file)
def pre_rolling_restart(self, env):
import params
env.set_params(params)
# this function should not execute if the version can't be determined or
# is not at least HDP 2.2.0.0
if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
return
Logger.info("Executing Falcon Server Rolling Upgrade pre-restart")
Execute(format("hdp-select set falcon-server {version}"))
falcon_server_upgrade.pre_start_restore()
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
props_value_check = {"*.falcon.authentication.type": "kerberos",
"*.falcon.http.authentication.type": "kerberos"}
props_empty_check = ["*.falcon.service.authentication.kerberos.principal",
"*.falcon.service.authentication.kerberos.keytab",
"*.falcon.http.authentication.kerberos.principal",
"*.falcon.http.authentication.kerberos.keytab"]
props_read_check = ["*.falcon.service.authentication.kerberos.keytab",
"*.falcon.http.authentication.kerberos.keytab"]
falcon_startup_props = build_expectations('startup', props_value_check, props_empty_check,
props_read_check)
falcon_expectations ={}
falcon_expectations.update(falcon_startup_props)
security_params = get_params_from_filesystem('/etc/falcon/conf',
{'startup.properties': FILE_TYPE_PROPERTIES})
result_issues = validate_security_config_properties(security_params, falcon_expectations)
if not result_issues: # If all validations passed successfully
try:
# Double check the dict before calling execute
if ( 'startup' not in security_params
or '*.falcon.service.authentication.kerberos.keytab' not in security_params['startup']
or '*.falcon.service.authentication.kerberos.principal' not in security_params['startup']) \
or '*.falcon.http.authentication.kerberos.keytab' not in security_params['startup'] \
or '*.falcon.http.authentication.kerberos.principal' not in security_params['startup']:
self.put_structured_out({"securityState": "UNSECURED"})
self.put_structured_out(
{"securityIssuesFound": "Keytab file or principal are not set property."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.falcon_user,
security_params['startup']['*.falcon.service.authentication.kerberos.keytab'],
security_params['startup']['*.falcon.service.authentication.kerberos.principal'],
status_params.hostname,
status_params.tmp_dir)
cached_kinit_executor(status_params.kinit_path_local,
status_params.falcon_user,
security_params['startup']['*.falcon.http.authentication.kerberos.keytab'],
security_params['startup']['*.falcon.http.authentication.kerberos.principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
else:
self.put_structured_out({"securityState": "UNSECURED"})
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class FalconServerWindows(FalconServer):
def install(self, env):
import params
if not check_windows_service_exists(params.falcon_win_service_name):
self.install_packages(env)
def status(self, env):
import status_params
check_windows_service_status(status_params.falcon_win_service_name)
if __name__ == "__main__":
FalconServer().execute()
|
the-stack_106_21195
|
# -*- coding: utf-8 -*-
import json
import logging
import re
from ..utils.crawler import Crawler
logger = logging.getLogger(__name__)
search_url = 'https://www.royalroad.com/fictions/search?keyword=%s'
class RoyalRoadCrawler(Crawler):
base_url = 'https://www.royalroad.com/'
def search_novel(self, query):
query = query.lower().replace(' ', '+')
soup = self.get_soup(search_url % query)
results = []
for a in soup.select('h2.fiction-title a')[:5]:
url = self.absolute_url(a['href'])
results.append({
'url': url,
'title': a.text.strip(),
'info': self.search_novel_info(url),
})
# end for
return results
# end def
def search_novel_info(self, url):
'''Get novel title, autor, cover etc'''
logger.debug('Visiting %s', url)
soup = self.get_soup(url)
score = soup.select_one('span.star')['data-content']
chapters = len(soup.find('tbody').findAll('a', href=True))
latest = soup.find('tbody').findAll('a', href=True)[-1].text.strip()
info = 'Score: %s, Chapter count %s, Latest: %s' % (
score, chapters, latest)
return info
# end def
def read_novel_info(self):
'''Get novel title, autor, cover etc'''
logger.debug('Visiting %s', self.novel_url)
soup = self.get_soup(self.novel_url)
self.novel_title = soup.find("h1", {"property": "name"}).text.strip()
logger.info('Novel title: %s', self.novel_title)
self.novel_cover = self.absolute_url(
soup.find("img", {"class": "img-offset thumbnail inline-block"})['src'])
logger.info('Novel cover: %s', self.novel_cover)
self.novel_author = soup.find(
"span", {"property": "name"}).text.strip()
logger.info('Novel author: %s', self.novel_author)
chapters = soup.find('tbody').findAll('a', href=True)
for x in chapters:
chap_id = len(self.chapters) + 1
if len(self.chapters) % 100 == 0:
vol_id = chap_id//100 + 1
vol_title = 'Volume ' + str(vol_id)
self.volumes.append({
'id': vol_id,
'title': vol_title,
})
# end if
self.chapters.append({
'id': chap_id,
'volume': vol_id,
'url': self.absolute_url(x['href']),
'title': x.text.strip() or ('Chapter %d' % chap_id),
})
# end for
# end def
def download_chapter_body(self, chapter):
'''Download body of a single chapter and return as clean html format.'''
logger.info('Downloading %s', chapter['url'])
soup = self.get_soup(chapter['url'])
logger.debug(soup.title.string)
if 'Chapter' in soup.select_one('h2').text:
chapter['title'] = soup.select_one('h2').text
else:
chapter['title'] = chapter['title']
# end if
contents = soup.find("div", {"class": "chapter-content"})
self.clean_contents(contents)
return str(contents)
# end def
# end class
|
the-stack_106_21196
|
# -*- coding: utf-8 -*-
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.imgmath',
'sphinx.ext.graphviz',
'rst2pdf.pdfbuilder',
]
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Foobar'
copyright = u'2009, Jason S'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# -- Options for PDF output ----------------------------------------------------
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
pdf_documents = [('index', u'index', u'index', u'lorenzo')]
# Language to be used for hyphenation support
pdf_language = "en_US"
# verbosity level. 0 1 or 2
pdf_verbosity = 0
pdf_invariant = True
pdf_real_footnotes = True
# Set a consistent date for the cover page
today = 'April 29, 2018'
|
the-stack_106_21198
|
import os
import pickle
def check_file_exists(file_path):
return os.path.exists(file_path)
def save_to_pickle(data, save_path):
with open(save_path, "wb") as handle:
pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_from_pickle(load_path, encoding="latin1"):
if check_file_exists(load_path):
with open(load_path, "rb") as handle:
data = pickle._Unpickler(handle)
data.encoding = encoding
return data.load()
else:
raise Exception("File not found: " + load_path)
|
the-stack_106_21199
|
# This sample tests the scoping rules for assignment expressions
# within a list comprehension.
from typing import Tuple
def foo() -> Tuple[str, int]:
a = 3
y = 4
b = [(a := x) for x in ["1", "2"] for y in ["1", "2"]]
# The type of "y" should be int because the "y" within
# the list comprehension doesn't leak outside. On the
# other hand, "a" does leak outside the list comprehension.
return (a, y)
|
the-stack_106_21200
|
from __future__ import print_function, division
from sympy import Symbol, Integer, sympify
class PlotInterval(object):
"""
"""
_v, _v_min, _v_max, _v_steps = None, None, None, None
def require_all_args(f):
def check(self, *args, **kwargs):
for g in [self._v, self._v_min, self._v_max, self._v_steps]:
if g is None:
raise ValueError("PlotInterval is incomplete.")
return f(self, *args, **kwargs)
return check
def __init__(self, *args):
if len(args) == 1:
if isinstance(args[0], PlotInterval):
self.fill_from(args[0])
return
elif isinstance(args[0], str):
try:
args = eval(args[0])
except TypeError:
s_eval_error = "Could not interpret string %s."
raise ValueError(s_eval_error % (args[0]))
elif isinstance(args[0], (tuple, list)):
args = args[0]
else:
raise ValueError("Not an interval.")
if not isinstance(args, (tuple, list)) or len(args) > 4:
f_error = "PlotInterval must be a tuple or list of length 4 or less."
raise ValueError(f_error)
args = list(args)
if len(args) > 0 and (args[0] is None or isinstance(args[0], Symbol)):
self.v = args.pop(0)
if len(args) in [2, 3]:
self.v_min = args.pop(0)
self.v_max = args.pop(0)
if len(args) == 1:
self.v_steps = args.pop(0)
elif len(args) == 1:
self.v_steps = args.pop(0)
def get_v(self):
return self._v
def set_v(self, v):
if v is None:
self._v = None
return
if not isinstance(v, Symbol):
raise ValueError("v must be a sympy Symbol.")
self._v = v
def get_v_min(self):
return self._v_min
def set_v_min(self, v_min):
if v_min is None:
self._v_min = None
return
try:
self._v_min = sympify(v_min)
float(self._v_min.evalf())
except TypeError:
raise ValueError("v_min could not be interpreted as a number.")
def get_v_max(self):
return self._v_max
def set_v_max(self, v_max):
if v_max is None:
self._v_max = None
return
try:
self._v_max = sympify(v_max)
float(self._v_max.evalf())
except TypeError:
raise ValueError("v_max could not be interpreted as a number.")
def get_v_steps(self):
return self._v_steps
def set_v_steps(self, v_steps):
if v_steps is None:
self._v_steps = None
return
if isinstance(v_steps, int):
v_steps = Integer(v_steps)
elif not isinstance(v_steps, Integer):
raise ValueError("v_steps must be an int or sympy Integer.")
if v_steps <= Integer(0):
raise ValueError("v_steps must be positive.")
self._v_steps = v_steps
@require_all_args
def get_v_len(self):
return self.v_steps + 1
v = property(get_v, set_v)
v_min = property(get_v_min, set_v_min)
v_max = property(get_v_max, set_v_max)
v_steps = property(get_v_steps, set_v_steps)
v_len = property(get_v_len)
def fill_from(self, b):
if b.v is not None:
self.v = b.v
if b.v_min is not None:
self.v_min = b.v_min
if b.v_max is not None:
self.v_max = b.v_max
if b.v_steps is not None:
self.v_steps = b.v_steps
@staticmethod
def try_parse(*args):
"""
Returns a PlotInterval if args can be interpreted
as such, otherwise None.
"""
if len(args) == 1 and isinstance(args[0], PlotInterval):
return args[0]
try:
return PlotInterval(*args)
except ValueError:
return None
def _str_base(self):
return ",".join([str(self.v), str(self.v_min),
str(self.v_max), str(self.v_steps)])
def __repr__(self):
"""
A string representing the interval in class constructor form.
"""
return "PlotInterval(%s)" % (self._str_base())
def __str__(self):
"""
A string representing the interval in list form.
"""
return "[%s]" % (self._str_base())
@require_all_args
def assert_complete(self):
pass
@require_all_args
def vrange(self):
"""
Yields v_steps+1 sympy numbers ranging from
v_min to v_max.
"""
d = (self.v_max - self.v_min) / self.v_steps
        for i in range(self.v_steps + 1):
a = self.v_min + (d * Integer(i))
yield a
@require_all_args
def vrange2(self):
"""
Yields v_steps pairs of sympy numbers ranging from
(v_min, v_min + step) to (v_max - step, v_max).
"""
d = (self.v_max - self.v_min) / self.v_steps
a = self.v_min + (d * Integer(0))
        for i in range(self.v_steps):
b = self.v_min + (d * Integer(i + 1))
yield a, b
a = b
def frange(self):
for i in self.vrange():
yield float(i.evalf())
|
the-stack_106_21201
|
# File to ingest an equities bundle for zipline
# Import libraries
import pandas as pd
import numpy as np
import sys
data_folder = r'C:\Users\\walte\\OneDrive - K Squared Capital\\K Squared Capital\\Trading Models\\Code\\Live Trading\\Live Trading'
from zipline.utils.calendars import get_calendar
def eu_etfs_bundle():
# Define custom ingest function
def ingest(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
cache,
show_progress,
output_dir,
start_session,
end_session):
# Read in data
# Load in pricing data
#prices, midpoint = get_data('GD')
#prices.to_csv(r'C:\Users\walte\OneDrive - K Squared Capital\K Squared Capital\Trading Models\Data\Zipline\GD EU ETFs\gd_eu_etfs_prices.csv', index = False)
#midpoint.to_csv(r'C:\Users\walte\OneDrive - K Squared Capital\K Squared Capital\Trading Models\Data\Zipline\GD EU ETFs\gd_eu_etfs_midpoint.csv', index = False)
prices = pd.read_csv(r'C:\Users\walte\OneDrive - K Squared Capital\K Squared Capital\Trading Models\Data\Zipline\GD EU ETFs\gd_eu_etfs_prices.csv')
midpoint = pd.read_csv(r'C:\Users\walte\OneDrive - K Squared Capital\K Squared Capital\Trading Models\Data\Zipline\GD EU ETFs\gd_eu_etfs_midpoint.csv')
trades = pd.read_csv(r'C:\Users\walte\OneDrive - K Squared Capital\K Squared Capital\Trading Models\Data\Zipline\GD EU ETFs\gd_eu_etfs_trades.csv')
prices.Date = pd.to_datetime(prices.Date)
prices = prices.set_index(['Date','ConId'])
midpoint.Date = pd.to_datetime(midpoint.Date)
midpoint = midpoint.set_index(['Date','ConId'])
trades.Date = pd.to_datetime(trades.Date)
trades = trades.set_index(['Date','ConId'])
lse_calendar = get_calendar('LSE')
all_sessions = lse_calendar.all_sessions
#prices = prices.sort_index(level = 0)
close_px = prices['Close'].unstack().loc['2016-01-01':]
open_px = prices['Open'].unstack().loc['2016-01-01':]
high_px = prices['High'].unstack().loc['2016-01-01':]
low_px = prices['Low'].unstack().loc['2016-01-01':]
volume = prices['Volume'].unstack().loc['2016-01-01':]
first_idx = all_sessions.get_loc(close_px.index[0])
last_idx = all_sessions.get_loc(close_px.index[-1])
cal_sessions = all_sessions[first_idx:last_idx+1]
# Load in midpoint data
mid_close_px = midpoint['Close'].unstack().loc['2016-01-01':]
mid_open_px = midpoint['Open'].unstack().loc['2016-01-01':]
mid_high_px = midpoint['High'].unstack().loc['2016-01-01':]
mid_low_px = midpoint['Low'].unstack().loc['2016-01-01':]
# Load in trades data
trd_close_px = trades['Close'].unstack().loc['2016-01-01':]
trd_open_px = trades['Open'].unstack().loc['2016-01-01':]
trd_high_px = trades['High'].unstack().loc['2016-01-01':]
trd_low_px = trades['Low'].unstack().loc['2016-01-01':]
# Reindex to calendar sessions
close_px = close_px.tz_localize('UTC').reindex(cal_sessions)
open_px = open_px.tz_localize('UTC').reindex(cal_sessions)
high_px = high_px.tz_localize('UTC').reindex(cal_sessions)
low_px = low_px.tz_localize('UTC').reindex(cal_sessions)
volume = volume.tz_localize('UTC').reindex(cal_sessions)
mid_close_px = mid_close_px.tz_localize('UTC').reindex(cal_sessions)
mid_open_px = mid_open_px.tz_localize('UTC').reindex(cal_sessions)
mid_low_px = mid_low_px.tz_localize('UTC').reindex(cal_sessions)
mid_high_px = mid_high_px.tz_localize('UTC').reindex(cal_sessions)
trd_close_px = trd_close_px.tz_localize('UTC').reindex(cal_sessions)
trd_open_px = trd_open_px.tz_localize('UTC').reindex(cal_sessions)
trd_low_px = trd_low_px.tz_localize('UTC').reindex(cal_sessions)
trd_high_px = trd_high_px.tz_localize('UTC').reindex(cal_sessions)
# Load in ETF info
        etf_info = pd.read_excel(data_folder + r'\Global_Defensive_EU\EU ETFs.xlsx')
ticker_map = etf_info[['Con ID','IB Ticker']].set_index('Con ID').dropna().squeeze()
ticker_map.index = ticker_map.index.astype(int)
# Rename pricing index
close_px = close_px.rename(columns = ticker_map)
open_px = open_px.rename(columns = ticker_map)
high_px = high_px.rename(columns = ticker_map)
low_px = low_px.rename(columns = ticker_map)
volume = volume.rename(columns = ticker_map)
mid_close_px = mid_close_px.rename(columns = ticker_map)
mid_open_px = mid_open_px.rename(columns = ticker_map)
mid_high_px = mid_high_px.rename(columns = ticker_map)
mid_low_px = mid_low_px.rename(columns = ticker_map)
trd_close_px = trd_close_px.rename(columns = ticker_map)
trd_open_px = trd_open_px.rename(columns = ticker_map)
trd_high_px = trd_high_px.rename(columns = ticker_map)
trd_low_px = trd_low_px.rename(columns = ticker_map)
mid_close_px = mid_close_px.ffill()
mid_open_px = mid_open_px.ffill()
mid_high_px = mid_high_px.ffill()
mid_low_px = mid_low_px.ffill()
volume = volume.fillna(1).astype(int)
volume[:] = 1000000000
# Fill in missing closing prices with midpoint data
for etf in close_px.columns:
first_idx = close_px[etf].first_valid_index()
temp_data = close_px.loc[first_idx:, etf]
midpoint_temp = mid_close_px.loc[first_idx:, etf]
trades_temp = trd_close_px.loc[first_idx:, etf]
adj_ratio = temp_data / trades_temp
adj_ratio = adj_ratio.ffill()
missing_idx = temp_data[temp_data.isna()].index
temp_data[missing_idx] = midpoint_temp[missing_idx] * adj_ratio[missing_idx]
close_px.loc[temp_data.index, etf] = temp_data
for etf in open_px.columns:
first_idx = open_px[etf].first_valid_index()
temp_data = open_px.loc[first_idx:, etf]
midpoint_temp = mid_open_px.loc[first_idx:, etf]
trades_temp = trd_open_px.loc[first_idx:, etf]
adj_ratio = temp_data / trades_temp
adj_ratio = adj_ratio.ffill()
missing_idx = temp_data[temp_data.isna()].index
temp_data[missing_idx] = midpoint_temp[missing_idx] * adj_ratio[missing_idx]
open_px.loc[temp_data.index, etf] = temp_data
for etf in high_px.columns:
first_idx = high_px[etf].first_valid_index()
temp_data = high_px.loc[first_idx:, etf]
midpoint_temp = mid_high_px.loc[first_idx:, etf]
trades_temp = trd_high_px.loc[first_idx:, etf]
adj_ratio = temp_data / trades_temp
adj_ratio = adj_ratio.ffill()
missing_idx = temp_data[temp_data.isna()].index
temp_data[missing_idx] = midpoint_temp[missing_idx] * adj_ratio[missing_idx]
high_px.loc[temp_data.index, etf] = temp_data
for etf in low_px.columns:
first_idx = low_px[etf].first_valid_index()
temp_data = low_px.loc[first_idx:, etf]
midpoint_temp = mid_low_px.loc[first_idx:, etf]
trades_temp = trd_low_px.loc[first_idx:, etf]
adj_ratio = temp_data / trades_temp
adj_ratio = adj_ratio.ffill()
missing_idx = temp_data[temp_data.isna()].index
temp_data[missing_idx] = midpoint_temp[missing_idx] * adj_ratio[missing_idx]
low_px.loc[temp_data.index, etf] = temp_data
close_px = close_px.ffill()
open_px = open_px.ffill()
high_px = high_px.ffill()
low_px = low_px.ffill()
symbols = close_px.columns.tolist()
# Create asset metadata
dtype = [('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('symbol', 'object')]
metadata = pd.DataFrame(np.empty(len(symbols), dtype=dtype))
# Create dividend and split dataframe
dividends = pd.DataFrame(columns = ['sid', 'amount',
'ex_date', 'record_date',
'declared_date', 'pay_date'])
splits = pd.DataFrame(columns = ['sid', 'ratio','effective_date'])
# Create list to hold data
data_to_write = []
# Loop through symbols and prepare data
for sid, symbol in enumerate(symbols):
data_ = pd.DataFrame(columns = ['open','high','low','close','volume','open_interest'],
index = close_px.index)
data_['open'] = open_px[symbol]
data_['high'] = high_px[symbol]
data_['low'] = low_px[symbol]
data_['close'] = close_px[symbol]
data_['volume'] = volume[symbol]
data_['open_interest'] = 0
start_dt = data_.index.min()
end_dt = data_.index.max()
# Set auto cloes to day after last trade
ac_date = end_dt + pd.tseries.offsets.BDay()
metadata.iloc[sid] = start_dt, end_dt, ac_date, symbol
# Append data to list
data_to_write.append((sid, data_))
daily_bar_writer.write(data_to_write, show_progress = True)
# Hardcode exchange data
exchanges = etf_info.set_index('IB Ticker').loc[metadata.symbol,'Exchange']
metadata['exchange'] = exchanges.values
# Write metadata
asset_db_writer.write(equities = metadata)
# Write splits and dividents
adjustment_writer.write(splits = splits,
dividends = dividends)
return ingest
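# Hedged registration sketch (assumption, not part of the original script): a
# bundle factory like this is normally registered in zipline's extension.py so
# that `zipline ingest -b eu-etfs` can run it. The bundle name is illustrative.
if __name__ == "__main__":
    from zipline.data.bundles import register
    register('eu-etfs', eu_etfs_bundle(), calendar_name='LSE')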
|
the-stack_106_21202
|
import os
from unittest import TestCase
from unittest.mock import patch
from selenium_youtube_crawler.utilities import read_playlist_from_file, read_playlist_from_youtube_api, \
populate_local_archive, create_required_dirs_for_archive_if_not_present
class TestUtilities(TestCase):
def test_read_playlist_from_file_folder_not_present(self):
result = read_playlist_from_file("tester")
self.assertEqual({}, result)
def test_read_playlist_from_file_folder_present_with_no_files(self):
os.system("mkdir channels")
result = read_playlist_from_file("channels")
self.assertEqual({}, result)
os.system("rm -rf channels")
def test_read_playlist_from_file_folder_present_with_files(self):
os.system("mkdir channels")
video_ids = ["asdfdsfds", "dsfadfsad"]
with open("channels/test.txt", "w") as f:
f.writelines(video_id + "\n" for video_id in video_ids)
result = read_playlist_from_file("channels")
self.assertEqual({'test': video_ids}, result)
os.system("rm -rf channels")
@patch('selenium_youtube_crawler.utilities.YoutubeApiUtils')
def test_read_playlist_from_youtube_api(self, mock_youtube_api):
dummy_data = {
"https://www.youtube.com/channel/adfdsf": "Test1",
"https://www.youtube.com/channel/safafsd": "Test2"
}
mock_youtube_api.return_value.get_channels.return_value = dummy_data
result = read_playlist_from_youtube_api()
self.assertEqual(dummy_data, result)
mock_youtube_api.return_value.get_channels.assert_called_once()
def test_populate_local_archive(self):
os.system("mkdir archive")
source = "test"
os.system("mkdir archive/" + source)
video_id = "sdflsjfsd"
populate_local_archive(source, video_id)
path = "archive/" + source + "/archive.txt"
self.assertTrue(os.path.exists(path))
with open(path, 'r') as f:
words = f.read().rstrip()
self.assertEqual("youtube " + video_id, words)
os.system("rm -rf archive")
def test_create_required_dirs_for_archive_if_not_present(self):
source = "test"
create_required_dirs_for_archive_if_not_present(source)
self.assertTrue(os.path.exists("archive"))
self.assertTrue(os.path.exists("archive/"+source))
os.system("rm -rf archive")
|
the-stack_106_21203
|
# -*- coding: utf-8 -*-
"""
Kakao Hangul Analyzer III
__version__ = '0.4'
__author__ = 'Kakao Corp.'
__copyright__ = 'Copyright (C) 2018-, Kakao Corp. All rights reserved.'
__license__ = 'Apache 2.0'
__maintainer__ = 'Jamie'
__email__ = '[email protected]'
"""
###########
# imports #
###########
from distutils.command.build import build
import os
import shutil
import subprocess
import zipfile
from setuptools import setup
#############
# constants #
#############
_SRC_NAME = 'khaiii-0.4'
#########
# types #
#########
class CustomBuild(build):
"""
custom handler for 'build' command
"""
def run(self):
"""
run build command
"""
with zipfile.ZipFile('{}.zip'.format(_SRC_NAME), 'r') as src_zip:
src_zip.extractall()
build_dir = '{}/build'.format(_SRC_NAME)
os.makedirs(build_dir, exist_ok=True)
subprocess.check_call('cmake -E env CXXFLAGS=\"-w\" cmake ..', cwd=build_dir, shell=True)
subprocess.check_call('make all resource', cwd=build_dir, shell=True)
shutil.rmtree('khaiii/lib', ignore_errors=True)
shutil.copytree('{}/lib'.format(build_dir), 'khaiii/lib')
shutil.rmtree('khaiii/share', ignore_errors=True)
shutil.copytree('{}/share'.format(build_dir), 'khaiii/share')
shutil.rmtree(_SRC_NAME)
build.run(self)
#############
# functions #
#############
#########
# setup #
#########
setup(
name='khaiii',
version='0.4',
description='Kakao Hangul Analyzer III',
url='https://github.com/kakao/khaiii',
author='Kakao Corp.',
author_email='[email protected]',
classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
],
license='Apache 2.0',
packages=['khaiii', ],
include_package_data=True,
install_requires=[],
setup_requires=['pytest-runner', ],
tests_require=['pytest', ],
zip_safe=False,
cmdclass={'build': CustomBuild}
)
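# Hedged usage note (illustration only, not part of the original file): with the
# CustomBuild handler above, a source build is typically driven by
#
#     python setup.py build
#
# which extracts khaiii-0.4.zip, runs cmake/make in <src>/build, and copies the
# resulting lib/ and share/ trees into the khaiii package before the standard
# build step runs.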
|
the-stack_106_21204
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import numpy as np
# pylint: disable=g-bad-import-order
from absl import flags
import tensorflow as tf
from official.utils.misc import keras_utils
# pylint: disable=ungrouped-imports
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.keras.optimizer_v2 import (gradient_descent as
gradient_descent_v2)
FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1 # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'
class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
"""Callback to update learning rate on every batch (not epoch boundaries).
  N.B. Only supports Keras optimizers, not TF optimizers.
  Args:
    schedule: a function that takes an epoch index, a batch index, the number
      of batches per epoch, and the batch size as input (epoch and batch are
      integers indexed from 0) and returns a new learning rate as output
      (float).
"""
def __init__(self, schedule, batch_size, num_images):
super(LearningRateBatchScheduler, self).__init__()
self.schedule = schedule
self.batches_per_epoch = num_images / batch_size
self.batch_size = batch_size
self.epochs = -1
self.prev_lr = -1
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'learning_rate'):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
self.epochs += 1
def on_batch_begin(self, batch, logs=None):
"""Executes before step begins."""
lr = self.schedule(self.epochs,
batch,
self.batches_per_epoch,
self.batch_size)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function should be float.')
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float here
self.prev_lr = lr
tf.compat.v1.logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
class PiecewiseConstantDecayWithWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Piecewise constant decay with warmup schedule."""
def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries,
multipliers, compute_lr_on_cpu=True, name=None):
super(PiecewiseConstantDecayWithWarmup, self).__init__()
if len(boundaries) != len(multipliers) - 1:
raise ValueError('The length of boundaries must be 1 less than the '
'length of multipliers')
base_lr_batch_size = 256
num_batches_per_epoch = epoch_size // batch_size
self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self.step_boundaries = [float(num_batches_per_epoch) * x
for x in boundaries]
self.lr_values = [self.rescaled_lr * m for m in multipliers]
self.warmup_steps = warmup_epochs * num_batches_per_epoch
self.compute_lr_on_cpu = compute_lr_on_cpu
self.name = name
self.learning_rate_ops_cache = {}
def __call__(self, step):
if tf.executing_eagerly():
return self._get_learning_rate(step)
# In an eager function or graph, the current implementation of optimizer
# repeatedly call and thus create ops for the learning rate schedule. To
# avoid this, we cache the ops if not executing eagerly.
graph = tf.compat.v1.get_default_graph()
if graph not in self.learning_rate_ops_cache:
if self.compute_lr_on_cpu:
with tf.device('/device:CPU:0'):
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
else:
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
return self.learning_rate_ops_cache[graph]
def _get_learning_rate(self, step):
"""Compute learning rate at given step."""
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
[self.rescaled_lr, self.step_boundaries,
self.lr_values, self.warmup_steps,
self.compute_lr_on_cpu]):
def warmup_lr(step):
return self.rescaled_lr * (
tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(
step, self.step_boundaries, self.lr_values)
return tf.cond(step < self.warmup_steps,
lambda: warmup_lr(step),
lambda: piecewise_lr(step))
def get_config(self):
return {
'rescaled_lr': self.rescaled_lr,
'step_boundaries': self.step_boundaries,
'lr_values': self.lr_values,
'warmup_steps': self.warmup_steps,
'compute_lr_on_cpu': self.compute_lr_on_cpu,
'name': self.name
}
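# Hedged example (added for illustration; the epoch size, boundaries and
# multipliers are placeholder assumptions): wiring the warmup schedule above
# into the SGD optimizer used elsewhere in this module. Defined but not called.
def _example_piecewise_warmup_optimizer():
  schedule = PiecewiseConstantDecayWithWarmup(
      batch_size=256,
      epoch_size=1281167,  # assumed ImageNet-sized epoch
      warmup_epochs=5,
      boundaries=[30, 60, 80],
      multipliers=[1.0, 0.1, 0.01, 0.001])
  return gradient_descent_v2.SGD(learning_rate=schedule, momentum=0.9)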
def get_config_proto_v1():
"""Return config proto according to flag settings, or None to use default."""
config = None
if FLAGS.enable_xla:
config = tf.compat.v1.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_2)
# Disable PinToHostOptimizer in grappler when enabling XLA because it causes
# OOM and performance regression.
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
def set_config_v2():
"""Config eager context according to flag values using TF 2.0 API."""
if FLAGS.enable_xla:
tf.config.optimizer.set_jit(True)
# Disable PinToHostOptimizer in grappler when enabling XLA because it
# causes OOM and performance regression.
tf.config.optimizer.set_experimental_options(
{"pin_to_host_optimization": False}
)
def set_gpu_thread_mode_and_count(flags_obj):
"""Set GPU thread mode and count, and adjust dataset threads count."""
cpu_count = multiprocessing.cpu_count()
tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)
# Allocate private thread pool for each GPU to schedule and launch kernels
per_gpu_thread_count = flags_obj.per_gpu_thread_count or 2
os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
os.environ['TF_GPU_THREAD_COUNT'])
tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
os.environ['TF_GPU_THREAD_MODE'])
# Limit data preprocessing threadpool to CPU cores minus number of total GPU
# private threads and memory copy threads.
total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
num_runtime_threads = flags_obj.num_gpus
if not flags_obj.datasets_num_private_threads:
flags_obj.datasets_num_private_threads = min(
cpu_count - total_gpu_thread_count - num_runtime_threads,
flags_obj.num_gpus * 8)
tf.compat.v1.logging.info('Set datasets_num_private_threads to %s',
flags_obj.datasets_num_private_threads)
def get_optimizer(learning_rate=0.1):
"""Returns optimizer to use."""
# The learning_rate is overwritten at the beginning of each step by callback.
return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9)
def get_callbacks(learning_rate_schedule_fn, num_images):
"""Returns common callbacks."""
time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
callbacks = [time_callback]
if not FLAGS.use_tensor_lr:
lr_callback = LearningRateBatchScheduler(
learning_rate_schedule_fn,
batch_size=FLAGS.batch_size,
num_images=num_images)
callbacks.append(lr_callback)
if FLAGS.enable_tensorboard:
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=FLAGS.model_dir)
callbacks.append(tensorboard_callback)
if FLAGS.profile_steps:
profiler_callback = keras_utils.get_profiler_callback(
FLAGS.model_dir,
FLAGS.profile_steps,
FLAGS.enable_tensorboard)
callbacks.append(profiler_callback)
return callbacks
def build_stats(history, eval_output, callbacks):
"""Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
callbacks: a list of callbacks which might include a time history callback
used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if eval_output:
stats['accuracy_top_1'] = eval_output[1].item()
stats['eval_loss'] = eval_output[0].item()
if history and history.history:
train_hist = history.history
# Gets final loss from training.
stats['loss'] = train_hist['loss'][-1].item()
# Gets top_1 training accuracy.
if 'categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
elif 'sparse_categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()
if not callbacks:
return stats
# Look for the time history callback which was used during keras.fit
for callback in callbacks:
if isinstance(callback, keras_utils.TimeHistory):
timestamp_log = callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = callback.train_finish_time
if len(timestamp_log) > 1:
stats['avg_exp_per_second'] = (
callback.batch_size * callback.log_steps *
(len(callback.timestamp_log)-1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
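# Hedged illustration (values are made up): a stats dict assembled above
# typically looks like
#     {'accuracy_top_1': 0.76, 'eval_loss': 1.05, 'loss': 0.92,
#      'training_accuracy_top_1': 0.78,
#      'step_timestamp_log': [...], 'train_finish_time': 1234567890.0,
#      'avg_exp_per_second': 2500.0}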
def define_keras_flags():
"""Define flags for Keras models."""
flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
flags.DEFINE_boolean(name='use_trivial_model', default=False,
help='Whether to use a trivial Keras model.')
flags.DEFINE_boolean(name='report_accuracy_metrics', default=True,
help='Report metrics during training and evaluation.')
flags.DEFINE_boolean(name='use_tensor_lr', default=False,
help='Use learning rate tensor instead of a callback.')
flags.DEFINE_boolean(
name='enable_xla', default=False,
help='Whether to enable XLA auto jit compilation. This is still an '
'experimental feature, and is not yet effective with TF 2.0.')
flags.DEFINE_boolean(
name='enable_tensorboard', default=False,
help='Whether to enable Tensorboard callback.')
flags.DEFINE_integer(
name='train_steps', default=None,
help='The number of steps to run for training. If it is larger than '
'# batches per epoch, then use # batches per epoch. When this flag is '
'set, only one epoch is going to run for training.')
flags.DEFINE_string(
name='profile_steps', default=None,
help='Save profiling data to model dir at given range of steps. The '
'value must be a comma separated pair of positive integers, specifying '
'the first and last step to profile. For example, "--profile_steps=2,4" '
'triggers the profiler to process 3 steps, starting from the 2nd step. '
'Note that profiler has a non-trivial performance overhead, and the '
'output file can be gigantic if profiling many steps.')
flags.DEFINE_boolean(
name='data_delay_prefetch', default=False,
help='Add a small delay in tf.data prefetch to prioritize memory copy of '
'other tensors over the data minibatch for the (T+1)th step. It should '
'help improve performance using EagerIterator and function. The codepath '
'when enabling this feature is experimental and will be removed once the '
'corresponding performance features are fully supported in TensorFlow.')
flags.DEFINE_boolean(
name='batchnorm_spatial_persistent', default=True,
      help='Enable the spatial persistent mode for CuDNN batch norm kernel.')
flags.DEFINE_boolean(
name='clone_model_in_keras_dist_strat', default=None,
help='If False, then the experimental code path is used that doesn\'t '
'clone models for distribution.')
flags.DEFINE_boolean(
name='enable_get_next_as_optional', default=False,
help='Enable get_next_as_optional behavior in DistributedIterator.')
def get_synth_input_fn(height, width, num_channels, num_classes,
dtype=tf.float32, drop_remainder=True):
"""Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
copy is still included. This used to find the upper throughput bound when
tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
drop_remainder: A boolean indicates whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
# pylint: disable=unused-argument
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
# Synthetic input should be within [0, 255].
inputs = tf.random.truncated_normal([height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')
labels = tf.random.uniform([1],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
# Cast to float32 for Keras model.
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
# `drop_remainder` will make dataset produce outputs with known shapes.
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn
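# Hedged example (illustration only; the shapes and batch size are assumptions):
# building a synthetic ImageNet-shaped dataset with the helper above. Defined
# but not called.
def _example_synth_dataset():
  synth_input_fn = get_synth_input_fn(
      height=224, width=224, num_channels=3, num_classes=1000)
  return synth_input_fn(is_training=True, data_dir=None, batch_size=32)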
def is_v2_0():
"""Returns true if using tf 2.0."""
return tf.__version__.startswith('2')
def data_delay_prefetch():
"""Use unstable code for perf tuning purposes."""
if not FLAGS.use_synthetic_data:
_monkey_patch_org_create_device_dataset()
def set_cudnn_batchnorm_mode():
"""Set CuDNN batchnorm mode for better performance. Note that the spatial
persistent mode may lead to accuracy losses for certain models."""
if FLAGS.batchnorm_spatial_persistent:
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
else:
os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None)
# TODO(haoyuzhang): remove this monkey patch when the "prefetch with slack"
# feature is available in tf.data.
def _monkey_patch_org_create_device_dataset():
"""Monkey-patch `_create_device_dataset` method with delayed prefetch."""
import ast # pylint: disable=g-import-not-at-top
import inspect # pylint: disable=g-import-not-at-top
from tensorflow.python.data.ops import multi_device_iterator_ops # pylint: disable=g-import-not-at-top
tf.compat.v1.logging.info(
'Using monkey-patched version of MultiDeviceIterator. It should be '
'removed when the prefetch with slack feature is implemented in tf.data.')
cls_multi_device_iterator = ast.parse(
inspect.getsource(multi_device_iterator_ops.MultiDeviceIterator))
org_create_device_dataset_code = inspect.getsource(
multi_device_iterator_ops.MultiDeviceIterator._create_device_dataset) # pylint: disable=protected-access
code_lines = org_create_device_dataset_code.split('\n')
# Insert in reverse order to avoid line number shift by previous insertions
code_lines.insert(5, ' ds = ds.apply(sleep_ops.sleep(11000))') # 11ms
code_lines.insert(2, ' from tensorflow.python.data.experimental.ops import sleep as sleep_ops') # pylint: disable=line-too-long
patched_code = '\n'.join(line[2:] for line in code_lines)
cls_multi_device_iterator.body[0].body[2] = ast.parse(patched_code).body[0]
exec(compile(cls_multi_device_iterator, '<string>', 'exec'), # pylint: disable=exec-used
multi_device_iterator_ops.__dict__)
|
the-stack_106_21205
|
from .pdg_format import _round, _strip
import numpy as np
import re
def pdg_format(value, *errors):
if value is None:
strings, nexp = _round((0, *errors), None, None)
strings = strings[1:]
else:
strings, nexp = _round((value, *errors), None, None)
strings = _strip(strings)
if nexp != 0:
for i, s in enumerate(strings):
if s[-1] in "fn":
continue
m = None
if i == 0:
m = re.match(r"(-?)0\.0+$", s)
if m:
s = m.group(1) + "0"
suffix = ""
if not m:
suffix = "e%i" % nexp
s += suffix
strings[i] = s
return strings
def matrix_format(*values):
vs = np.array(values)
mv = np.max(np.abs(vs))
smv = "%.3g" % mv
try:
i = smv.index("e")
sexp = smv[i + 1 :]
exp = int(sexp)
vs /= 10 ** exp
s = [("%.3fe%i" % (v, exp) if np.isfinite(v) else str(v)) for v in vs]
except ValueError:
s = ["%.3f" % v for v in vs]
return _strip(s)
def goaledm(fm):
    # - taken from the source code, see VariableMetricBuilder::Minimum and
# ModularFunctionMinimizer::Minimize
# - goal is used to detect convergence but violations by 10x are also accepted;
# see VariableMetricBuilder.cxx:425
mn_eps_2 = 4 * np.sqrt(np.finfo("float").eps)
return 2e-3 * max(fm.tolerance * fm.up, mn_eps_2)
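# Hedged worked example: for a typical fit with fm.tolerance = 0.1 and
# fm.up = 1, mn_eps_2 is about 6e-8, so goaledm returns
# 2e-3 * max(0.1 * 1, 6e-8) = 2e-4.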
def format_row(widths, *args):
return (
"".join(
("│{0:^%i}" % w if w > 0 else "│ {0:%i}" % (-w - 1)).format(a)
for (w, a) in zip(widths, args)
)
+ "│"
)
def format_line(widths, edges):
s = edges[0]
for w, e in zip(widths, edges[1:]):
s += "─" * abs(w)
s += e
return s
def fmin_fields(fm):
return [
"FCN = %.4g" % fm.fval,
"Nfcn = %i (%i total)" % (fm.nfcn, fm.nfcn_total),
"EDM = %.3g (Goal: %.3g)" % (fm.edm, goaledm(fm)),
"Ngrad = %i (%i total)" % (fm.ngrad, fm.ngrad_total)
if fm.ngrad_total > 0
else "",
("Valid" if fm.is_valid else "INVALID") + " Minimum",
("Valid" if fm.has_valid_parameters else "INVALID") + " Parameters",
("SOME" if fm.has_parameters_at_limit else "No") + " Parameters at limit",
("ABOVE" if fm.is_above_max_edm else "Below") + " EDM threshold (goal x 10)",
("ABOVE" if fm.has_reached_call_limit else "Below") + " call limit",
"Hesse " + ("FAILED" if fm.hesse_failed else "ok"),
("Has" if fm.has_covariance else "NO") + " Covariance",
"Accurate" if fm.has_accurate_covar else "APPROXIMATE",
"Pos. def." if fm.has_posdef_covar else "NOT pos. def.",
"FORCED" if fm.has_made_posdef_covar else "Not forced",
]
def fmin(fm):
ff = fmin_fields(fm)
w = (-34, 38)
l1 = format_line(w, "┌┬┐")
i1 = format_row(w, *ff[0:2])
i2 = format_row(w, *ff[2:4])
w = (15, 18, 38)
l2 = format_line(w, "├┬┼┤")
v1 = format_row(w, *ff[4:7])
l3 = format_line(w, "├┴┼┤")
v2 = format_row((34, 38), *ff[7:9])
w = (15, 18, 11, 13, 12)
l4 = format_line(w, "├┬┼┬┬┤")
v3 = format_row(w, *ff[9:14])
l5 = format_line(w, "└┴┴┴┴┘")
return "\n".join((l1, i1, i2, l2, v1, l3, v2, l4, v3, l5))
def params(mps):
vnames = [mp.name for mp in mps]
name_width = max([4] + [len(x) for x in vnames])
num_width = max(2, len("%i" % (len(vnames) - 1)))
ws = (-num_width - 1, -name_width - 2, 11, 11, 12, 12, 9, 9, 7)
h = format_row(
ws,
"",
"Name",
"Value",
"Hesse Err",
"Minos Err-",
"Minos Err+",
"Limit-",
"Limit+",
"Fixed",
)
ni = len(ws) - 1
l1 = format_line(ws, "┌" + "┬" * ni + "┐")
l2 = format_line(ws, "├" + "┼" * ni + "┤")
lines = [l1, h, l2]
mes = mps.merrors
for i, mp in enumerate(mps):
if mes and mp.name in mes:
me = mes[mp.name]
val, err, mel, meu = pdg_format(mp.value, mp.error, me.lower, me.upper)
else:
val, err = pdg_format(mp.value, mp.error)
mel = ""
meu = ""
lines.append(
format_row(
ws,
str(i),
mp.name,
val,
err,
mel,
meu,
"%g" % mp.lower_limit if mp.lower_limit is not None else "",
"%g" % mp.upper_limit if mp.upper_limit is not None else "",
"yes" if mp.is_fixed else "CONST" if mp.is_const else "",
)
)
ln3 = format_line(ws, "└" + "┴" * ni + "┘")
lines.append(ln3)
return "\n".join(lines)
def merrors(mes):
n = len(mes)
ws = [10] + [23] * n
l1 = format_line(ws, "┌" + "┬" * n + "┐")
header = format_row(ws, "", *(m.name for m in mes))
ws = [10] + [11] * (2 * n)
l2 = format_line(ws, "├" + "┼┬" * n + "┤")
l3 = format_line(ws, "└" + "┴" * n * 2 + "┘")
x = []
for m in mes:
mel, meu = pdg_format(None, m.lower, m.upper)
x.append(mel)
x.append(meu)
error = format_row(ws, "Error", *x)
x = []
for m in mes:
x.append(str(m.lower_valid))
x.append(str(m.upper_valid))
valid = format_row(ws, "Valid", *x)
x = []
for m in mes:
x.append(str(m.at_lower_limit))
x.append(str(m.at_upper_limit))
at_limit = format_row(ws, "At Limit", *x)
x = []
for m in mes:
x.append(str(m.at_lower_max_fcn))
x.append(str(m.at_upper_max_fcn))
max_fcn = format_row(ws, "Max FCN", *x)
x = []
for m in mes:
x.append(str(m.lower_new_min))
x.append(str(m.upper_new_min))
new_min = format_row(ws, "New Min", *x)
return "\n".join((l1, header, l2, error, valid, at_limit, max_fcn, new_min, l3))
def matrix(m):
n = len(m)
args = []
for mi in m:
for mj in mi:
args.append(mj)
nums = matrix_format(*args)
def row_fmt(args):
s = "│ " + args[0] + " │"
for x in args[1:]:
s += " " + x
s += " │"
return s
first_row_width = max(len(v) for v in m.names)
row_width = max(first_row_width, max(len(v) for v in nums))
v_names = [("{:>%is}" % first_row_width).format(x) for x in m.names]
h_names = [("{:>%is}" % row_width).format(x) for x in m.names]
val_fmt = ("{:>%is}" % row_width).format
w = (first_row_width + 2, (row_width + 1) * len(m.names) + 1)
l1 = format_line(w, "┌┬┐")
l2 = format_line(w, "├┼┤")
l3 = format_line(w, "└┴┘")
header = row_fmt([" " * first_row_width] + h_names)
lines = [l1, header, l2]
for i, vn in enumerate(v_names):
lines.append(row_fmt([vn] + [val_fmt(nums[n * i + j]) for j in range(n)]))
lines.append(l3)
return "\n".join(lines)
|
the-stack_106_21206
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
from m2r import MdInclude
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'MMGeneration'
copyright = '2018-2020, OpenMMLab'
author = 'MMGeneration Authors'
version_file = '../mmgen/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_markdown_tables',
'sphinx.ext.autosectionlabel',
'myst_parser',
'sphinx_copybutton',
]
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmgen.version', 'mmcv.ops'
]
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
# 'logo_url': 'https://mmocr.readthedocs.io/en/latest/',
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmgeneration',
},
{
'name':
'Docs',
'children': [
{
'name': 'MMCV',
'url': 'https://mmcv.readthedocs.io/en/latest/',
},
{
'name': 'MMAction2',
'url': 'https://mmaction2.readthedocs.io/en/latest/',
},
{
'name': 'MMClassification',
'url':
'https://mmclassification.readthedocs.io/en/latest/',
},
{
'name': 'MMDetection',
'url': 'https://mmdetection.readthedocs.io/en/latest/',
},
{
'name': 'MMDetection3D',
'url': 'https://mmdetection3d.readthedocs.io/en/latest/',
},
{
'name': 'MMEditing',
'url': 'https://mmediting.readthedocs.io/en/latest/',
},
{
'name': 'MMGeneration',
'url': 'https://mmgeneration.readthedocs.io/en/latest/',
},
{
'name': 'MMOCR',
'url': 'https://mmocr.readthedocs.io/en/latest/',
},
{
'name': 'MMPose',
'url': 'https://mmpose.readthedocs.io/en/latest/',
},
{
'name': 'MMSegmentation',
'url': 'https://mmsegmentation.readthedocs.io/en/latest/',
},
{
'name': 'MMTracking',
'url': 'https://mmtracking.readthedocs.io/en/latest/',
},
]
},
{
'name':
'OpenMMLab',
'children': [
{
'name': 'Homepage',
'url': 'https://openmmlab.com/'
},
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/'
},
{
'name': 'Twitter',
'url': 'https://twitter.com/OpenMMLab'
},
{
'name': 'Zhihu',
'url': 'https://zhihu.com/people/openmmlab'
},
]
},
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
myst_enable_extensions = ['colon_fence']
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
app.add_config_value('no_underscore_emphasis', False, 'env')
app.add_config_value('m2r_parse_relative_links', False, 'env')
app.add_config_value('m2r_anonymous_references', False, 'env')
app.add_config_value('m2r_disable_inline_math', False, 'env')
app.add_directive('mdinclude', MdInclude)
app.add_config_value('recommonmark_config', {
'auto_toc_tree_section': 'Contents',
'enable_eval_rst': True,
}, True)
app.add_transform(AutoStructify)
|
the-stack_106_21207
|
"""
Exceptions that are raised by sam deploy
"""
from samcli.commands.exceptions import UserException
class ChangeEmptyError(UserException):
def __init__(self, stack_name):
self.stack_name = stack_name
message_fmt = "No changes to deploy. Stack {stack_name} is up to date"
super(ChangeEmptyError, self).__init__(message=message_fmt.format(stack_name=self.stack_name))
class ChangeSetError(UserException):
def __init__(self, stack_name, msg):
self.stack_name = stack_name
self.msg = msg
message_fmt = "Failed to create changeset for the stack: {stack_name}, {msg}"
super(ChangeSetError, self).__init__(message=message_fmt.format(stack_name=self.stack_name, msg=self.msg))
class DeployFailedError(UserException):
def __init__(self, stack_name, msg):
self.stack_name = stack_name
self.msg = msg
message_fmt = "Failed to create/update the stack: {stack_name}, {msg}"
super(DeployFailedError, self).__init__(message=message_fmt.format(stack_name=self.stack_name, msg=msg))
class GuidedDeployFailedError(UserException):
def __init__(self, msg):
self.msg = msg
super(GuidedDeployFailedError, self).__init__(message=msg)
class DeployStackOutPutFailedError(UserException):
def __init__(self, stack_name, msg):
self.stack_name = stack_name
self.msg = msg
message_fmt = "Failed to get outputs from stack: {stack_name}, {msg}"
super(DeployStackOutPutFailedError, self).__init__(
message=message_fmt.format(stack_name=self.stack_name, msg=msg)
)
class DeployBucketInDifferentRegionError(UserException):
def __init__(self, msg):
self.msg = msg
message_fmt = "{msg} : deployment s3 bucket is in a different region, try sam deploy --guided"
super(DeployBucketInDifferentRegionError, self).__init__(message=message_fmt.format(msg=self.msg))
class DeployBucketRequiredError(UserException):
def __init__(self):
message_fmt = (
"Templates with a size greater than 51,200 bytes must be deployed "
"via an S3 Bucket. Please add the --s3-bucket parameter to your "
"command. The local template will be copied to that S3 bucket and "
"then deployed."
)
super(DeployBucketRequiredError, self).__init__(message=message_fmt)
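# Hedged usage sketch (illustration only; the stack name and message are made up):
#
#     raise DeployFailedError(stack_name="my-stack", msg="rollback requested")
#
# surfaces to the user as
#     "Failed to create/update the stack: my-stack, rollback requested"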
|
the-stack_106_21208
|
# micropolisevaluationpanel.py
#
# Micropolis, Unix Version. This game was released for the Unix platform
# in or about 1990 and has been modified for inclusion in the One Laptop
# Per Child program. Copyright (C) 1989 - 2007 Electronic Arts Inc. If
# you need assistance with this program, you may contact:
# http://wiki.laptop.org/go/Micropolis or email [email protected].
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details. You should have received a
# copy of the GNU General Public License along with this program. If
# not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS per GNU GPL Section 7
#
# No trademark or publicity rights are granted. This license does NOT
# give you any right, title or interest in the trademark SimCity or any
# other Electronic Arts trademark. You may not distribute any
# modification of this program using the trademark SimCity or claim any
# affliation or association with Electronic Arts Inc. or its employees.
#
# Any propagation or conveyance of this program must include this
# copyright notice and these terms.
#
# If you convey this program (or any modifications of it) and assume
# contractual liability for the program to recipients of it, you agree
# to indemnify Electronic Arts for any liability that those contractual
# assumptions impose on Electronic Arts.
#
# You may not misrepresent the origins of this program; modified
# versions of the program must be marked as such and not identified as
# the original program.
#
# This disclaimer supplements the one included in the General Public
# License. TO THE FULLEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW, THIS
# PROGRAM IS PROVIDED TO YOU "AS IS," WITH ALL FAULTS, WITHOUT WARRANTY
# OF ANY KIND, AND YOUR USE IS AT YOUR SOLE RISK. THE ENTIRE RISK OF
# SATISFACTORY QUALITY AND PERFORMANCE RESIDES WITH YOU. ELECTRONIC ARTS
# DISCLAIMS ANY AND ALL EXPRESS, IMPLIED OR STATUTORY WARRANTIES,
# INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY, SATISFACTORY QUALITY,
# FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT OF THIRD PARTY
# RIGHTS, AND WARRANTIES (IF ANY) ARISING FROM A COURSE OF DEALING,
# USAGE, OR TRADE PRACTICE. ELECTRONIC ARTS DOES NOT WARRANT AGAINST
# INTERFERENCE WITH YOUR ENJOYMENT OF THE PROGRAM; THAT THE PROGRAM WILL
# MEET YOUR REQUIREMENTS; THAT OPERATION OF THE PROGRAM WILL BE
# UNINTERRUPTED OR ERROR-FREE, OR THAT THE PROGRAM WILL BE COMPATIBLE
# WITH THIRD PARTY SOFTWARE OR THAT ANY ERRORS IN THE PROGRAM WILL BE
# CORRECTED. NO ORAL OR WRITTEN ADVICE PROVIDED BY ELECTRONIC ARTS OR
# ANY AUTHORIZED REPRESENTATIVE SHALL CREATE A WARRANTY. SOME
# JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF OR LIMITATIONS ON IMPLIED
# WARRANTIES OR THE LIMITATIONS ON THE APPLICABLE STATUTORY RIGHTS OF A
# CONSUMER, SO SOME OR ALL OF THE ABOVE EXCLUSIONS AND LIMITATIONS MAY
# NOT APPLY TO YOU.
########################################################################
# Micropolis Evaluation Panel
# Don Hopkins
########################################################################
# Import stuff
from gi.repository import Gtk as gtk
import cairo
from gi.repository import Pango as pango
from . import micropolisengine
from . import micropolisview
########################################################################
# MicropolisAgentPanel
class MicropolisAgentPanel(gtk.Frame):
def __init__(
self,
engine=None,
**args):
gtk.Frame.__init__(
self,
**args)
self.engine = engine
# Views
hbox1 = gtk.HBox(False, 0)
self.hbox1 = hbox1
self.add(hbox1)
vbox1 = gtk.VBox(False, 0)
self.vbox1 = vbox1
hbox1.pack_start(vbox1, False, False, 0)
vbox2 = gtk.VBox(False, 0)
self.vbox2 = vbox2
hbox1.pack_start(vbox2, False, False, 0)
vbox3 = gtk.VBox(False, 0)
self.vbox3 = vbox3
hbox1.pack_start(vbox3, False, False, 0)
buttonMonster = gtk.Button("Reset")
self.buttonMonster = buttonMonster
buttonMonster.connect('clicked', lambda item: self.reset_game())
vbox1.pack_start(buttonMonster, False, False, 0)
checkButtonAutoReset = gtk.CheckButton("Auto Reset")
self.checkButtonAutoReset = checkButtonAutoReset
checkButtonAutoReset.connect('clicked', lambda item: self.enable_auto_reset())
vbox1.pack_start(checkButtonAutoReset, False, False, 0)
self.checkButtonAutoReset.set_active(True)
self.checkButtonStaticBuilds = gtk.CheckButton("Static Builds")
self.checkButtonStaticBuilds.connect('toggled', lambda item: self.set_static())
self.vbox1.pack_start(self.checkButtonStaticBuilds, False, False, 0)
#TODO: not this
pop_threshold = (0, 1000)
traffic_range = (0, 2000)
num_plants_range = (0, 200)
mayor_rating_range = (0, 100)
scaleRes = gtk.HScale()
scaleResMetric = gtk.HScale()
self.scaleRes = scaleRes
self.scaleResMetric = scaleResMetric
scaleRes.set_digits(10)
scaleResMetric.set_digits(10)
scaleRes.set_range(*pop_threshold)
scaleResMetric.set_range(*pop_threshold)
scaleRes.connect('value_changed', self.scaleResChanged)
labelRes = gtk.Label('Residential:')
vbox2.pack_start(labelRes, False, False, 0)
vbox2.pack_start(scaleRes, False, False, 0)
vbox2.pack_start(scaleResMetric, False, False, 0)
scaleCom = gtk.HScale()
scaleComMetric = gtk.HScale()
self.scaleCom = scaleCom
self.scaleComMetric = scaleComMetric
scaleCom.set_digits(10)
scaleCom.set_range(*pop_threshold)
scaleCom.connect('value_changed', self.scaleComChanged)
scaleComMetric.set_digits(10)
scaleComMetric.set_range(*pop_threshold)
labelCom = gtk.Label('Commercial:')
vbox2.pack_start(labelCom, False, False, 0)
vbox2.pack_start(scaleCom, False, False, 0)
vbox2.pack_start(scaleComMetric, False, False, 0)
scaleInd = gtk.HScale()
self.scaleInd = scaleInd
scaleInd.set_digits(10)
scaleInd.set_range(*pop_threshold)
scaleIndMetric = gtk.HScale()
self.scaleIndMetric = scaleIndMetric
scaleIndMetric.set_digits(10)
scaleIndMetric.set_range(*pop_threshold)
scaleInd.connect('value_changed', self.scaleIndChanged)
labelInd = gtk.Label('Industrial:')
vbox2.pack_start(labelInd, False, False, 0)
vbox2.pack_start(scaleInd, False, False, 0)
vbox2.pack_start(scaleIndMetric, False, False, 0)
scaleTraffic = gtk.HScale()
self.scaleTraffic = scaleTraffic
scaleTraffic.set_digits(10)
scaleTraffic.set_range(0, 2000)
scaleTraffic.connect('value_changed', self.scaleTrafficChanged)
scaleTrafficMetric = gtk.HScale()
self.scaleTrafficMetric = scaleTrafficMetric
scaleTrafficMetric.set_digits(10)
scaleTrafficMetric.set_range(0, 2000)
labelTraffic = gtk.Label('Traffic:')
vbox3.pack_start(labelTraffic, False, False, 0)
vbox3.pack_start(scaleTraffic, False, False, 0)
vbox3.pack_start(scaleTrafficMetric, False, False, 0)
scalePlants = gtk.HScale()
self.scalePlants = scalePlants
scalePlants.set_digits(10)
scalePlants.set_range(*num_plants_range)
scalePlantsMetric = gtk.HScale()
self.scalePlantsMetric = scalePlantsMetric
scalePlantsMetric.set_digits(10)
scalePlantsMetric.set_range(*num_plants_range)
scalePlants.connect('value_changed', self.scalePlantsChanged)
labelPlants = gtk.Label('Plants:')
vbox3.pack_start(labelPlants, False, False, 0)
vbox3.pack_start(scalePlants, False, False, 0)
vbox3.pack_start(scalePlantsMetric, False, False, 0)
scaleRating = gtk.HScale()
self.scaleRating = scaleRating
scaleRating.set_digits(10)
scaleRating.set_range(*mayor_rating_range)
scaleRatingMetric = gtk.HScale()
self.scaleRatingMetric = scaleRatingMetric
scaleRatingMetric.set_digits(10)
scaleRatingMetric.set_range(*mayor_rating_range)
scaleRating.connect('value_changed', self.scaleRatingChanged)
labelRating = gtk.Label('Rating:')
vbox3.pack_start(labelRating, False, False, 0)
vbox3.pack_start(scaleRating, False, False, 0)
vbox3.pack_start(scaleRatingMetric, False, False, 0)
def scaleResChanged(self, scale):
self.engine.env.set_res_weight(scale.get_value())
def scaleComChanged(self, scale):
self.engine.env.set_com_weight(scale.get_value())
def scaleIndChanged(self, scale):
self.engine.env.set_ind_weight(scale.get_value())
def scaleTrafficChanged(self, scale):
self.engine.env.set_traffic_weight(scale.get_value())
def scalePlantsChanged(self, scale):
self.engine.env.set_plants_weight(scale.get_value())
def scaleRatingChanged(self, scale):
self.engine.env.set_rating_weight(scale.get_value())
def displayTrgs(self, trgs):
self.scaleRes.set_value(trgs['res_pop'])
self.scaleCom.set_value(trgs['com_pop'])
self.scaleInd.set_value(trgs['ind_pop'])
self.scaleTraffic.set_value(trgs['traffic'])
self.scalePlants.set_value(trgs['num_plants'])
self.scaleRating.set_value(trgs['mayor_rating'])
def displayMetrics(self, metrics):
self.scaleResMetric.set_value(metrics['res_pop'])
self.scaleComMetric.set_value(metrics['com_pop'])
self.scaleIndMetric.set_value(metrics['ind_pop'])
self.scaleTrafficMetric.set_value(metrics['traffic'])
self.scalePlantsMetric.set_value(metrics['num_plants'])
self.scaleRatingMetric.set_value(metrics['mayor_rating'])
def setMetricRanges(self, metric_ranges):
self.scaleResMetric.set_range(*metric_ranges['res_pop'])
self.scaleComMetric.set_range(*metric_ranges['com_pop'])
self.scaleIndMetric.set_range(*metric_ranges['ind_pop'])
self.scaleTrafficMetric.set_range(*metric_ranges['traffic'])
self.scalePlantsMetric.set_range(*metric_ranges['num_plants'])
self.scaleRes.set_range(*metric_ranges['res_pop'])
self.scaleCom.set_range(*metric_ranges['com_pop'])
self.scaleInd.set_range(*metric_ranges['ind_pop'])
self.scaleTraffic.set_range(*metric_ranges['traffic'])
self.scalePlants.set_range(*metric_ranges['num_plants'])
self.scaleRating.set_range(*metric_ranges['mayor_rating'])
        self.scaleRatingMetric.set_range(*metric_ranges['mayor_rating'])
def reset_game(self):
self.engine.env.reset()
def set_static(self):
self.engine.env.static_player_builds = self.checkButtonStaticBuilds.get_active()
def enable_auto_reset(self):
self.engine.env.auto_reset = self.checkButtonAutoReset.get_active()
########################################################################
|
the-stack_106_21209
|
# multithread demo
# https://nrsyed.com/2018/07/05/multithreading-with-opencv-python-to-improve-video-processing-performance/
# object-oriented programming + multithread
#
# 1 thread - acquire image from camera
# 1 thread - to display the raw image
# 1 thread - to compute and display the Laplacian (edge) image of the raw image
# see demo_mthread.py for a simplified version
import numpy as np
from threading import Thread
import cv2
from datetime import datetime
class CountsPerSec:
"""
Class that tracks the number of occurrences ("counts") of an
arbitrary event and returns the frequency in occurrences
(counts) per second. The caller must increment the count.
"""
def __init__(self):
self._start_time = None
self._num_occurrences = 0
def start(self):
self._start_time = datetime.now()
return self
def increment(self):
self._num_occurrences += 1
def countsPerSec(self):
elapsed_time = (datetime.now() - self._start_time).total_seconds()
return self._num_occurrences / ( elapsed_time + np.finfo(float).eps )
class VideoGet:
"""
Class that continuously gets frames from a VideoCapture object
with a dedicated thread.
"""
def __init__(self, src=0):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
def start(self):
Thread(target=self.get, args=()).start()
return self
def get(self):
while not self.stopped:
if not self.grabbed:
self.stop()
else:
(self.grabbed, self.frame) = self.stream.read()
def stop(self):
self.stopped = True
def threadVideoGet(source=0):
"""
Dedicated thread for grabbing video frames with VideoGet object.
Main thread shows video frames.
"""
video_getter = VideoGet(source).start()
cps = CountsPerSec().start()
while True:
if (cv2.waitKey(1) == ord("q")) or video_getter.stopped:
video_getter.stop()
break
frame = video_getter.frame
frame = putIterationsPerSec(frame, cps.countsPerSec())
cv2.imshow("Video", frame)
cps.increment()
class VideoShow:
"""
Class that continuously shows a frame using a dedicated thread.
"""
def __init__(self, frame=None):
self.frame = frame
self.stopped = False
def start(self):
Thread(target=self.show, args=()).start()
return self
def show(self):
while not self.stopped:
cv2.imshow("Video", self.frame)
if cv2.waitKey(1) == ord("q"):
self.stopped = True
def stop(self):
self.stopped = True
class VideoShow_edge:
"""
    Class that continuously computes and shows the Laplacian (edge) image
    of a frame using a dedicated thread.
"""
def __init__(self, frame=None):
self.frame = frame
self.stopped = False
def start(self):
Thread(target=self.show, args=()).start()
return self
def show(self):
while not self.stopped:
laplacian = np.array((
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]), dtype="int")
x = cv2.filter2D( self.frame, -1, laplacian )
cv2.imshow("edge", x)
if cv2.waitKey(1) == ord("q"):
self.stopped = True
def stop(self):
self.stopped = True
def threadVideoShow(source=0):
"""
Dedicated thread for showing video frames with VideoShow object.
Main thread grabs video frames.
"""
cap = cv2.VideoCapture(source)
(grabbed, frame) = cap.read()
video_shower = VideoShow(frame).start()
cps = CountsPerSec().start()
while True:
(grabbed, frame) = cap.read()
if not grabbed or video_shower.stopped:
video_shower.stop()
break
frame = putIterationsPerSec(frame, cps.countsPerSec())
video_shower.frame = frame
cps.increment()
import argparse
def putIterationsPerSec(frame, iterations_per_sec):
"""
Add iterations per second text to lower-left corner of a frame.
"""
cv2.putText(frame, "{:.0f} iterations/sec".format(iterations_per_sec),
(10, 450), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
return frame
def threadAll(source=0):
"""
Dedicated thread for grabbing video frames with VideoGet object.
Dedicated thread for showing video frames with VideoShow object.
Main thread serves only to pass frames between VideoGet and
VideoShow objects/threads.
"""
video_getter = VideoGet(source).start()
video_shower = VideoShow(video_getter.frame).start()
video_edgerr = VideoShow_edge(video_getter.frame).start() # to show image edge online
cps = CountsPerSec().start()
while True:
if video_getter.stopped or video_shower.stopped or video_edgerr.stopped:
video_shower.stop()
video_getter.stop()
video_edgerr.stop()
break
frame = video_getter.frame
frame = putIterationsPerSec(frame, cps.countsPerSec())
video_shower.frame = frame
video_edgerr.frame = frame
cps.increment()
if __name__ == '__main__':
threadAll(0)
|
the-stack_106_21211
|
import logging
from aiogram.dispatcher import FSMContext
from aiogram.types import CallbackQuery, Message
from filters.filters_admin import IsAdmin
from keyboards.default.cancel_menu import cancel_menu
from keyboards.inline.callback_datas import message_callback
from keyboards.inline.react_buttons import message_choices
from loader import dp
from states.state_sub import SelectMessageBySubState
from states.state_tag import SelectMessageByTagState
from utils.db_api import db_commands as commands
@dp.message_handler(IsAdmin(), text="Список cообщений на конкретную тему 💬")
async def select_tags(message: Message):
"""Handler запроса темы."""
await message.answer(text="Введите тему")
await SelectMessageByTagState.tag.set()
@dp.message_handler(IsAdmin(), state=SelectMessageByTagState.tag)
async def return_messages_with_tag(message: Message, state: FSMContext):
"""Handler возврата сообщений с определенной темой."""
tag = message.text
if not await commands.check_exist_tag(tag):
await message.answer(
"Введено некорректное название темы. "
+ "Повторите попытку "
+ "или отмените заявку.",
reply_markup=cancel_menu,
)
return
messages = await commands.get_messages_with_tag(tag)
if messages:
for msg in messages:
from_user = [
str(msg.author.user_id),
msg.author.username,
msg.author.first_name,
msg.author.last_name,
]
from_user = " ".join(
list(filter(lambda item: not (item is None), from_user))
)
await message.answer(
text="\n".join(
[
f"<b>От кого</b>: {from_user}",
f"<b>Тема</b>: {msg.tag}",
f"{msg.text}",
]
),
reply_markup=message_choices,
)
await state.finish()
return
await message.answer(f"Сообщений на тему <b>{tag}</b> нет!")
await state.finish()
@dp.message_handler(IsAdmin(), text="Список сообщений от корреспондента 👨⚕️")
async def select_sub(message: Message):
"""Handler запроса username."""
await message.answer(text="Введите user_id:")
await SelectMessageBySubState.sub.set()
@dp.message_handler(IsAdmin(), state=SelectMessageBySubState.sub)
async def return_messages_by_sub(message: Message, state: FSMContext):
"""Handler возврата сообщений от корреспондента."""
user_id = int(message.text)
if not await commands.check_exist_user_id(user_id):
await message.answer(
"Введено некорректный username. "
+ "Повторите попытку "
+ "или отмените заявку.",
reply_markup=cancel_menu,
)
return
messages = await commands.get_messages_from_sub_by_user_id(user_id)
if messages:
from_user = [
str(messages[0].author.user_id),
messages[0].author.username,
messages[0].author.first_name,
messages[0].author.last_name,
]
from_user = " ".join(
list(filter(lambda item: not (item is None), from_user))
)
for msg in messages:
await message.answer(
text="\n".join(
[
f"<b>От кого</b>: {from_user}",
f"<b>Тема</b>: {msg.tag}",
f"{msg.text}",
]
),
reply_markup=message_choices,
)
await state.finish()
return
await message.answer("Сообщений нет!")
await state.finish()
@dp.callback_query_handler(message_callback.filter(operation="delete"))
async def delete_message_callback(call: CallbackQuery, callback_data: dict):
await call.answer(cache_time=60)
logging.info(f"{callback_data=}")
text = call.message.text
text = text.split("\n")[-1]
await commands.delete_message_by_text(text)
await call.message.delete()
|
the-stack_106_21212
|
from __future__ import unicode_literals
import pprint
from django import VERSION as DJANGO_VERSION
from django.conf import global_settings
from django.core.checks import Warning
from yacms.conf import settings
def check_template_settings(app_configs, **kwargs):
issues = []
if not settings.TEMPLATES:
suggested_config = _build_suggested_template_config(settings)
declaration = 'TEMPLATES = '
config_formatted = pprint.pformat(suggested_config)
config_formatted = "\n".join(' ' * len(declaration) + line
for line in config_formatted.splitlines())
config_formatted = declaration + config_formatted[len(declaration):]
issues.append(Warning(
"Please update your settings to use the TEMPLATES setting rather "
"than the deprecated individual TEMPLATE_ settings. The latter "
"are unsupported and correct behaviour is not guaranteed. Here's "
"a suggestion based on on your existing configuration:\n\n%s\n"
% config_formatted,
id="yacms.core.W01"
))
if settings.DEBUG != settings.TEMPLATE_DEBUG:
issues.append(Warning(
"TEMPLATE_DEBUG and DEBUG settings have different values, "
"which may not be what you want. yacms used to fix this "
"for you, but doesn't any more. Update your settings.py to "
"use the TEMPLATES setting to have template debugging "
"controlled by the DEBUG setting.",
id="yacms.core.W02"
))
else:
loader_tags_built_in = any(
'yacms.template.loader_tags'
in config.get('OPTIONS', {}).get('builtins', {})
for config in settings.TEMPLATES
)
        if DJANGO_VERSION >= (1, 9) and not loader_tags_built_in:
issues.append(Warning(
"You haven't included 'yacms.template.loader_tags' as a "
"builtin in any of your template configurations. yacms's "
"'overextends' tag will not be available in your templates.",
id="yacms.core.W03"
))
return issues
def _build_suggested_template_config(settings):
suggested_templates_config = {
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"builtins": [
"yacms.template.loader_tags",
],
},
}
def set_setting(name, value, unconditional=False):
if value or unconditional:
suggested_templates_config[name] = value
def set_option(name, value):
if value:
suggested_templates_config["OPTIONS"][name.lower()] = value
def get_debug(_):
if settings.TEMPLATE_DEBUG != settings.DEBUG:
return settings.TEMPLATE_DEBUG
def get_default(default):
def getter(name):
value = getattr(settings, name)
if value == getattr(global_settings, name):
value = default
return value
return getter
default_context_processors = [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"yacms.conf.context_processors.settings",
"yacms.pages.context_processors.page",
]
def get_loaders(_):
"""
Django's default TEMPLATES setting doesn't specify loaders, instead
dynamically sets a default based on whether or not APP_DIRS is True.
We check here if the existing TEMPLATE_LOADERS setting matches one
of those default cases, and omit the 'loaders' option if so.
"""
template_loaders = list(settings.TEMPLATE_LOADERS)
default_loaders = list(global_settings.TEMPLATE_LOADERS)
if template_loaders == default_loaders:
# Equivalent to Django's default with APP_DIRS True
template_loaders = None
app_dirs = True
elif template_loaders == default_loaders[:1]:
# Equivalent to Django's default with APP_DIRS False
template_loaders = None
app_dirs = False
else:
# This project has a custom loaders setting, which we'll use.
# Custom loaders are incompatible with APP_DIRS.
app_dirs = False
return template_loaders, app_dirs
def set_loaders(name, value):
template_loaders, app_dirs = value
set_option(name, template_loaders)
set_setting('APP_DIRS', app_dirs, unconditional=True)
old_settings = [
('ALLOWED_INCLUDE_ROOTS', settings.__getattr__, set_option),
('TEMPLATE_STRING_IF_INVALID', settings.__getattr__, set_option),
('TEMPLATE_DIRS', settings.__getattr__, set_setting),
('TEMPLATE_CONTEXT_PROCESSORS',
get_default(default_context_processors), set_option),
('TEMPLATE_DEBUG', get_debug, set_option),
('TEMPLATE_LOADERS', get_loaders, set_loaders),
]
def convert_setting_name(old_name):
return old_name.rpartition('TEMPLATE_')[2]
for setting_name, getter, setter in old_settings:
value = getter(setting_name)
new_setting_name = convert_setting_name(setting_name)
setter(new_setting_name, value)
return [suggested_templates_config]
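# Hedged illustration (values are representative, not the literal output): for a
# project still on the default TEMPLATE_* settings, the suggested configuration
# printed by the warning above looks roughly like
#
#     TEMPLATES = [{'APP_DIRS': True,
#                   'BACKEND': 'django.template.backends.django.DjangoTemplates',
#                   'OPTIONS': {'builtins': ['yacms.template.loader_tags'],
#                               'context_processors': [...]}}]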
|
the-stack_106_21213
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class ManagedCluster(Resource):
"""Managed cluster.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response.
:vartype provisioning_state: str
:param kubernetes_version: Version of Kubernetes specified when creating
the managed cluster.
:type kubernetes_version: str
:param dns_prefix: DNS prefix specified when creating the managed cluster.
:type dns_prefix: str
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
:param agent_pool_profiles: Properties of the agent pool.
:type agent_pool_profiles:
list[~azure.mgmt.containerservice.v2018_08_01_preview.models.ManagedClusterAgentPoolProfile]
:param linux_profile: Profile for Linux VMs in the container service
cluster.
:type linux_profile:
~azure.mgmt.containerservice.v2018_08_01_preview.models.ContainerServiceLinuxProfile
:param service_principal_profile: Information about a service principal
identity for the cluster to use for manipulating Azure APIs.
:type service_principal_profile:
~azure.mgmt.containerservice.v2018_08_01_preview.models.ManagedClusterServicePrincipalProfile
:param addon_profiles: Profile of managed cluster add-on.
:type addon_profiles: dict[str,
~azure.mgmt.containerservice.v2018_08_01_preview.models.ManagedClusterAddonProfile]
:ivar node_resource_group: Name of the resource group containing agent
pool nodes.
:vartype node_resource_group: str
:param enable_rbac: Whether to enable Kubernetes Role-Based Access
Control.
:type enable_rbac: bool
:param network_profile: Profile of network configuration.
:type network_profile:
~azure.mgmt.containerservice.v2018_08_01_preview.models.ContainerServiceNetworkProfile
:param aad_profile: Profile of Azure Active Directory configuration.
:type aad_profile:
~azure.mgmt.containerservice.v2018_08_01_preview.models.ManagedClusterAADProfile
:param api_server_authorized_ip_ranges: Authorized IP Ranges to kubernetes
API server.
:type api_server_authorized_ip_ranges: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'fqdn': {'readonly': True},
'node_resource_group': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'kubernetes_version': {'key': 'properties.kubernetesVersion', 'type': 'str'},
'dns_prefix': {'key': 'properties.dnsPrefix', 'type': 'str'},
'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterAgentPoolProfile]'},
'linux_profile': {'key': 'properties.linuxProfile', 'type': 'ContainerServiceLinuxProfile'},
'service_principal_profile': {'key': 'properties.servicePrincipalProfile', 'type': 'ManagedClusterServicePrincipalProfile'},
'addon_profiles': {'key': 'properties.addonProfiles', 'type': '{ManagedClusterAddonProfile}'},
'node_resource_group': {'key': 'properties.nodeResourceGroup', 'type': 'str'},
'enable_rbac': {'key': 'properties.enableRBAC', 'type': 'bool'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'ContainerServiceNetworkProfile'},
'aad_profile': {'key': 'properties.aadProfile', 'type': 'ManagedClusterAADProfile'},
'api_server_authorized_ip_ranges': {'key': 'properties.apiServerAuthorizedIPRanges', 'type': '[str]'},
}
def __init__(self, *, location: str, tags=None, kubernetes_version: str=None, dns_prefix: str=None, agent_pool_profiles=None, linux_profile=None, service_principal_profile=None, addon_profiles=None, enable_rbac: bool=None, network_profile=None, aad_profile=None, api_server_authorized_ip_ranges=None, **kwargs) -> None:
super(ManagedCluster, self).__init__(location=location, tags=tags, **kwargs)
self.provisioning_state = None
self.kubernetes_version = kubernetes_version
self.dns_prefix = dns_prefix
self.fqdn = None
self.agent_pool_profiles = agent_pool_profiles
self.linux_profile = linux_profile
self.service_principal_profile = service_principal_profile
self.addon_profiles = addon_profiles
self.node_resource_group = None
self.enable_rbac = enable_rbac
self.network_profile = network_profile
self.aad_profile = aad_profile
self.api_server_authorized_ip_ranges = api_server_authorized_ip_ranges
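# --- Illustrative usage sketch (not part of the generated SDK file) ---
# A minimal example of constructing the model defined above, using only the
# keyword arguments declared in its __init__; the location, version string and
# DNS prefix are placeholders, and actually creating the cluster would go
# through the ContainerServiceClient elsewhere in the SDK.
if __name__ == "__main__":
    example_cluster = ManagedCluster(
        location="westeurope",          # required by the Resource base class
        tags={"env": "dev"},
        kubernetes_version="1.11.3",    # hypothetical version string
        dns_prefix="demo-aks",
        enable_rbac=True,
    )
    print(example_cluster.dns_prefix, example_cluster.enable_rbac)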
|
the-stack_106_21216
|
#!/usr/bin/python3
import os
import json
from pprint import pprint
INSTANCE_FOLDER = './original'
TO_FOLDER = './json'
def convert():
files = os.listdir(INSTANCE_FOLDER)
for file in files:
filename = file.split('.')[0]
to_folder = os.path.join(TO_FOLDER, filename)
if not os.path.exists(to_folder):
os.makedirs(to_folder)
with open(os.path.join(INSTANCE_FOLDER, file)) as current:
content = current.readlines()
content = [x.strip() for x in content]
number_of_instances = int(content[0])
number_of_item_types = int(content[3])
for i in range(number_of_instances):
pos = i * (number_of_item_types + 3) + 1
w_container, h_container, d_container = content[pos + 1].split(" ")
containers = []
container = {}
container['Length'] = int(w_container)
container['Height'] = int(h_container)
container['Depth'] = int(d_container)
container['Stock'] = None
container['Cost'] = int(w_container) * int(h_container) * int(d_container)
containers.append(container)
items = []
# The 0/1 after each box dimension (C1 constraint, see
# Goncalves and Resende (2012) "A parallel multi-population
# biased random-key genetic algorithm for a container loading
                # problem") indicates whether placement in the vertical
# orientation is permissible (=1) or not (=0).
for item in range(0, number_of_item_types):
_, w_item, w_c1, d_item, d_c1, h_item, h_c1, demand_item = content[(pos + 3) + item].split(" ")
item = {}
item['Length'] = int(w_item)
item['C1_Length'] = int(w_c1)
item['Height'] = int(h_item)
item['C1_Height'] = int(h_c1)
item['Depth'] = int(d_item)
item['C1_Depth'] = int(d_c1)
item['Demand'] = int(demand_item)
item['DemandMax'] = None
item['Value'] = int(w_item) * int(h_item) * int(d_item)
items.append(item)
json_data = {}
json_data["Name"] = str(i + 1)
json_data["Objects"] = containers
json_data["Items"] = items
out_file_name = os.path.join(to_folder, "{}.json".format(i + 1))
with open(out_file_name, 'w') as outfile:
json.dump(json_data, outfile)
print(out_file_name, ": done")
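# --- Illustrative helper (hypothetical input line, mirrors the parsing above) ---
# Each raw item line is split as: index, length, C1-flag, depth, C1-flag,
# height, C1-flag, demand; this standalone sketch shows the resulting dict.
def _parse_item_line(line):
    _, w, w_c1, d, d_c1, h, h_c1, demand = line.split(" ")
    return {
        "Length": int(w), "C1_Length": int(w_c1),
        "Height": int(h), "C1_Height": int(h_c1),
        "Depth": int(d), "C1_Depth": int(d_c1),
        "Demand": int(demand), "DemandMax": None,
        "Value": int(w) * int(h) * int(d),
    }
# Example: _parse_item_line("1 25 1 30 0 12 1 4")
# -> Length 25, Depth 30, Height 12, Demand 4, Value 9000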
if __name__ == "__main__":
convert()
|
the-stack_106_21217
|
import unittest
from petsc4py import PETSc
import os
from PetscBinaryIO import *
class TestPetscBinaryIO(unittest.TestCase):
def setUp(self):
try:
os.remove('test.dat')
except OSError:
pass
try:
os.remove('test.dat.info')
except OSError:
pass
def test_VecRead(self):
"""Test reading a Vec"""
array = np.array([1.1, 2.2, 3.3])
vec = PETSc.Vec().createSeq(3)
vec[...] = array
viewer = PETSc.Viewer().createBinary('test.dat', PETSc.Viewer.Mode.W)
vec.view(viewer)
viewer.destroy()
vec.destroy()
result, = PetscBinaryIO().readBinaryFile('test.dat')
self.assertTrue(np.allclose(array, result))
def test_VecWrite(self):
"""Test writing a Vec"""
array = np.array([1.1, 2.2, 3.3])
PetscBinaryIO().writeBinaryFile('test.dat', [array.view(Vec),])
vec = PETSc.Vec().createSeq(3)
vec.set(0.)
viewer = PETSc.Viewer().createBinary('test.dat', PETSc.Viewer.Mode.R)
vec.load(viewer)
viewer.destroy()
self.assertTrue(np.allclose(array, vec[...]))
vec.destroy()
def test_ISRead(self):
"""Test reading an IS"""
indices = np.array([3,4,5])
anis = PETSc.IS().createGeneral(list(indices))
viewer = PETSc.Viewer().createBinary('test.dat', PETSc.Viewer.Mode.W)
anis.view(viewer)
viewer.destroy()
anis.destroy()
result, = PetscBinaryIO().readBinaryFile('test.dat')
self.assertTrue((indices == result).all())
def test_MatRead(self):
"""Test reading a Mat"""
mat = PETSc.Mat().createAIJ(2)
mat[0,0] = 1.1
mat[0,1] = 2.1
mat[1,1] = 3.1
mat.assemble()
vals = np.array([1.1,2.1,3.1])
counts = np.array([0,2,3])
cols = np.array([0,1,1])
viewer = PETSc.Viewer().createBinary('test.dat', PETSc.Viewer.Mode.W)
mat.view(viewer)
viewer.destroy()
mat.destroy()
result, = PetscBinaryIO().readBinaryFile('test.dat')
self.assertTrue(np.allclose(vals, result[1][2]))
self.assertTrue((counts == result[1][0]).all())
self.assertTrue((cols == result[1][1]).all())
self.assertTrue((2,2) == result[0])
def test_MatWrite(self):
"""Test writing a Mat"""
vals = np.array([1.1,2.1,3.1])
counts = np.array([0,2,3])
cols = np.array([0,1,1])
mat = MatSparse(((2,2),(counts,cols,vals)))
dense = np.array([1.1,2.1,0.0,3.1])
PetscBinaryIO().writeBinaryFile('test.dat', [mat,])
mat = PETSc.Mat().createAIJ(2)
viewer = PETSc.Viewer().createBinary('test.dat', PETSc.Viewer.Mode.R)
mat.load(viewer)
viewer.destroy()
self.assertTrue(np.allclose(dense, mat[:,:].ravel()))
mat.destroy()
if __name__ == '__main__':
    unittest.main(exit=False)  # keep running so the temporary test files below are removed
try:
os.remove('test.dat')
except OSError:
pass
try:
os.remove('test.dat.info')
except OSError:
pass
|
the-stack_106_21220
|
import os
import dmenu
import pyimgur
def upload():
client_id = "8e98531fa1631f6"
PATH = "/tmp/screenshot.png"
im = pyimgur.Imgur(client_id)
uploaded_image = im.upload_image(PATH, title="Uploaded with PyImgur")
print(uploaded_image.link)
os.system("rm /tmp/screenshot.png")
def save_local():
    save_name = dmenu.show([''], prompt='type the filename (without extension)')
os.system("mv /tmp/screenshot.png /home/philip/pics/screenshots/" + save_name + ".png")
os.system("gnome-screenshot -a -f /tmp/screenshot.png 2> /dev/null")
if dmenu.show(['local', 'imgur']) == 'imgur':
try:
upload()
    except Exception:
        if dmenu.show(['yes', 'no'], prompt='could not upload to Imgur! save locally?') == 'yes':
save_local()
else:
os.system("rm /tmp/screenshot.png")
exit()
else:
save_local()
|
the-stack_106_21223
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.3
# kernelspec:
# display_name: Python [conda env:cpm]
# language: python
# name: conda-env-cpm-py
# ---
import pandas as pd
from IPython.display import display
import cpm.agenda as a
import cpm.functions as f
import gspread
import gspread_dataframe as gs_df
# +
from context import cpm
from cpm.variables import *
# Every time we change a module it is reloaded before executing
# %reload_ext autoreload
# %autoreload 2
# -
# ## Carrega Planilhas
# +
turmas = f.load_turmas("J23_2019_2S_Feedback")
prefix = r"https://drive.google.com/open?id="
for k, v in turmas.items():
print(k + "\t", prefix + v.id)
# -
# ### Lista de Presença
for name, sh in turmas.items():
# Carrega Lista de Presença
wb = f.load_wb_from_sheet(sh.title, "Lista de Presença")
nomes = wb.range("C4:C28")
# Select a range
situacao = wb.range("D4:D28")
for nome, sit in zip(nomes, situacao):
if nome.value != "" and sit.value == "":
sit.value = "Ativo"
# Update in batch
wb.update_cells(situacao)
# ###
# +
datas = """Data
04/08/2019
18/08/2019
25/08/2019
01/09/2019
15/09/2019
22/09/2019
29/09/2019
06/10/2019
20/10/2019
27/10/2019
03/11/2019
10/11/2019
24/11/2019
01/12/2019
08/12/2019
15/12/2019""".split(
"\n"
)
for name, sh in turmas.items():
print(name)
wb = f.load_wb_from_sheet(sh.title, "Alocação")
# Select a range
cell_list = wb.range("A1:A17")
for cell, value in zip(cell_list, datas):
cell.value = value
# Update in batch
wb.update_cells(cell_list)
# -
for name, sh in turmas.items():
print(name)
wb = f.load_wb_from_sheet(sh.title, "Alocação")
# Select a range
cell_list = wb.range("C3:C17")
for cell in cell_list:
cell.value = fr"='Parâmetros'!C{cell.row}"
# Update in batch
wb.update_cells(cell_list, "USER_ENTERED")
wb.update_acell("C2", "CANCELADO")
sheets = f.load_workbooks_from_drive()
sheets
# +
exp = "'\"&turma&\"'"
value = fr'=query(IMPORTRANGE(link_alunos, range_alunos), "select Col2,Col3,Col4,Col6,Col9,Col10,Col11,Col12,Col13,Col14, Col15 where Col1 = {exp}")'
for name, sh in turmas.items():
print(name)
wb = f.load_wb_from_sheet(sh.title, "Info_Students")
wb.update_cell(1, 1, value)
# -
alocação = f.load_df_from_sheet(
ALOCACAO, "Agenda", col_names=COLS_AGENDA + ["Presente"]
)
alocação.head()
listas = f.carrega_listas()
listas.head()
listas = listas[listas.Evasão == "Não"]
listas = listas[listas.Desistência == ""]
listas = listas[listas.Obs == ""]
listas.head()
# ## Define Aula a ser verificada e Filtra Lista de Presença
def checa_coluna(lista: pd.DataFrame, coluna: str) -> bool:
return sum(lista[coluna] == "") > 0
def checa_aula(lista: pd.DataFrame, aula: tuple) -> bool:
check = 0
for col in aula:
if "SPK" not in col:
check += int(checa_coluna(lista, col))
return not bool(check)
for turma in TURMAS:
df = listas[listas.Turma == turma]
for i, aula in enumerate(AULAS[10:11]):
if not checa_aula(df, aula):
# display(alocação[alocação.Aula == str(i + 1)])
display(df[["Nome", "Turma"] + list(aula)])
alocação[alocação.Aula == "1"]
for t in TURMAS:
df = listas[listas.Turma == t]
if checa_coluna(listas[listas.Turma == t], p):
display(df[["Turma", "Nome", p]])
listas[listas.Turma == "A2"]
# ## Analisa preenchimento de Aula
turmas = {}
for t in TURMAS:
turmas[t] = aula[aula.Turma == t]
# ### Presença em Branco
for df in turmas.values():
print(df[p] == False)
# display(alocação[alocação["Aula"] == n])
    df[p]
# +
labels = ["B1", "D1", "F1", "Teens2"]
for t in labels:
display(alocação[alocação.Turma == t])
# +
desist = (~listas.Desistência.isnull()) & (listas.Presença != "0")
obs = listas.Obs.isnull()
listas.loc[(desist & obs), ["Turma", "Nome", "Presença", "Obs"]]
# -
listas.loc[listas.Evasão == "Sim", ["Turma", "Nome", "Presença"]]
# +
def preenche_lacunas(planilhas_de_presença):
for k, (sh, df) in planilhas_de_presença.items():
print("Processing: {}\n".format(k))
print(110 * "=")
for item in f.AULAS:
if isinstance(item, tuple):
p, hw, cp = item
t1 = (df[p] == "NO") & (df[hw] == "")
t2 = (df[p] == "NO") & (df[cp] == "")
if df.loc[t2].shape[0]:
display(df.loc[t2])
df.loc[t1, hw] = "N"
if "SPK" in cp:
df.loc[t2, cp] = "0"
else:
df.loc[t2, cp] = "N/A"
df.loc[df[hw] == "+/-", hw] = "'+/-"
df.loc[df[hw] == "½", hw] = "'+/-"
df.loc[df[hw] == "1/2", hw] = "'+/-"
# corrige_preenchimento(sh, df, 7, 32)
# +
alunos = pd.DataFrame(columns=["Turma", "Nome", "P_15", "Nota_Final"])
for k, (sh, df) in planilhas_de_presença.items():
print(k)
p = (df["P_15"] != "YES") & (df["Nome"] != "")
df["Turma"] = k
subset = df.loc[:, ["Nome", "P_15", "Nota_Final", "Turma"]][p]
alunos = alunos.append(subset)
# -
# ### Checar Presença
iterator = iter(planilhas_de_presença.items())
# +
k, (sh, df) = next(iterator)
print("Turma: " + k)
for item in f.AULAS:
if isinstance(item, tuple) and len(item) == 3:
p, hw, cp = item
t1 = (df[p] == "NO") & (df[hw] == "")
t2 = (df[p] == "YES") & (~df[hw].isin(["Y", "N", "+/-", "N/H"]))
t3 = (df[p] == "NO") & (df[cp] == "")
t4 = (df[p] == "YES") & (~df[cp].isin(["A", "B", "C", "N/A"]))
if df.loc[t1].shape[0]:
print("Falta sem nota de Homework\n")
display(df.loc[t1, ["Nome", p, hw, cp]])
df.loc[t1, hw] = "N/H"
if df.loc[t2].shape[0]:
print("Presença sem Nota de Homework\n")
display(df.loc[t2, ["Nome", p, hw, cp]])
df.loc[t2, hw] = "Y"
if df.loc[t3].shape[0]:
print("Falta sem nota de Participação\n")
display(df.loc[t3, ["Nome", p, hw, cp]])
if "SPK" not in cp:
df.loc[t3, cp] = "N/A"
if df.loc[t4].shape[0]:
print("Presença sem nota de Participação")
display(df.loc[t4, ["Nome", p, hw, cp]])
if "SPK" not in cp:
df.loc[t4, cp] = "A"
# t5 = ((df['P_8'] == "") | (df['P_8'] == 'NO') & (df['Nota_Mid'] != '0.00'))
# t6 = ((df['P_15'] == "") | (df['P_15'] == 'NO') & (df['Nota_Final'] != '0.00'))
# if df.loc[t5].shape[0]:
# print(k)
# display(df.loc[t5, ["Nome",p, "Nota_Mid"]])
# df.loc[t5, ["P_8", "Nota_Mid"]] = ["NO", "0.00"]
# if df.loc[t6].shape[0]:
# print(k)
# display(df.loc[t6, ["Nome",p, "Nota_Final"]])
# df.loc[t6, ["P_15", "Nota_Final"]] = ["NO", "0.00"]
df["Nota_Mid"] = df["Nota_Mid"].str.replace(",", ".")
df["Nota_Final"] = df["Nota_Final"].str.replace(",", ".")
corrige_preenchimento(sh, df, 7, 54)
# +
notas_dos_alunos = {}
for nome, turma in turmas.items():
sh, df = f.load_sheet_from_workbook(
turma, "Notas dos Alunos", skiprows=[1, 2, 3, 4]
)
display(df)
lista = pd.DataFrame(columns=df.columns)
registros = sh.get_all_values()
# lista = f.nomeia_cols_lista(lista)
notas_dos_alunos[nome.split("_")[-1]] = df
break
# -
df.head()
# +
colunas_1 = ["Nome", "P_8", "Nota_Mid"]
colunas_2 = colunas_1 + ["Turma"]
notas_mid = pd.DataFrame(columns=colunas_2)
for k, (sh, df) in iterator:
df["Turma"] = k
notas_mid = notas_mid.append(df[df["Nome"] != ""][colunas_2])
# -
notas_mid.to_csv("Relatorio_Mid.csv", index=False)
# +
aula = "Aula 11"
colunas = ["Nome", "Nota_Mid"]
k, (sh, df) = next(iterator)
print("Processing: {}\n".format(k))
filtro = (agenda["Aula"].str.contains(aula)) & (agenda["Turma"] == k)
professores = agenda[filtro]
print("Professores: \n", professores[["Nome", "Email CPM", "Email Pessoal"]])
criterio = df["Nome"] != ""
display(df[criterio][colunas])
# -
agenda[agenda["Aula"].str.contains("11")]
alocacao[alocacao["Aula"].str.contains("12")]
for key, (sh, df) in planilhas_de_presença.items():
print(key)
display(df[["P_9", "HW_9", "CP_9"]])
presença = {}
for sh, wks in zip(planilhas, listas_de_presenças):
presença[sh.title] = wks[1].range("Q4:S25")
presença = {}
for sh, wks in zip(planilhas, listas_de_presenças):
presença[sh.title] = wks[1]
for plan in presença.values():
plan["Aula 4 (Speaking)"]
for key, value in presença.items():
print(key)
print("\n")
for v in value:
print(v.value)
print("\n")
desistencia = pd.DataFrame(columns=listas_de_presenças[0].columns)
for lista in listas_de_presenças:
desistencia = lista["Desistência"] == "Sim"
print(lista[desistencia][["Código", "NOME COMPLETO", "Desistência", "Evasão"]])
# +
presença = []
for lista in listas_de_presenças:
print(
"Planilha: {}, Coluna BC: {}".format(lista[0].title, lista[0].range("BC1:BC25"))
)
# -
presença[0]
# +
count = []
for turma in presença:
for i, aluno in enumerate(turma[0]):
if aluno.value == r"":
            continue
elif turma[1][i].value == r"0%":
count.append(aluno.value)
# -
count
counts = agenda.iloc[:184]["Nome"].value_counts()
counts
# #### Merge the Teachers Info from all Spreadsheets into one
# +
# for i in skills[0]:
# key = aula.cell(i, 1).value
# value = aula.cell(i, 2).value
# dict_[key] = value
# print(dict_exs)
# -
|
the-stack_106_21224
|
"""
@author: David Lei
@since: 22/04/2017
@modified:
https://www.hackerrank.com/challenges/ctci-comparator-sorting
Sample input:
5
amy 100
david 100
heraldo 50
aakansha 75
aleksa 150
"""
from functools import cmp_to_key
class Player:
def __init__(self, name, score):
self.name = name
self.score = score
def __repr__(self):
return self.name + " " + self.score
def comparator(a, b):
"""
        Compares two Player objects a and b (no self argument; meant to be
        wrapped with functools.cmp_to_key).
        returns:
            -1: if a should appear before b
             0: if a == b
             1: if a should appear after b
        Sorts by score in decreasing order, with ties broken by name in
        increasing (alphabetical) order.
"""
if a.score < b.score:
            # a has the lower score, so report a as "greater" (return 1) to push
            # it towards the end of the sorted list; the net effect is that
            # higher scores are treated as "smaller" and appear earlier.
return 1
if b.score < a.score:
            # Likewise, if b has the lower score, report a as "smaller"
            # (return -1) so that a, with the higher score, appears earlier.
return -1
if a.name < b.name:
return -1
if b.name < a.name:
return 1
return 0
n = int(input())
data = []
for i in range(n):
name, score = input().split()
score = int(score)
p = Player(name, score)
data.append(p)
data = sorted(data, key=cmp_to_key(Player.comparator))
for i in data:
print(i.name, i.score)
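# --- Illustrative alternative (not part of the original solution) ---
# The same ordering (score descending, then name ascending) can be expressed
# with a plain key function instead of a comparator wrapped in cmp_to_key:
data_by_key = sorted(data, key=lambda p: (-p.score, p.name))
assert [(x.name, x.score) for x in data_by_key] == [(x.name, x.score) for x in data]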
|
the-stack_106_21226
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from paddle.fluid.framework import default_main_program
from paddle.fluid.incubate.fleet.parameter_server.ir.pserver_pass import _get_optimizer_input_shape
main_program = default_main_program()
class TestFleetPS(unittest.TestCase):
def test_version(self):
from paddle.fluid.incubate.fleet.parameter_server import version
transpiler = version.is_transpiler()
self.assertEqual(transpiler, True)
def test_optimizer_shape(self):
optimizers = []
optimizers.append(("adam", "Moment1", [100, 1], [50, 1]))
optimizers.append(("adam", "Moment2", [100, 1], [50, 1]))
optimizers.append(("adagrad", "Moment", [100, 1], [50, 1]))
optimizers.append(("adamax", "Moment", [100, 1], [50, 1]))
optimizers.append(("adamax", "InfNorm", [100, 1], [50, 1]))
optimizers.append(("momentum", "Velocity", [100, 1], [50, 1]))
optimizers.append(("lars_momentum", "Velocity", [100, 1], [50, 1]))
optimizers.append(("decayed_adagrad", "Moment", [100, 1], [50, 1]))
optimizers.append(("rmsprop", "Moment", [100, 1], [50, 1]))
optimizers.append(("rmsprop", "MeanSquare", [100, 1], [50, 1]))
optimizers.append(("ftrl", "SquaredAccumulator", [100, 1], [50, 1]))
optimizers.append(("ftrl", "LinearAccumulator", [100, 1], [50, 1]))
for attrs in optimizers:
op_type, varkey, orig_shape, param_shape = attrs
new_shape = _get_optimizer_input_shape(op_type, varkey, orig_shape,
param_shape)
self.assertListEqual(new_shape, param_shape)
optimizers = []
optimizers.append(("sgd", "", [100, 1], [50, 1]))
for attrs in optimizers:
op_type, varkey, orig_shape, param_shape = attrs
new_shape = _get_optimizer_input_shape(op_type, varkey, orig_shape,
param_shape)
self.assertListEqual(new_shape, orig_shape)
with self.assertRaises(ValueError):
optimizers = []
optimizers.append(("new_opti", "", [100, 1], [50, 1]))
for attrs in optimizers:
op_type, varkey, orig_shape, param_shape = attrs
_get_optimizer_input_shape(op_type, varkey, orig_shape,
param_shape)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_21228
|
"""
This code is modified from Hao Luo's repository.
Paper: Bag of Tricks and A Strong Baseline for Deep Person Re-identification
https://github.com/michuanhaohao/reid-strong-baseline
"""
import copy
import random
import torch
from collections import defaultdict
import numpy as np
from torch.utils.data.sampler import Sampler
class RandomIdentitySampler(Sampler):
"""
Randomly sample N identities, then for each identity,
randomly sample K instances, therefore batch size is N*K.
Args:
- data_source (list): list of (img_path, pid, camid).
- num_instances (int): number of instances per identity in a batch.
- batch_size (int): number of examples in a batch.
"""
def __init__(self, data_source, batch_size, num_instances):
self.data_source = data_source
self.batch_size = batch_size
self.num_instances = num_instances
self.num_pids_per_batch = self.batch_size // self.num_instances
self.index_dic = defaultdict(list)
for index, (_, pid, _, _, _, _) in enumerate(self.data_source):
self.index_dic[pid].append(index)
self.pids = list(self.index_dic.keys())
# estimate number of examples in an epoch
self.length = 0
for pid in self.pids:
idxs = self.index_dic[pid]
num = len(idxs)
if num < self.num_instances:
num = self.num_instances
self.length += num - num % self.num_instances
def __iter__(self):
batch_idxs_dict = defaultdict(list)
for pid in self.pids:
idxs = copy.deepcopy(self.index_dic[pid])
if len(idxs) < self.num_instances:
idxs = np.random.choice(idxs, size=self.num_instances, replace=True)
random.shuffle(idxs)
batch_idxs = []
for idx in idxs:
batch_idxs.append(idx)
if len(batch_idxs) == self.num_instances:
batch_idxs_dict[pid].append(batch_idxs)
batch_idxs = []
avai_pids = copy.deepcopy(self.pids)
final_idxs = []
while len(avai_pids) >= self.num_pids_per_batch:
selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
for pid in selected_pids:
batch_idxs = batch_idxs_dict[pid].pop(0)
final_idxs.extend(batch_idxs)
if len(batch_idxs_dict[pid]) == 0:
avai_pids.remove(pid)
self.length = len(final_idxs)
return iter(final_idxs)
def __len__(self):
return self.length
# New add by gu
class RandomIdentitySampler_alignedreid(Sampler):
"""
Randomly sample N identities, then for each identity,
randomly sample K instances, therefore batch size is N*K.
Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/data/sampler.py.
Args:
data_source (Dataset): dataset to sample from.
num_instances (int): number of instances per identity.
"""
def __init__(self, data_source, num_instances):
self.data_source = data_source
self.num_instances = num_instances
self.index_dic = defaultdict(list)
for index, (_, pid, _) in enumerate(data_source):
self.index_dic[pid].append(index)
self.pids = list(self.index_dic.keys())
self.num_identities = len(self.pids)
def __iter__(self):
indices = torch.randperm(self.num_identities)
ret = []
for i in indices:
pid = self.pids[i]
t = self.index_dic[pid]
replace = False if len(t) >= self.num_instances else True
t = np.random.choice(t, size=self.num_instances, replace=replace)
ret.extend(t)
return iter(ret)
def __len__(self):
return self.num_identities * self.num_instances
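# --- Minimal illustrative check (synthetic data_source, not part of the library) ---
# Entries mirror the 6-tuples unpacked in RandomIdentitySampler.__init__: only
# the pid at index 1 matters here. With batch_size=16 and num_instances=4, each
# batch should contain exactly 4 distinct identities.
if __name__ == "__main__":
    fake_source = [("img_%d.jpg" % i, i % 8, 0, 0, 0, 0) for i in range(64)]
    sampler = RandomIdentitySampler(fake_source, batch_size=16, num_instances=4)
    first_batch = list(sampler)[:16]
    pids = sorted({fake_source[idx][1] for idx in first_batch})
    print("identities in first batch:", pids)  # expect 4 distinct pids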
|
the-stack_106_21229
|
import networkx as nx
import matplotlib.pyplot as plt
import v_parser
'''
in_n: input nodes;
out_n: output nodes;
nodes: gates;
edges: wire connections
'''
def grapher(in_n, out_n, nodes, edges):
# in_n, out_n, nodes, edges = verilog_parser.parser(file_)
G=nx.DiGraph()#graph creation
G.add_nodes_from(in_n)
G.add_nodes_from(out_n)
G.add_nodes_from(nodes)
colour_map = []
size = []
line_size = []
for node in G:
if node in in_n:
colour_map.append('magenta')
elif node in out_n:
colour_map.append('yellow')
else:
colour_map.append('cyan')
size.append(300*len(node))
for i in edges:
for j in i[2]:
G.add_edge(i[1], j, weight=6)
nx.draw(G, with_labels=True, node_color=colour_map, node_size=size, arrowsize=20, pos=nx.spring_layout(G, k=7))
# plt.savefig("path_graph1.png")
#plt.show()
print("\nAll paths from input to output:\n")
for i in in_n:
for j in out_n:
for path in nx.all_simple_paths(G, source=i, target=j):
for k in range(len(path)-1):
print(path[k],end="->")
print(path[len(path)-1])
plt.show()
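# --- Minimal illustrative call (hand-made netlist, no Verilog file needed) ---
# The edge format (label, source, [targets]) is inferred from how grapher
# indexes each entry (i[1] as the source, i[2] as the list of destinations).
# Running this opens a matplotlib window and prints the paths a->g_and->y and
# b->g_and->y.
if __name__ == "__main__":
    grapher(
        in_n=["a", "b"],
        out_n=["y"],
        nodes=["g_and"],
        edges=[("wire", "a", ["g_and"]),
               ("wire", "b", ["g_and"]),
               ("gate", "g_and", ["y"])],
    )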
|
the-stack_106_21230
|
# coding: utf-8
"""
Training module
"""
import argparse
import time
import shutil
from typing import List
import logging
import os
import sys
import queue
import pickle
import json
import numpy as np
import torch
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from torchtext.data import Dataset
from joeynmt.model import build_model
from joeynmt.batch import Batch
from joeynmt.helpers import log_data_info, load_config, log_cfg, \
store_attention_plots, load_checkpoint, make_model_dir, \
make_logger, set_seed, symlink_update, ConfigurationError, \
make_retro_logger
from joeynmt.model import Model, _DataParallel
from joeynmt.prediction import validate_on_data
from joeynmt.loss import XentLoss, ReinforceLoss
from joeynmt.data import load_data, make_data_iter
from joeynmt.builders import build_optimizer, build_scheduler, \
build_gradient_clipper
from joeynmt.prediction import test
# for fp16 training
try:
from apex import amp
amp.register_half_function(torch, "einsum")
except ImportError as apex_import_error:
    # error handling in TrainManager object construction;
    # keep a module-level reference so the ImportError can be re-raised later
    no_apex = apex_import_error
logger = logging.getLogger(__name__)
# pylint: disable=too-many-instance-attributes
class TrainManager:
""" Manages training loop, validations, learning rate scheduling
and early stopping."""
def __init__(self, model: Model, config: dict, critic_model: Model =False) -> None:
"""
Creates a new TrainManager for a model, specified as in configuration.
:param model: torch module defining the model
:param config: dictionary containing the training configurations
"""
train_config = config["training"]
self.config = config
# files for logging and storing
self.model_dir = train_config["model_dir"]
assert os.path.exists(self.model_dir)
self.logging_freq = train_config.get("logging_freq", 100)
self.valid_report_file = "{}/validations.txt".format(self.model_dir)
self.tb_writer = SummaryWriter(
log_dir=self.model_dir + "/tensorboard/")
# reinforcement learning parameters
self.reinforcement_learning = train_config.get("reinforcement_learning", {}).get("use_reinforcement_learning", False)
self.temperature = train_config.get("reinforcement_learning", {}).get("hyperparameters", {}).get("temperature", 1)
self.baseline = train_config.get("reinforcement_learning", {}).get("hyperparameters", {}).get("baseline", False)
self.reward = train_config.get("reinforcement_learning", {}).get("hyperparameters", {}).get("reward", 'bleu')
self.method = train_config.get("reinforcement_learning", {}).get("method", 'reinforce')
self.samples = train_config.get("reinforcement_learning", {}).get("hyperparameters", {}).get("samples", 5)
self.alpha = train_config.get("reinforcement_learning", {}).get("hyperparameters", {}).get("alpha", 0.005)
self.add_gold = train_config.get("reinforcement_learning", {}).get("hyperparameters", {}).get("add_gold", False)
self.log_probabilities = train_config.get("reinforcement_learning", {}).get("log_probabilities", False)
self.pickle_logs = train_config.get("reinforcement_learning", {}).get("pickle_logs", False)
self.topk = train_config.get("reinforcement_learning", {}).get("topk", 20)
if self.log_probabilities:
self.entropy_logger = make_retro_logger("{}/entropy.log".format(self.model_dir), "entropy_logger")
self.probability_logger = make_retro_logger("{}/probability.log".format(self.model_dir), "probability_logger")
if self.pickle_logs:
self.collected_gold_ranks = []
self.collected_top10_probabilities = []
self.collected_highest_probabilities = []
self.collected_gold_probabilities = []
self.critic = None
if self.method == "a2c":
self.critic = critic_model
# model
self.model = model
self._log_parameters_list()
# objective
self.label_smoothing = train_config.get("label_smoothing", 0.0)
# CPU / GPU
self.use_cuda = train_config["use_cuda"] and torch.cuda.is_available()
self.n_gpu = torch.cuda.device_count() if self.use_cuda else 0
self.device = torch.device("cuda" if self.use_cuda else "cpu")
if self.reinforcement_learning:
self.model.loss_function = ReinforceLoss(baseline=self.baseline, use_cuda=self.use_cuda, reward=self.reward)
else:
self.model.loss_function = XentLoss(pad_index=self.model.pad_index,
smoothing=self.label_smoothing)
self.normalization = train_config.get("normalization", "batch")
if self.normalization not in ["batch", "tokens", "none"]:
raise ConfigurationError("Invalid normalization option."
"Valid options: "
"'batch', 'tokens', 'none'.")
# optimization
self.learning_rate_min = train_config.get("learning_rate_min", 1.0e-8)
self.clip_grad_fun = build_gradient_clipper(config=train_config)
self.optimizer = build_optimizer(config=train_config,
parameters=model.parameters())
if self.method == "a2c":
self.critic_optimizer = build_optimizer(config=train_config,
parameters=self.critic.parameters(), critic=True)
# validation & early stopping
self.validation_freq = train_config.get("validation_freq", 1000)
self.log_valid_sents = train_config.get("print_valid_sents", [0, 1, 2])
self.ckpt_queue = queue.Queue(
maxsize=train_config.get("keep_last_ckpts", 5))
self.eval_metric = train_config.get("eval_metric", "bleu")
if self.eval_metric not in ['bleu',
'chrf',
'token_accuracy',
'sequence_accuracy']:
raise ConfigurationError("Invalid setting for 'eval_metric', "
"valid options: 'bleu', 'chrf', "
"'token_accuracy', 'sequence_accuracy'.")
self.early_stopping_metric = train_config.get("early_stopping_metric",
"eval_metric")
# early_stopping_metric decides on how to find the early stopping point:
# ckpts are written when there's a new high/low score for this metric.
# If we schedule after BLEU/chrf/accuracy, we want to maximize the
# score, else we want to minimize it.
if self.early_stopping_metric in ["ppl", "loss"]:
self.minimize_metric = True
elif self.early_stopping_metric == "eval_metric":
if self.eval_metric in ["bleu", "chrf",
"token_accuracy", "sequence_accuracy"]:
self.minimize_metric = False
# eval metric that has to get minimized (not yet implemented)
else:
self.minimize_metric = True
else:
raise ConfigurationError(
"Invalid setting for 'early_stopping_metric', "
"valid options: 'loss', 'ppl', 'eval_metric'.")
# eval options
test_config = config["testing"]
self.bpe_type = test_config.get("bpe_type", "subword-nmt")
self.sacrebleu = {"remove_whitespace": True, "tokenize": "13a"}
if "sacrebleu" in config["testing"].keys():
self.sacrebleu["remove_whitespace"] = test_config["sacrebleu"] \
.get("remove_whitespace", True)
self.sacrebleu["tokenize"] = test_config["sacrebleu"] \
.get("tokenize", "13a")
# learning rate scheduling
self.scheduler, self.scheduler_step_at = build_scheduler(
config=train_config,
scheduler_mode="min" if self.minimize_metric else "max",
optimizer=self.optimizer,
hidden_size=config["model"]["encoder"]["hidden_size"])
if self.method == "a2c":
self.critic_scheduler, self.critic_scheduler_step_at = build_scheduler(
config=train_config,
scheduler_mode="min" if self.minimize_metric else "max",
optimizer=self.critic_optimizer,
hidden_size=config["model"]["encoder"]["hidden_size"])
# data & batch handling
self.level = config["data"]["level"]
if self.level not in ["word", "bpe", "char"]:
raise ConfigurationError("Invalid segmentation level. "
"Valid options: 'word', 'bpe', 'char'.")
self.shuffle = train_config.get("shuffle", True)
self.epochs = train_config["epochs"]
self.batch_size = train_config["batch_size"]
# per-device batch_size = self.batch_size // self.n_gpu
self.batch_type = train_config.get("batch_type", "sentence")
self.eval_batch_size = train_config.get("eval_batch_size",
self.batch_size)
# per-device eval_batch_size = self.eval_batch_size // self.n_gpu
self.eval_batch_type = train_config.get("eval_batch_type",
self.batch_type)
self.batch_multiplier = train_config.get("batch_multiplier", 1)
# generation
self.max_output_length = train_config.get("max_output_length", None)
if self.use_cuda:
self.model.cuda()
if self.method == "a2c":
self.critic.cuda()
# fp16
self.fp16 = train_config.get("fp16", False)
if self.fp16:
if 'apex' not in sys.modules:
raise ImportError(
"Please install apex from "
"https://www.github.com/nvidia/apex "
"to use fp16 training.") from no_apex
self.model, self.optimizer = amp.initialize(
self.model, self.optimizer, opt_level='O1')
# opt level: one of {"O0", "O1", "O2", "O3"}
# see https://nvidia.github.io/apex/amp.html#opt-levels
# initialize training statistics
self.stats = self.TrainStatistics(
steps=0,
stop=False,
total_tokens=0,
best_ckpt_iter=0,
best_ckpt_score=np.inf if self.minimize_metric else -np.inf,
minimize_metric=self.minimize_metric
)
# model parameters
if "load_model" in train_config.keys():
self.init_from_checkpoint(train_config["load_model"],
reset_best_ckpt=train_config.get("reset_best_ckpt", False),
reset_scheduler=train_config.get("reset_scheduler", False),
reset_optimizer=train_config.get("reset_optimizer", False))
# multi-gpu training (should be after apex fp16 initialization)
if self.n_gpu > 1:
self.model = _DataParallel(self.model)
def _save_checkpoint(self) -> None:
"""
Save the model's current parameters and the training state to a
checkpoint.
The training state contains the total number of training steps,
the total number of training tokens,
the best checkpoint score and iteration so far,
and optimizer and scheduler states.
"""
model_path = "{}/{}.ckpt".format(self.model_dir, self.stats.steps)
model_state_dict = self.model.module.state_dict() \
if isinstance(self.model, torch.nn.DataParallel) \
else self.model.state_dict()
state = {
"steps": self.stats.steps,
"total_tokens": self.stats.total_tokens,
"best_ckpt_score": self.stats.best_ckpt_score,
"best_ckpt_iteration": self.stats.best_ckpt_iter,
"model_state": model_state_dict,
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": self.scheduler.state_dict() if
self.scheduler is not None else None,
'amp_state': amp.state_dict() if self.fp16 else None
}
torch.save(state, model_path)
if self.ckpt_queue.full():
to_delete = self.ckpt_queue.get() # delete oldest ckpt
try:
os.remove(to_delete)
except FileNotFoundError:
logger.warning("Wanted to delete old checkpoint %s but "
"file does not exist.", to_delete)
self.ckpt_queue.put(model_path)
best_path = "{}/best.ckpt".format(self.model_dir)
try:
# create/modify symbolic link for best checkpoint
symlink_update("{}.ckpt".format(self.stats.steps), best_path)
except OSError:
# overwrite best.ckpt
torch.save(state, best_path)
def init_from_checkpoint(self, path: str,
reset_best_ckpt: bool = False,
reset_scheduler: bool = False,
reset_optimizer: bool = False) -> None:
"""
Initialize the trainer from a given checkpoint file.
This checkpoint file contains not only model parameters, but also
scheduler and optimizer states, see `self._save_checkpoint`.
:param path: path to checkpoint
:param reset_best_ckpt: reset tracking of the best checkpoint,
use for domain adaptation with a new dev
set or when using a new metric for fine-tuning.
:param reset_scheduler: reset the learning rate scheduler, and do not
use the one stored in the checkpoint.
:param reset_optimizer: reset the optimizer, and do not use the one
stored in the checkpoint.
"""
logger.info("Loading model from %s", path)
model_checkpoint = load_checkpoint(path=path, use_cuda=self.use_cuda)
# restore model and optimizer parameters
self.model.load_state_dict(model_checkpoint["model_state"])
if not reset_optimizer:
self.optimizer.load_state_dict(model_checkpoint["optimizer_state"])
else:
logger.info("Reset optimizer.")
if not reset_scheduler:
if model_checkpoint["scheduler_state"] is not None and \
self.scheduler is not None:
self.scheduler.load_state_dict(
model_checkpoint["scheduler_state"])
else:
logger.info("Reset scheduler.")
# restore counts
self.stats.steps = model_checkpoint["steps"]
self.stats.total_tokens = model_checkpoint["total_tokens"]
if not reset_best_ckpt:
self.stats.best_ckpt_score = model_checkpoint["best_ckpt_score"]
self.stats.best_ckpt_iter = model_checkpoint["best_ckpt_iteration"]
else:
logger.info("Reset tracking of the best checkpoint.")
# move parameters to cuda
if self.use_cuda:
self.model.cuda()
# fp16
if self.fp16 and model_checkpoint.get("amp_state", None) is not None:
amp.load_state_dict(model_checkpoint['amp_state'])
# pylint: disable=unnecessary-comprehension
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def train_and_validate(self, train_data: Dataset, valid_data: Dataset) \
-> None:
"""
Train the model and validate it from time to time on the validation set.
:param train_data: training data
:param valid_data: validation data
"""
train_iter = make_data_iter(train_data,
batch_size=self.batch_size,
batch_type=self.batch_type,
train=True, shuffle=self.shuffle)
#################################################################
# simplify accumulation logic:
#################################################################
# for epoch in range(epochs):
# self.model.zero_grad()
# epoch_loss = 0.0
# batch_loss = 0.0
# for i, batch in enumerate(iter(train_iter)):
#
# # gradient accumulation:
# # loss.backward() inside _train_step()
# batch_loss += self._train_step(inputs)
#
# if (i + 1) % self.batch_multiplier == 0:
# self.optimizer.step() # update!
# self.model.zero_grad() # reset gradients
# self.steps += 1 # increment counter
#
# epoch_loss += batch_loss # accumulate batch loss
# batch_loss = 0 # reset batch loss
#
# # leftovers are just ignored.
#################################################################
logger.info(
"Train stats:\n"
"\tdevice: %s\n"
"\tn_gpu: %d\n"
"\t16-bits training: %r\n"
"\tgradient accumulation: %d\n"
"\tbatch size per device: %d\n"
"\ttotal batch size (w. parallel & accumulation): %d",
self.device, self.n_gpu, self.fp16, self.batch_multiplier,
self.batch_size//self.n_gpu if self.n_gpu > 1 else self.batch_size,
self.batch_size * self.batch_multiplier)
for epoch_no in range(self.epochs):
logger.info("EPOCH %d", epoch_no + 1)
if self.scheduler is not None and self.scheduler_step_at == "epoch":
self.scheduler.step(epoch=epoch_no)
# validate before training begins
if self.stats.steps % self.validation_freq == 0:
self._validate(valid_data, epoch_no)
self.model.train()
if self.method == "a2c":
self.critic.train()
# Reset statistics for each epoch.
start = time.time()
total_valid_duration = 0
start_tokens = self.stats.total_tokens
self.model.zero_grad()
epoch_loss = 0
batch_loss = 0
for i, batch in enumerate(iter(train_iter)):
# create a Batch object from torchtext batch
batch = Batch(batch, self.model.pad_index,
use_cuda=self.use_cuda)
# get batch loss
batch_loss += self._train_step(batch)
# update!
if (i + 1) % self.batch_multiplier == 0:
# clip gradients (in-place)
if self.clip_grad_fun is not None:
if self.fp16:
self.clip_grad_fun(
params=amp.master_params(self.optimizer))
else:
self.clip_grad_fun(params=self.model.parameters())
# make gradient step
self.optimizer.step()
# decay lr
if self.scheduler is not None \
and self.scheduler_step_at == "step":
self.scheduler.step()
# reset gradients
self.model.zero_grad()
# increment step counter
self.stats.steps += 1
# log learning progress
if self.stats.steps % self.logging_freq == 0:
self.tb_writer.add_scalar("train/train_batch_loss",
batch_loss, self.stats.steps)
elapsed = time.time() - start - total_valid_duration
elapsed_tokens = self.stats.total_tokens - start_tokens
logger.info(
"Epoch %3d, Step: %8d, Batch Loss: %12.6f, "
"Tokens per Sec: %8.0f, Lr: %.6f",
epoch_no + 1, self.stats.steps, batch_loss,
elapsed_tokens / elapsed,
self.optimizer.param_groups[0]["lr"])
start = time.time()
total_valid_duration = 0
start_tokens = self.stats.total_tokens
# Only add complete loss of full mini-batch to epoch_loss
epoch_loss += batch_loss # accumulate epoch_loss
                    batch_loss = 0  # reset batch_loss
# validate on the entire dev set
if self.stats.steps % self.validation_freq == 0:
valid_duration = self._validate(valid_data, epoch_no)
total_valid_duration += valid_duration
if self.stats.stop:
break
if self.stats.stop:
logger.info(
'Training ended since minimum lr %f was reached.',
self.learning_rate_min)
break
logger.info('Epoch %3d: total training loss %.2f',
epoch_no + 1, epoch_loss)
else:
logger.info('Training ended after %3d epochs.', epoch_no + 1)
logger.info('Best validation result (greedy) at step %8d: %6.2f %s.',
self.stats.best_ckpt_iter, self.stats.best_ckpt_score,
self.early_stopping_metric)
self.tb_writer.close() # close Tensorboard writer
def _train_step(self, batch: Batch) -> Tensor:
"""
Train the model on one batch: Compute the loss.
:param batch: training batch
:return: loss for batch (sum)
"""
# reactivate training
self.model.train()
if self.method == "a2c":
self.critic.train()
# get loss
if self.reinforcement_learning:
batch_loss, distribution, _, _ = self.model(
return_type=self.method,
critic=self.critic,
src=batch.src, trg=batch.trg,
trg_input=batch.trg_input, src_mask=batch.src_mask,
src_length=batch.src_length, trg_mask=batch.trg_mask,
max_output_length=self.max_output_length,
temperature = self.temperature,
samples=self.samples, alpha = self.alpha,
add_gold=self.add_gold,
topk=self.topk,
log_probabilities=self.log_probabilities,
pickle_logs=self.pickle_logs)
if self.method == "a2c":
losses = batch_loss
batch_loss = losses[0]
critic_loss = losses[1]
else:
batch_loss, distribution, _, _ = self.model(
return_type="loss", src=batch.src, trg=batch.trg,
trg_input=batch.trg_input, src_mask=batch.src_mask,
max_output_length=self.max_output_length,
src_length=batch.src_length, trg_mask=batch.trg_mask)
# average on multi-gpu parallel training
if self.n_gpu > 1:
batch_loss = batch_loss.mean()
# normalize batch loss
if self.normalization == "batch":
normalizer = batch.nseqs
elif self.normalization == "tokens":
normalizer = batch.ntokens
elif self.normalization == "none":
normalizer = 1
else:
raise NotImplementedError(
"Only normalize by 'batch' or 'tokens' "
"or summation of loss 'none' implemented")
norm_batch_loss = batch_loss / normalizer
if self.method == "a2c":
norm_critic_loss = critic_loss / normalizer
if self.batch_multiplier > 1:
norm_batch_loss = norm_batch_loss / self.batch_multiplier
if self.method == "a2c":
norm_critic_loss = norm_critic_loss / self.batch_multiplier
# accumulate gradients
if self.fp16:
with amp.scale_loss(norm_batch_loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
norm_batch_loss.backward(retain_graph=True)
# perform critic backward and optimization step
# TODO move out of fcn
if self.method == "a2c":
#norm_batch_loss.backward(retain_graph=True)
norm_critic_loss.backward()
if self.clip_grad_fun is not None:
self.clip_grad_fun(params=self.critic.parameters())
self.critic_optimizer.step()
self.critic_optimizer.zero_grad()
# increment token counter
self.stats.total_tokens += batch.ntokens
return norm_batch_loss.item()
def _validate(self, valid_data, epoch_no):
valid_start_time = time.time()
valid_score, valid_loss, valid_ppl, valid_sources, \
valid_sources_raw, valid_references, valid_hypotheses, \
valid_hypotheses_raw, valid_attention_scores, valid_logs = \
validate_on_data(
batch_size=self.eval_batch_size,
data=valid_data,
config=self.config,
eval_metric=self.eval_metric,
level=self.level, model=self.model,
use_cuda=self.use_cuda,
max_output_length=self.max_output_length,
compute_loss=True,
beam_size=1, # greedy validations
batch_type=self.eval_batch_type,
postprocess=True, # always remove BPE for validation
bpe_type=self.bpe_type, # "subword-nmt" or "sentencepiece"
sacrebleu=self.sacrebleu, # sacrebleu options
n_gpu=self.n_gpu,
critic=self.critic
)
self.tb_writer.add_scalar(
"valid/valid_loss", valid_loss, self.stats.steps)
self.tb_writer.add_scalar(
"valid/valid_score", valid_score, self.stats.steps)
self.tb_writer.add_scalar(
"valid/valid_ppl", valid_ppl, self.stats.steps)
if self.early_stopping_metric == "loss":
ckpt_score = valid_loss
elif self.early_stopping_metric in ["ppl", "perplexity"]:
ckpt_score = valid_ppl
else:
ckpt_score = valid_score
new_best = False
if self.stats.is_best(ckpt_score):
self.stats.best_ckpt_score = ckpt_score
self.stats.best_ckpt_iter = self.stats.steps
logger.info('Hooray! New best validation result [%s]!',
self.early_stopping_metric)
if self.ckpt_queue.maxsize > 0:
logger.info("Saving new checkpoint.")
new_best = True
self._save_checkpoint()
if self.scheduler is not None \
and self.scheduler_step_at == "validation":
self.scheduler.step(ckpt_score)
# append to validation report
self._add_report(
valid_score=valid_score, valid_loss=valid_loss,
valid_ppl=valid_ppl, eval_metric=self.eval_metric,
new_best=new_best)
self._log_examples(
sources_raw=[v for v in valid_sources_raw],
sources=valid_sources,
hypotheses_raw=valid_hypotheses_raw,
hypotheses=valid_hypotheses,
references=valid_references
)
valid_duration = time.time() - valid_start_time
logger.info(
'Validation result (greedy) at epoch %3d, '
'step %8d: %s: %6.2f, loss: %8.4f, ppl: %8.4f, '
'duration: %.4fs', epoch_no + 1, self.stats.steps,
self.eval_metric, valid_score, valid_loss,
valid_ppl, valid_duration)
# store validation set outputs
self._store_outputs(valid_hypotheses)
# store attention plots for selected valid sentences
if valid_attention_scores:
store_attention_plots(
attentions=valid_attention_scores,
targets=valid_hypotheses_raw,
sources=[s for s in valid_data.src],
indices=self.log_valid_sents,
output_prefix="{}/att.{}".format(
self.model_dir, self.stats.steps),
tb_writer=self.tb_writer, steps=self.stats.steps)
if self.reinforcement_learning and self.log_probabilities:
self._log_reinforcement_learning(valid_logs, epoch_no, valid_hypotheses)
return valid_duration
def _add_report(self, valid_score: float, valid_ppl: float,
valid_loss: float, eval_metric: str,
new_best: bool = False) -> None:
"""
Append a one-line report to validation logging file.
:param valid_score: validation evaluation score [eval_metric]
:param valid_ppl: validation perplexity
:param valid_loss: validation loss (sum over whole validation set)
:param eval_metric: evaluation metric, e.g. "bleu"
:param new_best: whether this is a new best model
"""
current_lr = -1
# ignores other param groups for now
for param_group in self.optimizer.param_groups:
current_lr = param_group['lr']
if current_lr < self.learning_rate_min:
self.stats.stop = True
with open(self.valid_report_file, 'a') as opened_file:
opened_file.write(
"Steps: {}\tLoss: {:.5f}\tPPL: {:.5f}\t{}: {:.5f}\t"
"LR: {:.8f}\t{}\n".format(
self.stats.steps, valid_loss, valid_ppl, eval_metric,
valid_score, current_lr, "*" if new_best else ""))
def _log_parameters_list(self) -> None:
"""
Write all model parameters (name, shape) to the log.
"""
model_parameters = filter(lambda p: p.requires_grad,
self.model.parameters())
n_params = sum([np.prod(p.size()) for p in model_parameters])
logger.info("Total params: %d", n_params)
trainable_params = [n for (n, p) in self.model.named_parameters()
if p.requires_grad]
logger.debug("Trainable parameters: %s", sorted(trainable_params))
assert trainable_params
def _log_examples(self, sources: List[str], hypotheses: List[str],
references: List[str],
sources_raw: List[List[str]] = None,
hypotheses_raw: List[List[str]] = None,
references_raw: List[List[str]] = None) -> None:
"""
        Log the first `self.log_valid_sents` sentences from given examples.
:param sources: decoded sources (list of strings)
:param hypotheses: decoded hypotheses (list of strings)
:param references: decoded references (list of strings)
:param sources_raw: raw sources (list of list of tokens)
:param hypotheses_raw: raw hypotheses (list of list of tokens)
:param references_raw: raw references (list of list of tokens)
"""
for p in self.log_valid_sents:
if p >= len(sources):
continue
logger.info("Example #%d", p)
if sources_raw is not None:
logger.debug("\tRaw source: %s", sources_raw[p])
if references_raw is not None:
logger.debug("\tRaw reference: %s", references_raw[p])
if hypotheses_raw is not None:
logger.debug("\tRaw hypothesis: %s", hypotheses_raw[p])
logger.info("\tSource: %s", sources[p])
logger.info("\tReference: %s", references[p])
logger.info("\tHypothesis: %s", hypotheses[p])
def _log_reinforcement_learning(self, valid_logs, epoch_no, valid_hypotheses):
entropy, gold_strings, predicted_strings, highest_words, total_probability, \
highest_word, highest_prob, gold_probabilities, gold_token_ranks, rewards, old_bleus = valid_logs
self.probability_logger.info(
"Epoch %3d Step: %8d \n",
epoch_no + 1, self.stats.steps)
self.entropy_logger.info(
"Epoch %3d Step: %8d \n"
"Entropy: %12.8f",
epoch_no + 1, self.stats.steps, entropy)
total_probability = [torch.stack(el) for el in total_probability if el != []]
highest_prob = [torch.stack(el) for el in highest_prob if el != []]
gold_probabilities = [torch.stack(el) for el in gold_probabilities if el != []]
average_total_prob = torch.mean(torch.stack([torch.mean(el) for el in total_probability]))
average_highest_prob = torch.mean(torch.stack([torch.mean(el) for el in highest_prob]))
average_gold_prob = torch.mean(torch.stack([torch.mean(el) for el in gold_probabilities]))
self.probability_logger.info(
"Average Top10 Probability: %2.4f \n"
"Average Highest Probability: %2.4f \n"
"Average Gold Probability: %2.4f \n", \
average_total_prob, average_highest_prob, average_gold_prob)
if self.pickle_logs:
self.collected_top10_probabilities.append(total_probability)
self.collected_highest_probabilities.append(highest_prob)
self.collected_gold_probabilities.append(gold_probabilities)
self.collected_gold_ranks.append(gold_token_ranks)
with open(self.model_dir+"/top10.pickle", "wb") as f:
pickle.dump(self.collected_top10_probabilities, f)
with open(self.model_dir+"/highest_prob.pickle", "wb") as f:
pickle.dump(self.collected_highest_probabilities, f)
with open(self.model_dir+"/gold_token.pickle", "wb") as f:
pickle.dump(self.collected_gold_probabilities, f)
with open(self.model_dir+"/gold_ranks.pickle", "wb") as f:
pickle.dump(self.collected_gold_ranks, f)
def _store_outputs(self, hypotheses: List[str]) -> None:
"""
Write current validation outputs to file in `self.model_dir.`
:param hypotheses: list of strings
"""
current_valid_output_file = "{}/{}.hyps".format(self.model_dir,
self.stats.steps)
with open(current_valid_output_file, 'w') as opened_file:
for hyp in hypotheses:
opened_file.write("{}\n".format(hyp))
class TrainStatistics:
def __init__(self, steps: int = 0, stop: bool = False,
total_tokens: int = 0, best_ckpt_iter: int = 0,
best_ckpt_score: float = np.inf,
minimize_metric: bool = True) -> None:
# global update step counter
self.steps = steps
# stop training if this flag is True
# by reaching learning rate minimum
self.stop = stop
# number of total tokens seen so far
self.total_tokens = total_tokens
# store iteration point of best ckpt
self.best_ckpt_iter = best_ckpt_iter
# initial values for best scores
self.best_ckpt_score = best_ckpt_score
# minimize or maximize score
self.minimize_metric = minimize_metric
def is_best(self, score):
if self.minimize_metric:
is_best = score < self.best_ckpt_score
else:
is_best = score > self.best_ckpt_score
return is_best
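# --- Illustrative RL configuration sketch (keys mirror TrainManager.__init__) ---
# In practice these keys live under the "training" section of the YAML config
# passed on the command line; the values below are simply the defaults read by
# the .get(...) calls in __init__ and are placeholders, not a recommendation.
EXAMPLE_RL_TRAINING_SECTION = {
    "reinforcement_learning": {
        "use_reinforcement_learning": True,
        "method": "reinforce",        # "a2c" additionally builds a critic model
        "log_probabilities": False,
        "pickle_logs": False,
        "topk": 20,
        "hyperparameters": {
            "temperature": 1,
            "baseline": False,
            "reward": "bleu",
            "samples": 5,
            "alpha": 0.005,
            "add_gold": False,
        },
    },
}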
def train(cfg_file: str) -> None:
"""
Main training function. After training, also test on test data if given.
:param cfg_file: path to configuration yaml file
"""
cfg = load_config(cfg_file)
# make logger
model_dir = make_model_dir(cfg["training"]["model_dir"],
overwrite=cfg["training"].get("overwrite", False))
_ = make_logger(model_dir, mode="train") # version string returned
# TODO: save version number in model checkpoints
# set the random seed
set_seed(seed=cfg["training"].get("random_seed", 42))
# load the data
train_data, dev_data, test_data, src_vocab, trg_vocab = load_data(
data_cfg=cfg["data"])
rl_method = cfg["training"].get("reinforcement_learning", {}).get("method", False)
# build an encoder-decoder model
model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
if rl_method=="a2c":
critic_model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab, is_critic=True)
# for training management, e.g. early stopping and model selection
if rl_method=="a2c":
trainer = TrainManager(model=model, config=cfg, critic_model=critic_model)
else:
trainer = TrainManager(model=model, config=cfg)
# store copy of original training config in model dir
shutil.copy2(cfg_file, model_dir + "/config.yaml")
# log all entries of config
log_cfg(cfg)
log_data_info(train_data=train_data, valid_data=dev_data,
test_data=test_data, src_vocab=src_vocab, trg_vocab=trg_vocab)
logger.info(str(model))
# store the vocabs
src_vocab_file = "{}/src_vocab.txt".format(cfg["training"]["model_dir"])
src_vocab.to_file(src_vocab_file)
trg_vocab_file = "{}/trg_vocab.txt".format(cfg["training"]["model_dir"])
trg_vocab.to_file(trg_vocab_file)
# train the model
trainer.train_and_validate(train_data=train_data, valid_data=dev_data)
# predict with the best model on validation and test
# (if test data is available)
ckpt = "{}/{}.ckpt".format(model_dir, trainer.stats.best_ckpt_iter)
output_name = "{:08d}.hyps".format(trainer.stats.best_ckpt_iter)
output_path = os.path.join(model_dir, output_name)
datasets_to_test = {"dev": dev_data, "test": test_data,
"src_vocab": src_vocab, "trg_vocab": trg_vocab}
test(cfg_file, ckpt=ckpt, output_path=output_path,
datasets=datasets_to_test)
if __name__ == "__main__":
parser = argparse.ArgumentParser('Joey-NMT')
parser.add_argument("config", default="configs/default.yaml", type=str,
help="Training configuration file (yaml).")
args = parser.parse_args()
train(cfg_file=args.config)
|
the-stack_106_21232
|
import urllib.request, json
import pandas as pd
import os
import datetime as dt
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from alpha_vantage.timeseries import TimeSeries
class DataLoader:
scaler = MinMaxScaler(feature_range=(0, 1))
api_key = "<Alpha Vantage API KEY>"
def get_historical_data(self, ticker, outputsize="full"):
ts = TimeSeries(key=self.api_key, output_format="pandas")
data, test = ts.get_daily(symbol=ticker, outputsize=outputsize)
data.drop(["1. open", "2. high", "3. low", "5. volume"], inplace=True, axis=1)
# data.drop(["3. low", "5. volume"], inplace=True, axis=1)
# data.columns = ["Open", "High", "Close"]
data.columns = ["Close"]
data.index = pd.to_datetime(data.index)
return data
def initalise_database_scheme(self, df):
df["Prediction"] = 0.00
df["Polarity"] = 0.00
df["Subjectivity"] = 0.00
return df
def get_current_price(self, ticker):
ts = TimeSeries(key=self.api_key)
data, _ = ts.get_intraday(symbol=ticker, interval="1min", outputsize="compact")
most_recent_time = list(data.keys())[0]
        current_price = data[most_recent_time]["4. close"]  # close of the most recent minute
return current_price
def split_data(self, df):
dataset = df.values
train = dataset[0:2500, :]
valid = dataset[2500:, :]
return dataset, train, valid
def get_train_data(self, df):
dataset, train, _ = self.split_data(df)
# converting dataset into x_train and y_train
scaler = self.scaler
scaled_data = scaler.fit_transform(dataset)
x_train, y_train = [], []
for i in range(60, len(train)):
x_train.append(scaled_data[i - 60 : i, 0])
y_train.append(scaled_data[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
return x_train, y_train
def df_to_array(self, df, column, number):
outcome = df.tail(number)
timestamp = outcome.index.tolist()
prediction = outcome[column].values.tolist()
alist = []
listoflist = []
for i in range(len(timestamp)):
alist.append(str(timestamp[i]))
alist.append(prediction[i])
listoflist.append(alist)
alist = []
return listoflist
def df_to_dic(self, df, number):
outcome = df.tail(number)
timestamp = outcome.index.tolist()
prediction = outcome["Prediction"].values.tolist()
stocks = dict()
for i in range(len(timestamp)):
stocks[i] = {"date": str(timestamp[i]), "prediction": prediction[i]}
return stocks
def df_append_future(self, df, days):
for _ in range(days):
isWeekday = False
day = 1
while isWeekday is False:
last_day = df.tail(1).index[0]
next_day = last_day + dt.timedelta(days=day)
day += 1
if next_day.weekday() <= 4:
isWeekday = True
new_row = pd.DataFrame([[0.00]], columns=["Close"], index=[next_day])
df = pd.concat([df, pd.DataFrame(new_row)], ignore_index=False)
return df
def db_to_df(self, query_output):
df = pd.DataFrame(
columns=["Date", "Close", "Polarity", "Prediction", "Subjectivity"]
)
df = df.fillna(0)
for i in query_output:
df2 = pd.DataFrame(
[i], columns=["Date", "Close", "Polarity", "Prediction", "Subjectivity"]
)
df = df.append(df2)
df["Date"] = pd.to_datetime(df.Date, format="%Y-%m-%d")
df.index = df["Date"]
df.drop(["Date"], inplace=True, axis=1)
return df
def df_to_csv(self, df, file_name):
df.to_csv(file_name)
def csv_to_df(self, file_name):
df = pd.read_csv(file_name)
df["Date"] = pd.to_datetime(df.Date, format="%Y-%m-%d")
df.index = df["Date"]
df.drop(["Date"], inplace=True, axis=1)
return df
##Old Code
# def get_df(self, ticker):
# api_key = "BY8JL5USPR4S629O"
# # JSON file with all the stock market data for AAL from the last 20 years
# url_string = (
# "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=%s&outputsize=full&apikey=%s"
# % (ticker, api_key)
# )
# # Save data to this file
# file_to_save = "stock_market_data-%s.csv" % ticker
# # If you haven't already saved data,
# # Go ahead and grab the data from the url
# # And store date, low, high, volume, close, open values to a Pandas DataFrame
# if not os.path.exists(file_to_save):
# with urllib.request.urlopen(url_string) as url:
# data = json.loads(url.read().decode())
# # extract stock market data
# data = data["Time Series (Daily)"]
# df = pd.DataFrame(columns=["Date", "Low", "High", "Close", "Open"])
# for k, v in data.items():
# date = dt.datetime.strptime(k, "%Y-%m-%d")
# data_row = [
# date.date(),
# float(v["3. low"]),
# float(v["2. high"]),
# float(v["4. close"]),
# float(v["1. open"]),
# ]
# print("Data saved to : %s" % file_to_save)
# df.to_csv(file_to_save)
# # If the data is already there, just load it from the CSV
# else:
# print("File already exists. Loading data from CSV")
# df = pd.read_csv(file_to_save)
# df["Date"] = pd.to_datetime(df.Date, format="%Y-%m-%d")
# df.index = df["Date"]
# df = df.sort_values("Date")
# df.drop(["Date", "Open", "Low", "High", "Unnamed: 0"], inplace=True, axis=1)
# return df
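
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how DataLoader might be driven end to end.
# The ticker symbol and CSV path are assumptions made for illustration only,
# and a real Alpha Vantage key must be set on DataLoader.api_key first.
if __name__ == "__main__":
    loader = DataLoader()
    history = loader.get_historical_data("IBM")           # daily close prices
    history = loader.initalise_database_scheme(history)   # add Prediction/Polarity/Subjectivity columns
    x_train, y_train = loader.get_train_data(history[["Close"]])
    print(f"training samples: {x_train.shape}, labels: {y_train.shape}")
    loader.df_to_csv(history, "history_IBM.csv")           # hypothetical output path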
|
the-stack_106_21233
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import getopt
import traceback
import re
import ast
print("PYTHON::: Starting imports")
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
print("PYTHON::: Py4J imported")
from py4j.protocol import Py4JJavaError
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.storagelevel import StorageLevel
from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
from pyspark.serializers import MarshalSerializer, PickleSerializer
from time import sleep
# for back compatibility
from pyspark.sql import SparkSession, DataFrame, Row
client = GatewayClient(port=int(sys.argv[1]))
sparkVersion = sys.argv[2]
print("PYTHON:: Starting gateway")
if re.match("^1\.[456]\..*$", sparkVersion) or re.match("^2\..*$", sparkVersion):
gateway = JavaGateway(client, auto_convert=True)
else:
gateway = JavaGateway(client)
print("PYTHON:: Gateway started")
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
bridge = gateway.entry_point
state = bridge.state()
state.markReady()
if sparkVersion.startswith("1.2"):
java_import(gateway.jvm, "org.apache.spark.sql.SparkSession")
java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
elif sparkVersion.startswith("1.3"):
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
elif re.match("^1\.[456]\..*$", sparkVersion):
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
elif re.match("^2\..*$", sparkVersion):
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "scala.Tuple2")
conf = None
sc = None
spark = None
code_info = None
class Logger(object):
def __init__(self):
self.out = ""
def write(self, message):
state.sendOutput(code_info.codeId(), message)
self.out = self.out + message
def get(self):
return self.out
def reset(self):
self.out = ""
def flush(self):
pass
output = Logger()
sys.stdout = output
sys.stderr = output
class Kernel(object):
def __init__(self, jkernel):
self._jvm_kernel = jkernel
def __getattr__(self, name):
return self._jvm_kernel.__getattribute__(name)
def __dir__(self):
parent = super().__dir__()
return parent + [x for x in self._jvm_kernel.__dir__() if x not in parent]
def createSparkContext(self, config):
global conf, sc, sqlContext
jconf = gateway.jvm.org.apache.spark.SparkConf(False)
for key,value in config.getAll():
jconf.set(key, value)
self._jvm_kernel.createSparkContext(jconf)
conf = None
sc = None
sqlContext = None
self.refreshContext()
def refreshContext(self):
global conf, sc, spark
# This is magic. Please look away. I was never here (prevents multiple gateways being instantiated)
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway
SparkContext._jvm = gateway.jvm
if sc is None:
jsc = self._jvm_kernel.javaSparkContext()
if jsc is not None:
jconf = self._jvm_kernel.sparkConf()
conf = SparkConf(_jvm=gateway.jvm, _jconf=jconf)
sc = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
if spark is None:
jspark = self._jvm_kernel.sparkSession()
if jspark is not None and sc is not None:
spark = SparkSession(sc, jsparkSession=jspark)
kernel = Kernel(bridge.kernel())
while True:
try:
next_code_info = state.nextCode()
# If code is not available, try again later
if next_code_info is None:
sleep(1)
continue
code_info = next_code_info
code_lines = code_info.code().split("\n")
final_code = None
for s in code_lines:
if s is None or len(s.strip()) == 0:
continue
# skip comment
if s.strip().startswith("#"):
continue
if final_code:
final_code += "\n" + s
else:
final_code = s
# Ensure the appropriate variables are set in the module namespace
kernel.refreshContext()
if final_code:
'''Parse the final_code to an AST parse tree. If the last node is an expression (where an expression
can be a print function or an operation like 1+1) turn it into an assignment where temp_val = last expression.
The modified parse tree will get executed. If the variable temp_val introduced is not none then we have the
result of the last expression and should return it as an execute result. The sys.stdout sendOutput logic
gets triggered on each logger message to support long running code blocks instead of bulk'''
ast_parsed = ast.parse(final_code)
the_last_expression_to_assign_temp_value = None
if isinstance(ast_parsed.body[-1], ast.Expr):
new_node = (ast.Assign(targets=[ast.Name(id='the_last_expression_to_assign_temp_value', ctx=ast.Store())], value=ast_parsed.body[-1].value))
ast_parsed.body[-1] = ast.fix_missing_locations(new_node)
compiled_code = compile(ast_parsed, "<string>", "exec")
eval(compiled_code)
if the_last_expression_to_assign_temp_value is not None:
state.markSuccess(code_info.codeId(), str(the_last_expression_to_assign_temp_value))
else:
state.markSuccess(code_info.codeId(), "")
del the_last_expression_to_assign_temp_value
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
state.markFailure(code_info.codeId(), excInnerError + str(sys.exc_info()))
except:
state.markFailure(code_info.codeId(), traceback.format_exc())
output.reset()
|
the-stack_106_21234
|
import logging
import sys
import pdb
import shutil
from tensorboardX import SummaryWriter
class TensorboardHandler(logging.Handler):
def __init__(self, writer, tag):
self.writer = writer
self.tag = tag
super().__init__()
def emit(self, record):
log_entry = self.format(record)
#tag, text_string, global_step=None, walltime=None
if self.writer.file_writer is not None:
self.writer.add_text(self.tag, log_entry)
class LoggerWriter:
def __init__(self, level, stream):
self.level = level
self._stream = stream
def write(self, message):
if message != '\n':
self.level(message)
self._stream.write(message)
self._stream.flush()
def flush(self):
self.level("")
self._stream.flush()
class LoggingMixin(object):
def initial_setup(self, args):
#print(f"Setting up logging, rank: {args.rank}")
root = logging.getLogger()
root.handlers = []
if args.debug:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s | %(message)s')
# When using distributed training only send a single process to stdout
if args.rank == 0:
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
root.addHandler(ch)
# send log to a file as well
fh = logging.FileHandler(self.exp_dir / f'stdout_{args.rank}.log', 'w')
fh.setFormatter(formatter)
root.addHandler(fh)
# For debug messages
debugh = logging.FileHandler(self.exp_dir / f'debug_{args.rank}.log', 'w')
debugh.setFormatter(formatter)
debugh.setLevel(logging.DEBUG)
root.addHandler(debugh)
self.tensorboard_dir = self.exp_dir / 'tensorboard'
if not args.is_distributed:
shutil.rmtree(str(self.tensorboard_dir), ignore_errors=True)
self.tensorboard_dir.mkdir(exist_ok=True)
if args.rank == 0:
log_dir = self.tensorboard_dir / "main"
else:
log_dir = self.tensorboard_dir / f"node{args.rank:03}"
self.tensorboard = SummaryWriter(log_dir=str(log_dir))
root.addHandler(TensorboardHandler(self.tensorboard, f"log{args.rank}"))
logging.info(f"Tensorboard logging to {self.tensorboard_dir.resolve()}")
self.global_step = 0
super().initial_setup(args)
def count_parameters(self, model):
nparams = 0
group_idx = 0
nlayers = 0
for param in model.parameters():
group_size = 1
for g in param.size():
group_size *= g
nparams += group_size
group_idx += 1
if len(param.shape) >= 2:
nlayers += 1
return nparams, nlayers
def model_setup(self, args):
super().model_setup(args)
nparams, nlayers = self.count_parameters(self.model)
logging.info(f"Model parameters: {nparams:,} layers: {nlayers}")
def start_of_batch_hook(self, progress, logging_epoch):
super().start_of_batch_hook(progress, logging_epoch)
self.global_step += 1
def add_losses_to_tensorboard(self, losses):
for loss_key, loss_value in losses.items():
self.tensorboard.add_scalar(loss_key, loss_value, global_step=self.global_step)
def training_loss_hook(self, progress, losses, logging_epoch):
super().training_loss_hook(progress, losses, logging_epoch)
self.add_losses_to_tensorboard(losses)
def test_loss_hook(self, losses):
super().test_loss_hook(losses)
self.add_losses_to_tensorboard(losses)
def postrun(self):
logging.info(f"Tensorboard logs at {self.tensorboard_dir.resolve()}")
if self.args.rank == 0:
self.tensorboard.export_scalars_to_json(self.exp_dir / "json_tensorboard.json")
self.tensorboard.close()
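
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of composing LoggingMixin with a trainer base class via
# cooperative multiple inheritance. BaseTrainer, the argparse fields and the
# experiment directory below are assumptions for illustration; the real base
# class and argument parsing live elsewhere in the project.
if __name__ == "__main__":
    from argparse import Namespace
    from pathlib import Path
    import torch.nn as nn

    class BaseTrainer:
        # Minimal stand-in for the project's real trainer base class.
        def initial_setup(self, args):
            self.args = args

        def model_setup(self, args):
            self.model = nn.Linear(8, 2)  # toy model so parameter counting has something to count

    class DemoTrainer(LoggingMixin, BaseTrainer):
        def __init__(self, exp_dir):
            self.exp_dir = Path(exp_dir)
            self.exp_dir.mkdir(parents=True, exist_ok=True)

    args = Namespace(rank=0, debug=False, is_distributed=False)
    trainer = DemoTrainer("experiments/demo_run")  # hypothetical experiment directory
    trainer.initial_setup(args)   # wires stdout/file/Tensorboard handlers
    trainer.model_setup(args)     # logs parameter and layer counts
    trainer.postrun()             # exports scalars and closes the Tensorboard writer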
|
the-stack_106_21238
|
"""Tests for importing OAS by Toolbox"""
import json
import random
import re
import string
from urllib.parse import urlparse
import importlib_resources as resources
import pytest
import yaml
from testsuite.config import settings
from testsuite import rawobj
from testsuite.rhsso.rhsso import OIDCClientAuth
from testsuite.toolbox import toolbox
from testsuite.utils import blame
# authentication in 3scale and mapping to OAS (http://spec.openapis.org/oas/v3.0.3#security-scheme-object):
#
# - 1 token -> see uber.json
# "securityDefinitions": {
# "apiKey": { "type": "apiKey", "in": "header", "name": "X-API-KEY"}
# },
# "security": { "apiKey": [ ] }
#
# - 2 tokens - not implemented yet https://issues.redhat.com/browse/THREESCALE-3279
#
# - RHSSO -> oauth2 -> see petstore-expanded.yaml which contains flow used in other OAuth2 tests
#
# - tokenUrl and authorizationUrl are ignored, so it is not possible to do any api calls,
# see https://issues.redhat.com/browse/THREESCALE-5925
#
# - RHSSO -> openId
# https://issues.redhat.com/browse/THREESCALE-5919
OAS_FILES = {'oas2': ['testsuite.resources.oas2', 'uber.json'],
'oas3': ['testsuite.resources.oas3', 'petstore-expanded.yaml']}
USER_KEY = '123456'
POLICIES = {'policies_config': [
{'name': 'apicast', 'version': 'builtin', 'configuration': {}, 'enabled': True},
{'name': 'keycloak_role_check', 'version': 'builtin', 'configuration': {
'type': 'blacklist', 'scopes': [
{'realm_roles': [], 'client_roles': [{'name': 'read'}, {'name': 'write'}]}]},
'enabled': True}]}
@pytest.fixture(scope="module", params=['oas2', 'oas3'])
def oas(request):
"""Loads oas file"""
fil_oas = None
fil_txt = None
if request.param == 'oas2':
src = resources.files(OAS_FILES[request.param][0]).joinpath(OAS_FILES[request.param][1])
with src.open('r') as opened_file:
fil_oas = json.load(opened_file)
parsed_url = urlparse(settings['threescale']['service']['backends']['httpbin'])
fil_oas['host'] = parsed_url.netloc
else:
src = resources.files(OAS_FILES[request.param][0]).joinpath(OAS_FILES[request.param][1])
with src.open('r') as oas3_fil:
fil_oas = yaml.load(oas3_fil, Loader=yaml.SafeLoader)
fil_oas['servers'][0]['url'] = settings['threescale']['service']['backends']['httpbin'] + '/anything'
with src.open('r') as oas3_fil:
fil_txt = oas3_fil.read()
new_url = settings['threescale']['service']['backends']['httpbin'] + '/anything'
fil_txt = fil_txt.replace('http://petstore.swagger.io/api', new_url)
fil_name = settings['toolbox']['podman_cert_dir'] + '/'
fil_name += ''.join(random.choice(string.ascii_letters) for _ in range(16))
if request.param == 'oas2':
toolbox.copy_string_to_remote_file(json.dumps(fil_oas), fil_name)
else:
toolbox.copy_string_to_remote_file(fil_txt, fil_name)
return {'type': request.param, 'file': fil_oas, 'file_name': fil_name}
@pytest.fixture(scope="module")
def import_oas(threescale_dst1, dest_client, request, oas):
"""Import OAS by Toolbox"""
import_cmd = f"import openapi -d {threescale_dst1} "
import_cmd += oas['file_name']
import_cmd += f" --default-credentials-userkey={USER_KEY} "
import_cmd += f"--target_system_name={blame(request, 'svc').translate(''.maketrans({'-':'_', '.':'_'}))}"
ret = toolbox.run_cmd(import_cmd)
(_, service_id, service_name) = re.findall(
r'^(Created|Updated) service id: (\d+), name: (.+)$', ret['stdout'], re.MULTILINE)[0]
service = dest_client.services[int(service_id)]
yield (ret, service_id, service_name, service)
if not settings["skip_cleanup"]:
service.delete()
toolbox.run_cmd(f"rm -f {oas['file_name']}", False)
@pytest.fixture(scope="module")
def import_oas_backend(threescale_dst1, dest_client, oas, import_oas):
"""Import backend OAS by Toolbox
    Only one backend is imported, based on servers[0].url.
For multiple servers, see https://issues.redhat.com/browse/THREESCALE-6197
"""
if oas['type'] == 'oas2':
pytest.skip("This testcase is oas3 only.")
service = import_oas[3]
import_cmd = f"import openapi -d {threescale_dst1} "
import_cmd += oas['file_name']
import_cmd += f" --target_system_name={service['system_name']} "
import_cmd += ' --backend'
ret = toolbox.run_cmd(import_cmd)
output = json.loads(ret['stdout'])
backend = dest_client.backends[int(output['id'])]
yield (ret, output, backend)
if not settings["skip_cleanup"]:
for bus in service.backend_usages.list():
bus.delete()
backend.delete()
toolbox.run_cmd(f"rm -f {oas['file_name']}", False)
@pytest.fixture(scope="module")
def account(custom_account, request, testconfig, dest_client, import_oas):
"Preconfigured account existing over whole testing session"
# pylint: disable=unused-argument
iname = blame(request, "account")
account = rawobj.Account(org_name=iname, monthly_billing_enabled=None, monthly_charging_enabled=None)
account.update(dict(name=iname, username=iname, email=f"{iname}@anything.invalid"))
return custom_account(threescale_client=dest_client, params=account)
@pytest.fixture(scope="module")
def app_plan(import_oas, account, custom_app_plan, request, oas):
"app plan bound to the service"
# pylint: disable=unused-argument
return custom_app_plan(rawobj.ApplicationPlan(blame(request, "aplan")), service=import_oas[3], autoclean=False)
@pytest.fixture(scope="module")
def application(import_oas, account, custom_app_plan, custom_application, request, oas, app_plan):
"application bound to the account, app_plan and service"
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
return custom_application(rawobj.Application(blame(request, "app"), app_plan))
def test_import(import_oas, oas):
"""Checks import results"""
ret, service_id, service_name, service = import_oas
assert not ret['stderr']
assert int(service_id) == int(service['id'])
assert oas['file']['info']['title'] == service_name
assert re.findall(r'^Service proxy updated$', ret['stdout'], re.MULTILINE)
assert re.findall(r'^destroying all mapping rules$', ret['stdout'], re.MULTILINE)
for path in oas['file']['paths'].keys():
for method in oas['file']['paths'][path].keys():
path_url = {'oas2': lambda: f"{oas['file']['basePath']}",
'oas3': lambda: f"{urlparse(oas['file']['servers'][0]['url']).path}"}[oas['type']]
path_url = f"{path_url()}{path}"
assert re.findall(
rf"^Created {method.upper()} {path_url}\$ endpoint$",
ret['stdout'],
re.MULTILINE)
if oas['type'] == 'oas3':
assert re.findall(r'^Service policies updated$', ret['stdout'], re.MULTILINE)
def test_import_backend(oas, import_oas_backend):
"""Checks import backend result."""
if oas['type'] == 'oas2':
pytest.skip("This testcase is oas3 only.")
ret, output, backend = import_oas_backend
assert not ret['stderr']
assert output['system_name'] == backend['system_name']
assert output['private_endpoint'] == backend['private_endpoint']
for key, value in output['mapping_rules'].items():
metric = backend.metrics.read(int(value['metric_id']))
assert metric['friendly_name'] == key
back_map = backend.mapping_rules.select_by(metric_id=int(value['metric_id']))[0]
assert back_map['pattern'] == value['pattern']
assert back_map['http_method'] == value['http_method']
assert back_map['delta'] == value['delta']
def test_service(import_oas, oas):
"""Checks importes service"""
service = import_oas[3]
assert service['description'] == oas['file']['info']['description']
assert service['name'] == oas['file']['info']['title']
def test_metrics_mappings_oas2(import_oas, oas):
"""Checks imported metrics - oas2"""
if oas['type'] == 'oas3':
pytest.skip("This testcase is oas2 only.")
service = import_oas[3]
metrics = service.metrics.list()
mappings = service.proxy.list().mapping_rules.list()
metr_number = 0
for path in oas['file']['paths'].keys():
metr_number += len(oas['file']['paths'][path].keys())
# +1 is 'hits' metric
assert metr_number + 1 == len(metrics)
for mapp in mappings:
path = mapp['pattern'].split(oas['file']['basePath'])[1].rstrip('$')
method = mapp['http_method'].lower()
assert oas['file']['paths'][path][method]
met = service.metrics[int(mapp['metric_id'])]
assert oas['file']['paths'][path][method]['description'] == met['description']
def test_metrics_mappings_oas3(import_oas, oas):
"""Checks imported metrics - oas3"""
# pylint: disable=too-many-nested-blocks
if oas['type'] == 'oas2':
pytest.skip("This testcase is oas3 only.")
service = import_oas[3]
metrics = service.metrics.list()
mappings = service.proxy.list().mapping_rules.list()
pet_number = 0
base_path = urlparse(oas['file']['servers'][0]['url']).path
for path in oas['file']['paths'].keys():
for method in oas['file']['paths'][path].keys():
pet = oas['file']['paths'][path][method]
name = pet['operationId']
for met in metrics:
if met['friendly_name'] == name:
pet_number += 1
assert pet['description'] == met['description']
for mapp in mappings:
if int(mapp['metric_id']) == int(met['id']):
assert mapp['pattern'] == f"{base_path}{path}$"
assert mapp['http_method'] == method.upper()
if met['name'] == 'hits':
methods = met.methods.list()
# +1 is 'hits' metric
assert len(methods) + 1 == len(metrics)
for meth in methods:
if meth['friendly_name'] == name:
assert meth['description'] == pet['description']
# +1 is 'hits' metric
assert pet_number + 1 == len(metrics)
def test_activedocs(import_oas, oas, dest_client):
"""Checks imported activedocs"""
service = import_oas[3]
acdoc = dest_client.active_docs.select_by(**{'service_id': service['id']})[0]
assert acdoc['name'] == oas['file']['info']['title']
if oas['type'] == 'oas3':
assert acdoc['name'] == oas['file']['info']['title']
assert acdoc['description'] == oas['file']['info']['description']
assert acdoc['published']
assert not acdoc['skip_swagger_validations']
assert acdoc['body']
def test_security(import_oas, oas):
"""Checks imported ANON policy"""
service = import_oas[3]
proxy = service.proxy.list()
policies = proxy.policies.list()['policies_config']
oidc = service.oidc().read()['oidc_configuration']
# this is used only for oas without security and param --default-credentials-userkey
# assert policies[0]['configuration']['auth_type'] == 'user_key'
# assert policies[0]['configuration']['user_key'] == USER_KEY
# assert policies[0]['enabled']
# assert policies[0]['name'] == 'default_credentials'
if oas['type'] == 'oas2':
assert len(policies) == 1
assert proxy['credentials_location'] == 'headers'
assert proxy['auth_app_key'] == 'app_key'
assert proxy['auth_app_id'] == 'app_id'
assert proxy['auth_user_key'] == 'X-API-KEY'
assert oidc['standard_flow_enabled']
if oas['type'] == 'oas3':
assert len(policies) == 2
assert policies[1]['name'] == 'keycloak_role_check'
assert policies[1]['configuration']['type'] == 'whitelist'
assert policies[1]['configuration']['scopes'][0]['client_roles'] == [{'name': 'read'}, {'name': 'write'}]
assert policies[1]['enabled']
assert oidc['direct_access_grants_enabled']
def test_request(import_oas, oas, rhsso_service_info, application):
"test request using one api endpoint, tune oidc setup for oas3"
service = import_oas[3]
path = '/anything/products'
if oas['type'] == 'oas3':
# this url should be updated because of https://issues.redhat.com/browse/THREESCALE-5925
update_params = dict(credentials_location='authorization',
oidc_issuer_endpoint=rhsso_service_info.authorization_url())
proxy = service.proxy.list()
proxy.update(params=update_params)
pol = proxy.policies.list()
pol.update(POLICIES)
application.register_auth(
'oidc', OIDCClientAuth.partial(rhsso_service_info, location='authorization'))
path = '/anything/pets'
client = application.api_client()
response = client.get(path)
assert response.status_code == 200
|
the-stack_106_21239
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import pickle
import dill
from pathlib import Path
from typing import Union
class Serializable:
"""
Serializable will change the behaviors of pickle.
- It only saves the state whose name **does not** start with `_`
It provides a syntactic sugar for distinguish the attributes which user doesn't want.
- For examples, a learnable Datahandler just wants to save the parameters without data when dumping to disk
"""
pickle_backend = "pickle" # another optional value is "dill" which can pickle more things of python.
default_dump_all = False # if dump all things
FLAG_KEY = "_qlib_serial_flag"
def __init__(self):
self._dump_all = self.default_dump_all
self._exclude = []
def __getstate__(self) -> dict:
return {
k: v for k, v in self.__dict__.items() if k not in self.exclude and (self.dump_all or not k.startswith("_"))
}
def __setstate__(self, state: dict):
self.__dict__.update(state)
@property
def dump_all(self):
"""
        Whether the object will dump all of its attributes.
"""
return getattr(self, "_dump_all", False)
@property
def exclude(self):
"""
        Attributes that will not be dumped.
"""
return getattr(self, "_exclude", [])
def config(self, dump_all: bool = None, exclude: list = None, recursive=False):
"""
configure the serializable object
Parameters
----------
        dump_all : bool
            whether the object will dump all of its attributes
        exclude : list
            attributes that will not be dumped
        recursive : bool
            whether the configuration is applied recursively to nested Serializable attributes
"""
params = {"dump_all": dump_all, "exclude": exclude}
for k, v in params.items():
if v is not None:
attr_name = f"_{k}"
setattr(self, attr_name, v)
if recursive:
for obj in self.__dict__.values():
# set flag to prevent endless loop
self.__dict__[self.FLAG_KEY] = True
if isinstance(obj, Serializable) and self.FLAG_KEY not in obj.__dict__:
obj.config(**params, recursive=True)
del self.__dict__[self.FLAG_KEY]
def to_pickle(self, path: Union[Path, str], dump_all: bool = None, exclude: list = None):
"""
Dump self to a pickle file.
Args:
path (Union[Path, str]): the path to dump
dump_all (bool, optional): if need to dump all things. Defaults to None.
exclude (list, optional): will exclude the attributes in this list when dumping. Defaults to None.
"""
self.config(dump_all=dump_all, exclude=exclude)
with Path(path).open("wb") as f:
self.get_backend().dump(self, f)
@classmethod
def load(cls, filepath):
"""
Load the serializable class from a filepath.
Args:
filepath (str): the path of file
Raises:
TypeError: the pickled file must be `type(cls)`
Returns:
`type(cls)`: the instance of `type(cls)`
"""
with open(filepath, "rb") as f:
object = cls.get_backend().load(f)
if isinstance(object, cls):
return object
else:
raise TypeError(f"The instance of {type(object)} is not a valid `{type(cls)}`!")
@classmethod
def get_backend(cls):
"""
Return the real backend of a Serializable class. The pickle_backend value can be "pickle" or "dill".
Returns:
module: pickle or dill module based on pickle_backend
"""
if cls.pickle_backend == "pickle":
return pickle
elif cls.pickle_backend == "dill":
return dill
else:
raise ValueError("Unknown pickle backend, please use 'pickle' or 'dill'.")
@staticmethod
def general_dump(obj, path: Union[Path, str]):
"""
A general dumping method for object
Parameters
----------
obj : object
the object to be dumped
path : Union[Path, str]
the target path the data will be dumped
"""
path = Path(path)
if isinstance(obj, Serializable):
obj.to_pickle(path)
else:
with path.open("wb") as f:
pickle.dump(obj, f)
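
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example showing the default dump behavior: attributes whose names
# start with "_" are dropped unless dump_all/config() says otherwise. The
# class name and file path below are made up for illustration.
if __name__ == "__main__":
    class DemoHandler(Serializable):
        def __init__(self):
            super().__init__()
            self.params = {"lr": 0.01}
            self._raw_data = list(range(1000))  # excluded by default (leading underscore)

    handler = DemoHandler()
    handler.to_pickle("demo_handler.pkl")            # hypothetical output path
    restored = DemoHandler.load("demo_handler.pkl")
    print(restored.params)                 # {'lr': 0.01}
    print(hasattr(restored, "_raw_data"))  # False: private state was not dumped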
|
the-stack_106_21241
|
import os
import errno
import numpy as np
from torch.nn import init
import torch
import torch.nn as nn
from PIL import Image, ImageDraw, ImageFont
from copy import deepcopy
import skimage.transform
from miscc.config import cfg
# For visualization ################################################
COLOR_DIC = {0:[128,64,128], 1:[244, 35,232],
2:[70, 70, 70], 3:[102,102,156],
4:[190,153,153], 5:[153,153,153],
6:[250,170, 30], 7:[220, 220, 0],
8:[107,142, 35], 9:[152,251,152],
10:[70,130,180], 11:[220,20, 60],
12:[255, 0, 0], 13:[0, 0, 142],
14:[119,11, 32], 15:[0, 60,100],
16:[0, 80, 100], 17:[0, 0, 230],
18:[0, 0, 70], 19:[0, 0, 0]}
FONT_MAX = 50
def drawCaption(convas, captions, ixtoword, vis_size, off1=2, off2=2):
num = captions.size(0)
img_txt = Image.fromarray(convas)
# get a font
# fnt = None # ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 50)
fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 50)
# get a drawing context
d = ImageDraw.Draw(img_txt)
sentence_list = []
for i in range(num):
cap = captions[i].data.cpu().numpy()
sentence = []
for j in range(len(cap)):
if cap[j] == 0:
break
word = ixtoword[cap[j]].encode('ascii', 'ignore').decode('ascii')
d.text(((j + off1) * (vis_size + off2), i * FONT_MAX), '%d:%s' % (j, word[:6]),
font=fnt, fill=(255, 255, 255, 255))
sentence.append(word)
sentence_list.append(sentence)
return img_txt, sentence_list
def build_super_images(real_imgs, captions, ixtoword,
attn_maps, att_sze, lr_imgs=None,
batch_size=cfg.TRAIN.BATCH_SIZE,
max_word_num=cfg.TEXT.WORDS_NUM):
nvis = 8
real_imgs = real_imgs[:nvis]
if lr_imgs is not None:
lr_imgs = lr_imgs[:nvis]
if att_sze == 17:
vis_size = att_sze * 16
else:
vis_size = real_imgs.size(2)
text_convas = \
np.ones([batch_size * FONT_MAX,
(max_word_num + 2) * (vis_size + 2), 3],
dtype=np.uint8)
for i in range(max_word_num):
istart = (i + 2) * (vis_size + 2)
iend = (i + 3) * (vis_size + 2)
text_convas[:, istart:iend, :] = COLOR_DIC[i]
real_imgs = \
nn.functional.interpolate(real_imgs,size=(vis_size, vis_size),
mode='bilinear', align_corners=False)
# [-1, 1] --> [0, 1]
real_imgs.add_(1).div_(2).mul_(255)
real_imgs = real_imgs.data.numpy()
# b x c x h x w --> b x h x w x c
real_imgs = np.transpose(real_imgs, (0, 2, 3, 1))
pad_sze = real_imgs.shape
middle_pad = np.zeros([pad_sze[2], 2, 3])
post_pad = np.zeros([pad_sze[1], pad_sze[2], 3])
if lr_imgs is not None:
lr_imgs = \
nn.functional.interpolate(lr_imgs,size=(vis_size, vis_size),
mode='bilinear', align_corners=False)
# [-1, 1] --> [0, 1]
lr_imgs.add_(1).div_(2).mul_(255)
lr_imgs = lr_imgs.data.numpy()
# b x c x h x w --> b x h x w x c
lr_imgs = np.transpose(lr_imgs, (0, 2, 3, 1))
# batch x seq_len x 17 x 17 --> batch x 1 x 17 x 17
seq_len = max_word_num
img_set = []
num = nvis # len(attn_maps)
text_map, sentences = \
drawCaption(text_convas, captions, ixtoword, vis_size)
text_map = np.asarray(text_map).astype(np.uint8)
bUpdate = 1
for i in range(num):
attn = attn_maps[i].cpu().view(1, -1, att_sze, att_sze)
# --> 1 x 1 x 17 x 17
attn_max = attn.max(dim=1, keepdim=True)
attn = torch.cat([attn_max[0], attn], 1)
#
attn = attn.view(-1, 1, att_sze, att_sze)
attn = attn.repeat(1, 3, 1, 1).data.numpy()
# n x c x h x w --> n x h x w x c
attn = np.transpose(attn, (0, 2, 3, 1))
num_attn = attn.shape[0]
#
img = real_imgs[i]
if lr_imgs is None:
lrI = img
else:
lrI = lr_imgs[i]
row = [lrI, middle_pad]
row_merge = [img, middle_pad]
row_beforeNorm = []
minVglobal, maxVglobal = 1, 0
for j in range(num_attn):
one_map = attn[j]
if (vis_size // att_sze) > 1:
one_map = \
skimage.transform.pyramid_expand(one_map, sigma=20,
upscale=vis_size // att_sze,
multichannel=True)
row_beforeNorm.append(one_map)
minV = one_map.min()
maxV = one_map.max()
if minVglobal > minV:
minVglobal = minV
if maxVglobal < maxV:
maxVglobal = maxV
for j in range(seq_len + 1):
if j < num_attn:
one_map = row_beforeNorm[j]
one_map = (one_map - minVglobal) / (maxVglobal - minVglobal)
one_map *= 255
#
PIL_im = Image.fromarray(np.uint8(img))
PIL_att = Image.fromarray(np.uint8(one_map))
merged = \
Image.new('RGBA', (vis_size, vis_size), (0, 0, 0, 0))
mask = Image.new('L', (vis_size, vis_size), (210))
merged.paste(PIL_im, (0, 0))
merged.paste(PIL_att, (0, 0), mask)
merged = np.array(merged)[:, :, :3]
else:
one_map = post_pad
merged = post_pad
row.append(one_map)
row.append(middle_pad)
#
row_merge.append(merged)
row_merge.append(middle_pad)
row = np.concatenate(row, 1)
row_merge = np.concatenate(row_merge, 1)
txt = text_map[i * FONT_MAX: (i + 1) * FONT_MAX]
if txt.shape[1] != row.shape[1]:
print('txt', txt.shape, 'row', row.shape)
bUpdate = 0
break
row = np.concatenate([txt, row, row_merge], 0)
img_set.append(row)
if bUpdate:
img_set = np.concatenate(img_set, 0)
img_set = img_set.astype(np.uint8)
return img_set, sentences
else:
return None
def build_super_images2(real_imgs, captions, cap_lens, ixtoword,
attn_maps, att_sze, vis_size=256, topK=5):
batch_size = real_imgs.size(0)
max_word_num = np.max(cap_lens)
text_convas = np.ones([batch_size * FONT_MAX,
max_word_num * (vis_size + 2), 3],
dtype=np.uint8)
real_imgs = \
nn.functional.interpolate(real_imgs,size=(vis_size, vis_size),
mode='bilinear', align_corners=False)
# [-1, 1] --> [0, 1]
real_imgs.add_(1).div_(2).mul_(255)
real_imgs = real_imgs.data.numpy()
# b x c x h x w --> b x h x w x c
real_imgs = np.transpose(real_imgs, (0, 2, 3, 1))
pad_sze = real_imgs.shape
middle_pad = np.zeros([pad_sze[2], 2, 3])
# batch x seq_len x 17 x 17 --> batch x 1 x 17 x 17
img_set = []
num = len(attn_maps)
text_map, sentences = \
drawCaption(text_convas, captions, ixtoword, vis_size, off1=0)
text_map = np.asarray(text_map).astype(np.uint8)
bUpdate = 1
for i in range(num):
attn = attn_maps[i].cpu().view(1, -1, att_sze, att_sze)
#
attn = attn.view(-1, 1, att_sze, att_sze)
attn = attn.repeat(1, 3, 1, 1).data.numpy()
# n x c x h x w --> n x h x w x c
attn = np.transpose(attn, (0, 2, 3, 1))
num_attn = cap_lens[i]
thresh = 2./float(num_attn)
#
img = real_imgs[i]
row = []
row_merge = []
row_txt = []
row_beforeNorm = []
conf_score = []
for j in range(num_attn):
one_map = attn[j]
mask0 = one_map > (2. * thresh)
conf_score.append(np.sum(one_map * mask0))
mask = one_map > thresh
one_map = one_map * mask
if (vis_size // att_sze) > 1:
one_map = \
skimage.transform.pyramid_expand(one_map, sigma=20,
upscale=vis_size // att_sze,
multichannel=True)
minV = one_map.min()
maxV = one_map.max()
one_map = (one_map - minV) / (maxV - minV)
row_beforeNorm.append(one_map)
sorted_indices = np.argsort(conf_score)[::-1]
for j in range(num_attn):
one_map = row_beforeNorm[j]
one_map *= 255
#
PIL_im = Image.fromarray(np.uint8(img))
PIL_att = Image.fromarray(np.uint8(one_map))
merged = \
Image.new('RGBA', (vis_size, vis_size), (0, 0, 0, 0))
mask = Image.new('L', (vis_size, vis_size), (180)) # (210)
merged.paste(PIL_im, (0, 0))
merged.paste(PIL_att, (0, 0), mask)
merged = np.array(merged)[:, :, :3]
row.append(np.concatenate([one_map, middle_pad], 1))
#
row_merge.append(np.concatenate([merged, middle_pad], 1))
#
txt = text_map[i * FONT_MAX:(i + 1) * FONT_MAX,
j * (vis_size + 2):(j + 1) * (vis_size + 2), :]
row_txt.append(txt)
# reorder
row_new = []
row_merge_new = []
txt_new = []
for j in range(num_attn):
idx = sorted_indices[j]
row_new.append(row[idx])
row_merge_new.append(row_merge[idx])
txt_new.append(row_txt[idx])
row = np.concatenate(row_new[:topK], 1)
row_merge = np.concatenate(row_merge_new[:topK], 1)
txt = np.concatenate(txt_new[:topK], 1)
if txt.shape[1] != row.shape[1]:
            print('Warnings: txt', txt.shape, 'row', row.shape,
                  'row_merge', row_merge.shape)
bUpdate = 0
break
row = np.concatenate([txt, row_merge], 0)
img_set.append(row)
if bUpdate:
img_set = np.concatenate(img_set, 0)
img_set = img_set.astype(np.uint8)
return img_set, sentences
else:
return None
####################################################################
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.orthogonal_(m.weight.data, 1.0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
nn.init.orthogonal_(m.weight.data, 1.0)
if m.bias is not None:
m.bias.data.fill_(0.0)
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def copy_G_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
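
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the intended initialization / snapshot pattern. The tiny
# generator below is made up for illustration; real models come from the
# project's model definitions, and 'output/models' is a hypothetical path.
if __name__ == "__main__":
    netG = nn.Sequential(nn.Linear(16, 32), nn.BatchNorm1d(32), nn.Linear(32, 16))
    netG.apply(weights_init)           # orthogonal init for Conv/Linear, N(1, 0.02) for BatchNorm
    avg_params = copy_G_params(netG)   # snapshot of the current generator weights
    # ... training steps would update netG here ...
    load_params(netG, avg_params)      # restore the snapshot (e.g. averaged weights) for evaluation
    mkdir_p('output/models')           # create the save directory if it does not exist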
|
the-stack_106_21242
|
import sys
import argparse
import torch
import marius as m
def main():
parser = argparse.ArgumentParser(
description='Configuration file based training', prog='train')
parser.add_argument('config',
metavar='config',
type=str,
help='Path to YAML configuration file that describes the training process. See documentation docs/config_interface for more details.')
args = parser.parse_args()
config = m.config.loadConfig(args.config, save=True)
m.manager.marius_train(config)
if __name__ == "__main__":
sys.exit(main())
|
the-stack_106_21244
|
from typing import List, Optional, Tuple
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.common.dtype as mstype
def generate(
model=None,
config=None,
input_ids: Optional[Tensor] = None,
input_mask: Optional[Tensor] = None,
max_length: Optional[int] = 1024,
min_length: Optional[int] = 200,
do_sample: Optional[bool] = False,
early_stopping: Optional[bool] = False,
num_beams: Optional[int] = 1,
temperature: Optional[float] = 1.0,
top_k: Optional[int] = 50,
top_p: Optional[float] = 1.0,
repetition_penalty: Optional[float] = 1.0,
bos_token_id: Optional[int] = 50256,
pad_token_id: Optional[int] = 50256,
eos_token_id: Optional[int] = 50256,
length_penalty: Optional[float] = 1.0,
no_repeat_ngram_size: Optional[int] = 0,
num_return_sequences: Optional[int] = 1,
attention_mask: Optional[Tensor] = None,
use_cache: Optional[bool] = True,
):
r"""
Generates sequences for models with a language modeling head. The method currently supports greedy decoding,
beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Args:
config: the config of gpt2 model which you want to use to generate.
input_ids (Tensor): shape with (batch_size, seq_length)
max_length (int): The maximum length of the sequence to be generated.
min_length: The minimum length of the sequence to be generated.
do_sample: Whether or not to use sampling ; use greedy decoding otherwise.
early_stopping: Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
num_beams: Number of beams for beam search. 1 means no beam search.
temperature: The value used to module the next token probabilities.
top_k: The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p: If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or higher are kept for generation.
repetition_penalty: Default 1.0 .The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
        bos_token_id: The id of the `beginning-of-sequence` token.
        pad_token_id: The id of the `padding` token.
eos_token_id: The id of the `end-of-sequence` token.
length_penalty: Exponential penalty to the length. 1.0 means no penalty. Default: 1.0.
no_repeat_ngram_size: If set to int > 0, all ngrams of that size can only occur once. Default: 0.
num_return_sequences: The number of independently computed returned sequences for each element in the batch. Default: 1.
attention_mask: shape with (batch_size, seq_length)
Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for
tokens that are not masked, and 0 for masked tokens.
use_cache: Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding. Default: True .
Returns:
List of shape (batch_size * num_return_sequences, seq_length)
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter
if all batches finished early due to the :obj:`eos_token_id`.
"""
if input_ids is not None:
batch_size, seq_len = P.Shape()(input_ids)
else:
batch_size = 1
assert model is not None, "model should not be a None object."
assert config is not None, "config of gpt2_model is a must input param."
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert (
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
), "`no_repeat_ngram_size` should be a positive integer."
assert (
isinstance(num_return_sequences, int) and num_return_sequences > 0
), "`num_return_sequences` should be a strictly positive integer."
# not allow to duplicate outputs when greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert (
num_return_sequences == 1
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
else:
# beam_search greedy generation conditions
assert (
num_beams >= num_return_sequences
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
    assert attention_mask is not None, "`attention_mask` should be provided."
vocab_size = config.vocab_size
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
if num_return_sequences > 1 or num_beams > 1:
expand_shape = (batch_size, effective_batch_mult * num_beams, seq_len)
broadcast_to = P.BroadcastTo(expand_shape)
input_ids = P.ExpandDims()(input_ids, 1) # [batch_size, 1, seq_len]
input_ids = broadcast_to(input_ids)
attention_mask = P.ExpandDims()(attention_mask, 1)
attention_mask = broadcast_to(attention_mask)
input_ids = P.Reshape()(input_ids, (effective_batch_size * num_beams, seq_len))
# shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = P.Reshape()(attention_mask, (effective_batch_size * num_beams, seq_len))
# shape: (batch_size * num_return_sequences * num_beams, cur_len)
cur_len = seq_len
assert (cur_len < max_length), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`"
if num_beams > 1:
output = generate_beam_search(
model=model,
config=config,
input_ids=input_ids,
input_mask=input_mask,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
#batch_size=effective_batch_size,
#num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
#vocab_size=vocab_size,
#attention_mask=attention_mask,
use_cache=use_cache,
)
else:
'''
output = generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
attention_mask=attention_mask,
use_cache=use_cache,
)
        '''
        # The greedy / no-beam-search path is not implemented in this snippet.
        raise NotImplementedError("generation with num_beams == 1 is not implemented yet")
    return output
def generate_no_beam_search(
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
pad_token_id,
eos_token_id,
batch_size,
attention_mask,
use_cache,
):
raise NotImplementedError('not implemented yet')
past = None
while cur_len < max_length:
pass
def generate_beam_search(
model,
config,
input_ids,
input_mask,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
pad_token_id,
eos_token_id,
#batch_size,
length_penalty,
num_beams:int,
#attention_mask,
use_cache
):
generated_ids = []
max_length = min(max_length,config.seq_length)
batch_size = config.batch_size
vocab_size = config.vocab_size
assert batch_size == 1, "For now, it only generates 1 sentence per batch."
#initialize beam_score as 0 tensor
init_beam_prob = np.zeros((batch_size,num_beams),dtype=float)
reshape = P.Reshape()
squeeze_shape = (-1,)
top_k = P.TopK(sorted=False)
if do_sample is False:
init_beam_prob[:,1:] = -1e9
# beam_scores in form of Tensor:
# beam_scores = Tensor(init_beam_prob,dtype=mstype.float32)
# beam_scores = reshape(beam_scores,squeeze_shape)
#Use numpy for now, since batch size is only 1
    # Flatten so that beam_scores[i] indexes beam i directly (batch_size is 1 here).
    beam_scores = init_beam_prob.reshape(-1)
#beam_scores: shape (batch_size*num_beams,)
#cache states
past_states = None
done_sentences = [False for _ in range(batch_size)]
input_ids_expand = replicate_input(input_ids,time=num_beams)
log_softmax = P.LogSoftmax(axis = -1)
first_token = True
while cur_len < max_length:
lst_logits = []
generated_ids.append([])
for i in range(num_beams):
lst_logits.append( model.predict(input_ids_expand,input_mask))
tuple_logits = tuple(lst_logits)
concat = P.Concat(axis = 0)
#concat from tuple of logits
logits = concat(tuple_logits)
next_token_logits = logits[::,cur_len,0:vocab_size]
# (num_beams,vocab_size)
scores = log_softmax(next_token_logits)
candidates = None
sentence_prefix = None
squeezed_scores = reshape(scores,squeeze_shape)
#(num_beam*vocab_size)
#indices_np = None
if first_token :
first_token = False
            values, indices = top_k(squeezed_scores[0:vocab_size], num_beams)
            # indices: (num_beams,)
            indices_np = indices.asnumpy()
            values_np = values.asnumpy()
            candidates = indices_np.tolist()
#for the first token, we choose 0 as default for all situations since the model is not .
sentence_prefix = [ 0 for _ in range(num_beams)]
for i in range(num_beams):
                # overwrite the initial score: each beam now starts from its own first token
                beam_scores[i] = values_np[i]
generated_ids[-1].append(candidates[i])
else:
# need to choose top beams^2 prob of token
            values, indices = top_k(squeezed_scores, num_beams * num_beams)
            indices_np = indices.asnumpy()
            values_np = values.asnumpy()
tmp_candidates = indices_np.tolist()
tmp_candidates_scores = []
for i in range(num_beams*num_beams):
sentence_index = indices_np[i]//vocab_size
# index of token, tmp_beam_score, sentence_index of token
tmp_candidates_scores.append((tmp_candidates[i]%vocab_size,values_np[i]+beam_scores[sentence_index],sentence_index))
#sort by beam_score
tmp_candidates_scores.sort(key=lambda x:x[1],reverse=True)
sentence_prefix = []
candidates = []
for i in range(num_beams):
sentence_prefix.append(tmp_candidates_scores[i][2])
candidates.append(tmp_candidates_scores[i][0])
                # the candidate score already includes the parent beam's cumulative score
                beam_scores[i] = tmp_candidates_scores[i][1]
input_np = input_ids_expand.asnumpy()
#(num_beams,seq_length)
new_input = np.zeros_like(input_np)
for i in range(num_beams):
new_input[i] = input_np[sentence_prefix[i]]
new_input[i][cur_len] = candidates[i]
generated_ids[-1].append(candidates[i])
        # feed the updated beams (new_input), not the stale input_np, into the next step
        input_ids_expand = Tensor(new_input, dtype=mstype.float32)
cur_len += 1
pass
#(seq_length,num_beams) -> (num_beams,seq_length)
generated_ids_np = np.array(generated_ids).T
token_ids = generated_ids_np.tolist()
return token_ids
def top_k_top_p_filtering(
logits: Tensor,
top_k: int = 0,
top_p: float = 1.0,
filter_value: float = -float("Inf"),
min_tokens_to_keep: int = 1,
) -> Tensor:
raise NotImplementedError
def replicate_input(input_ids: Tensor, time: int):
    """Replicate input_ids from (batch_size, seq_length) --> (batch_size*time, seq_length)."""
tile = P.Tile()
replicate_shape = (time,1)
ret = tile(input_ids,replicate_shape)
return ret
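
# --- Illustrative usage sketch (not part of the original module) ---
# A small, hedged demonstration of replicate_input: every row of a
# (batch_size, seq_length) id tensor is tiled `time` times, which is how
# generate() expands inputs to (batch_size * num_beams, seq_length) before
# beam search. The shapes below are made up for illustration.
if __name__ == "__main__":
    demo_ids = Tensor(np.arange(8).reshape(2, 4), mstype.int32)  # (batch_size=2, seq_length=4)
    expanded = replicate_input(demo_ids, time=3)                 # -> shape (6, 4)
    print(expanded.shape)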
|
the-stack_106_21246
|
# -*- coding: UTF-8 -*-
from flask import Blueprint, Flask, jsonify, request, make_response
import pdfkit
from utils.cm.utils import is_exist
from utils.cm.files import delete_dir
from utils.pdf.pdfkits import *
app = Blueprint('pdfapi', __name__)
# curl -v -H "Content-type: application/json" -X POST http://192.168.10.126:8084/pdf
# curl -XPOST -F [email protected] -F [email protected] -F [email protected] http://192.168.10.126:8084/pdf > test.pdf
@app.route('/pdf', methods=[ 'GET', 'POST' ])
def pdf():
obj = {}
if request.method == 'POST':
if request.json is not None:
if is_json(request.json):
obj = request.json
else:
obj = get_forms(request)
# options = {}
# options['orientation'] = 'Portrait'
# obj['options'] = options
result = get_pdf(obj)
if result is not None and is_exist(result, 'msg') == False:
response = make_response()
filename = result['filename']
fullpath = result['path'] + '/' + filename
response.data = open(fullpath, 'rb').read()
response.headers['Content-Disposition'] = "attachment; filename=" + filename
response.mimetype = 'application/pdf'
delete_dir(result['path'])
return response
else:
if result is None:
result = { 'msg': 'Json Data is error !!!' }
return jsonify(result), 200
|
the-stack_106_21247
|
import os, sys
file_path = os.path.abspath(__file__)
project_path = os.path.dirname(os.path.dirname(file_path))
sys.path.append(project_path)
import glob
import open3d as o3d
import numpy as np
from tqdm import tqdm
from dataset.base import DatasetBase
from geometry.pointcloud import make_o3d_pointcloud, extract_feats, match_feats, solve, refine
PSEUDO_LABEL_FNAME = 'pseudo-label.log'
class Dataset3DMatchSGP(DatasetBase):
'''
During teaching: labels are written to a separate directory
During learning: it acts like the train, with labels in a separate directory
'''
def __init__(self, data_root, scenes, label_root, mode, overlap_thr=0.3):
self.label_root = label_root
self.overlap_thr = overlap_thr
if not os.path.exists(label_root):
print(
'label root {} does not exist, entering teaching mode.'.format(
label_root))
self.mode = 'teaching'
os.makedirs(label_root, exist_ok=True)
elif mode == 'teaching':
print('label root {} will be overwritten to enter teaching mode'.
format(label_root))
self.mode = 'teaching'
else:
print('label root {} exists, entering learning mode.'.format(
label_root))
self.mode = 'learning'
super(Dataset3DMatchSGP, self).__init__(data_root, scenes)
# override
def parse_scene(self, root, scene):
if self.mode == 'teaching':
return self._parse_scene_teaching(root, scene)
elif self.mode == 'learning':
return self._parse_scene_learning(root, scene)
else:
print('Unsupported mode, abort')
exit()
# override
def load_data(self, folder, fname):
fname = os.path.join(self.root, folder, fname)
return make_o3d_pointcloud(np.load(fname)['pcd'])
def write_pseudo_label(self, idx, label, overlap):
scene_idx = self.scene_idx_map[idx]
pair_idx = self.pair_idx_map[idx]
# Access actual data
scene = self.scenes[scene_idx]
i, j = scene['pairs'][pair_idx]
folder = scene['folder']
label_file = os.path.join(self.label_root, folder, PSEUDO_LABEL_FNAME)
with open(label_file, 'a') as f:
f.write('{} {} {} '.format(i, j, overlap))
label_str = ' '.join(map(str, label.flatten()))
f.write(label_str)
f.write('\n')
def _parse_scene_teaching(self, root, scene):
# Generate pseudo labels
label_path = os.path.join(self.label_root, scene)
os.makedirs(label_path, exist_ok=True)
label_file = os.path.join(label_path, PSEUDO_LABEL_FNAME)
if os.path.exists(label_file):
os.remove(label_file)
with open(label_file, 'w') as f:
pass
# Load actual data
scene_path = os.path.join(root, scene)
# Load filenames
l = len(scene_path)
fnames = sorted(glob.glob(os.path.join(scene_path, '*.npz')))
fnames = [fname[l + 1:] for fname in fnames]
# Load overlaps.txt
pair_fname = os.path.join(scene_path, 'overlaps.txt')
with open(pair_fname, 'r') as f:
pair_content = f.readlines()
pairs = []
binary_info = []
# For a 3DMatch dataset for teaching,
# binary_info is (optional) for filtering: overlap
for line in pair_content:
lst = line.strip().split(' ')
src_idx = int(lst[0].split('.')[0].split('_')[-1])
dst_idx = int(lst[1].split('.')[0].split('_')[-1])
overlap = float(lst[2])
if overlap >= self.overlap_thr:
pairs.append((src_idx, dst_idx))
binary_info.append(overlap)
return {
'folder': scene,
'fnames': fnames,
'pairs': pairs,
'unary_info': [None for i in range(len(fnames))],
'binary_info': binary_info
}
'''
Pseudo-Labels not available. Generate paths for writing to them later.
'''
def _parse_scene_learning(self, root, scene):
# Load pseudo labels
label_path = os.path.join(self.label_root, scene, PSEUDO_LABEL_FNAME)
if not os.path.exists(label_path):
raise Exception('{} not found', label_path)
# Load actual data
scene_path = os.path.join(root, scene)
# Load filenames
l = len(scene_path)
fnames = sorted(glob.glob(os.path.join(scene_path, '*.npz')))
fnames = [fname[l + 1:] for fname in fnames]
# Load overlaps.txt
with open(label_path, 'r') as f:
pair_content = f.readlines()
pairs = []
binary_info = []
# For a 3DMatch dataset for learning,
# binary_info is the pseudo label: src to dst transformation.
for line in pair_content:
lst = line.strip().split(' ')
src_idx = int(lst[0].split('.')[0].split('_')[-1])
dst_idx = int(lst[1].split('.')[0].split('_')[-1])
overlap = float(lst[2])
T_data = list(map(float, lst[3:]))
T = np.array(T_data).reshape((4, 4))
if overlap >= self.overlap_thr:
pairs.append((src_idx, dst_idx))
binary_info.append(T)
return {
'folder': scene,
'fnames': fnames,
'pairs': pairs,
'unary_info': [None for i in range(len(fnames))],
'binary_info': binary_info
}
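
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged, commented-out example of the teach-then-learn flow this dataset
# supports. The scene name and directory paths are assumptions for
# illustration; real values come from the SGP pipeline configuration, and the
# data root must already contain the per-scene .npz files and overlaps.txt.
#
# scenes = ["7-scenes-redkitchen"]                # hypothetical scene folder
# data_root = "/data/3dmatch/train"               # hypothetical data root
# label_root = "/data/3dmatch/pseudo_labels"      # hypothetical label root
#
# # First pass: label_root does not exist, so the dataset enters teaching mode
# # and pseudo-label.log files are created as write_pseudo_label() is called.
# teacher_set = Dataset3DMatchSGP(data_root, scenes, label_root, mode="teaching")
#
# # Second pass: with pseudo-label.log in place and mode != "teaching", the
# # same class re-reads those labels (pairwise transformations) as supervision.
# learner_set = Dataset3DMatchSGP(data_root, scenes, label_root, mode="learning")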
|
the-stack_106_21248
|
#!/usr/bin/env python3
"""A dead simple aiohttp-based library for weeb.sh. Nothing more. Honest."""
from typing import List
import urllib
import asyncio
import aiohttp
BASE_URL_TYPES = "https://api.weeb.sh/images/types"
BASE_URL_TAGS = "https://api.weeb.sh/images/tags"
BASE_URL_RANDOM = "https://api.weeb.sh/images/random?{0}"
class Owoe:
"""A class that contains a simple interface for weeb.sh.
This will typically be used compositionally, as a component of a larger class.
"""
def __init__(self, token: str=None, session: aiohttp.ClientSession=None):
"""Constructor method for `Owoe`.
* `token` - An `str` containing your token from Wolke.
* `session` - An optional `aiohttp.ClientSession` to use Owoe with another program. If
not supplied, Owoe will create one for itself to be used standalone.
**Fields not in the constructor**
* `types` - A `list` of `str` containing all valid image types. It's recommended not to
update this yourself; instead, call `update_image_types()`.
* `tags` - A `list` of `str` containing all valid image tags. It's recommended not to
update this yourself; instead, call `update_image_tags()`.
* `headers` - A `dict` for simple HTTP authorization.
"""
self.token = token
self.headers = {"Authorization": f"Wolke {token}"}
self.types = []
self.tags = []
if not session:
loop = asyncio.get_event_loop()
self.session = aiohttp.ClientSession(loop=loop)
else:
self.session = session
async def update_image_types(self):
"""Update the image types `list` by calling the `/types` endpoint. This is a coroutine.
You must call this to populate the types `list`.
If successful, returns a `None`, otherwise returns an `int` with an HTTP status code.
"""
async with self.session.get(BASE_URL_TYPES, headers=self.headers) as response:
if response.status == 200:
data = await response.json()
types = data["types"]
self.types = []
for type_ in types:
self.types.append(type_)
return
return response.status
async def update_image_tags(self):
"""Update the image tags `list` by calling the `/tags` endpoint. This is a coroutine.
You must call this to populate the tags `list`.
If successful, returns a `None`, otherwise returns an `int` with an HTTP status code.
"""
async with self.session.get(BASE_URL_TAGS, headers=self.headers) as response:
if response.status == 200:
data = await response.json()
tags = data["tags"]
self.tags = []
for tag in tags:
self.tags.append(tag)
return
return response.status
async def random_image(self, type_: str=None, tags: List[str]=[]):
"""Get a random image from weeb.sh by calling the `/random` endpoint. This is a coroutine.
Possible return values are as follows:
* If successful, returns an `str` with the URL of the image.
* If an HTTP status error occurs, returns an `int` with the status code.
* `type_` - An `str` representing the type of the image to be obtained.
Must be in `self.types`. Has an underscore to avoid colliding with
built-in Python `type`.
* `tags` - A `list` of `str` to use in the image search.
"""
parameters_url = {}
if type_:
parameters_url["type"] = type_
if tags:
parameters_url["tags"] = urllib.parse.quote_plus(" ".join(tags))
parameters_url = urllib.parse.urlencode(parameters_url)
url_random = BASE_URL_RANDOM.format(parameters_url)
async with self.session.get(url_random, headers=self.headers) as response:
if response.status == 200:
data = await response.json()
url_image = data["url"]
return url_image
return response.status
|
the-stack_106_21249
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
#the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a Kibble JSON API plugin.
"""
import requests
import json
import time
import re
import base64
CONNECT_TIMEOUT = 2 # Max timeout for the connect part of a request.
# Should be set low as it may otherwise freeze the scanner.
def get(url, cookie = None, auth = None, token = None, retries = 5, timeout = 30):
headers = {
"Content-type": "application/json",
"Accept": "application/json",
"User-Agent": "Apache Kibble",
}
if auth:
xcreds = auth.encode(encoding='ascii', errors='replace')
bauth = base64.encodebytes(xcreds).decode('ascii', errors='replace').replace("\n", '')
headers["Authorization"] = "Basic %s" % bauth
if token:
headers["Authorization"] = "token %s" % token
if cookie:
headers["Cookie"] = cookie
rv = requests.get(url, headers = headers, timeout = (CONNECT_TIMEOUT, timeout))
# Some services may be rate limited. We'll try sleeping it off in 60 second
# intervals for a max of five minutes, then give up.
if rv.status_code == 429:
if retries > 0:
time.sleep(60)
retries -= 1
return get(url, cookie = cookie, auth = auth, token = token, retries = retries, timeout = timeout)
if rv.status_code < 400:
return rv.json()
raise requests.exceptions.ConnectionError("Could not fetch JSON, server responded with status code %u" % rv.status_code, response = rv)
def gettxt(url, cookie = None, auth = None):
""" Same as above, but returns as text blob """
headers = {
"Content-type": "application/json",
"Accept": "*/*"
}
if auth:
xcreds = auth.encode(encoding='ascii', errors='replace')
bauth = base64.encodebytes(xcreds).decode('ascii', errors='replace').replace("\n", '')
headers["Authorization"] = "Basic %s" % bauth
if cookie:
headers["Cookie"] = cookie
rv = requests.get(url, headers = headers)
js = rv.text
if rv.status_code != 404:
return js
return None
def post(url, data, cookie = None, auth = None):
headers = {
"Content-type": "application/json",
"Accept": "*/*",
"User-Agent": "Apache Kibble",
}
if auth:
xcreds = auth.encode(encoding='ascii', errors='replace')
bauth = base64.encodebytes(xcreds).decode('ascii', errors='replace').replace("\n", '')
headers["Authorization"] = "Basic %s" % bauth
if cookie:
headers["Cookie"] = cookie
rv = requests.post(url, headers = headers, json = data)
js = rv.json()
return js
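if __name__ == "__main__":
    # Hedged usage sketch (not part of the original plugin): exercises get() against a
    # public JSON endpoint. The URL is illustrative only; authenticated services would
    # pass auth="user:password" or token="..." as the helpers above expect.
    data = get("https://api.github.com/repos/apache/kibble")
    print(data.get("full_name"), "-", data.get("description"))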
|
the-stack_106_21252
|
import logging
import os
from django.conf import settings
APP_DIR = settings.LOG_DIR
LOG_FILE = os.path.join(APP_DIR, 'mics_odk.log')
logger = logging.getLogger('audit_logger')
handler = logging.FileHandler(LOG_FILE)
formatter = logging.Formatter('[%(asctime)s] %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
if settings.DEBUG:
LOG_LEVEL = logging.DEBUG
else:
LOG_LEVEL = logging.INFO
logger.setLevel(LOG_LEVEL)
class Enum(object):
__name__ = "Enum"
def __init__(self, **enums):
self.enums = enums
def __getattr__(self, item):
return self.enums[item]
def __getitem__(self, item):
return self.__getattr__(item)
def __iter__(self):
return iter(self.enums.values())
Actions = Enum(
PROFILE_ACCESSED="profile-accessed",
PUBLIC_PROFILE_ACCESSED="public-profile-accessed",
PROFILE_SETTINGS_UPDATED="profile-settings-updated",
USER_LOGIN="user-login",
USER_LOGOUT="user-logout",
USER_BULK_SUBMISSION="bulk-submissions-made",
USER_FORMLIST_REQUESTED="formlist-requested",
FORM_ACCESSED="form-accessed",
FORM_PUBLISHED="form-published",
FORM_UPDATED="form-updated",
FORM_XLS_DOWNLOADED="form-xls-downloaded",
FORM_XLS_UPDATED="form-xls-updated",
FORM_DELETED="form-deleted",
FORM_CLONED="form-cloned",
FORM_XML_DOWNLOADED="form-xml-downloaded",
FORM_JSON_DOWNLOADED="form-json-downloaded",
FORM_PERMISSIONS_UPDATED="form-permissions-updated",
FORM_ENTER_DATA_REQUESTED="form-enter-data-requested",
FORM_MAP_VIEWED="form-map-viewed",
FORM_DATA_VIEWED="form-data-viewed",
EXPORT_CREATED="export-created",
EXPORT_DOWNLOADED="export-downloaded",
EXPORT_DELETED="export-deleted",
EXPORT_LIST_REQUESTED="export-list-requested",
SUBMISSION_CREATED="submission-created",
SUBMISSION_UPDATED="submission-updated",
SUBMISSION_DELETED="submission-deleted",
SUBMISSION_ACCESSED="submission-accessed",
SUBMISSION_EDIT_REQUESTED="submission-edit-requested",
SUBMISSION_REQUESTED="submission-requested",
BAMBOO_LINK_CREATED="bamboo-link-created",
BAMBOO_LINK_DELETED="bamboo-link-deleted",
SMS_SUPPORT_ACTIVATED="sms-support-activated",
SMS_SUPPORT_DEACTIVATED="sms-support-deactivated",
)
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def audit_log(
action,
request_user,
investigator,
message,
audit,
request,
level=LOG_LEVEL
):
"""
Create a log message based on these params
@param action: Action performed e.g. form-deleted
@param request_user: User performing the action
@param investigator: The investigator the action was performed on
@param message: The message to be displayed on the log
@param audit: a dict of key/values of other info pertaining to the action e.g. form's id_string, submission uuid
@param request: The request object, used to determine the client IP
@param level: log level
@return: None
"""
extra = {
'action': action,
'request_username': str(request_user),
'account_username': investigator.name if investigator.name
else str(investigator),
'client_ip': get_client_ip(request),
'audit': audit
}
logger.log(level, message, extra=extra)
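# Hedged usage sketch (not part of the original module). A typical call from a view,
# assuming `request` is the incoming HttpRequest and `investigator` a model instance
# with a `name` attribute (the `form` object and its fields are illustrative only):
#
#   audit_log(Actions.FORM_PUBLISHED, request.user, investigator,
#             "Published form %s" % form.id_string,
#             {"id_string": form.id_string}, request)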
|
the-stack_106_21253
|
import json
import logging
import os
from collections import OrderedDict
from mgl2d.graphics.texture import Texture
from mgl2d.math.rect import Rect
from mgl2d.math.vector2 import Vector2
logger = logging.getLogger(__name__)
# NOTE: This class needs to be tested and updated
class FramesStore:
DEFAULT_FPS = 30
FLAG_FLIP_X = 1
FLAG_FLIP_Y = 2
FLAG_LOOP_ANIMATION = 16
def __init__(self):
self.images = {}
self.frames = {}
self.animations = {}
self.animation_fps = self.DEFAULT_FPS
def load(self, path, file_name):
logger.info("Loading '%s/%s'" % (path, file_name))
json_data = open(os.path.join(path, file_name))
data = json.load(json_data)
json_data.close()
meta = data.get('meta')
if meta:
fps = meta.get('fps')
if fps:
self.animation_fps = fps
for animation_name in data['animations']:
animation_data = data['animations'][animation_name]
# Check if this is a clone of another one
clone_of = animation_data.get('clone_of')
if clone_of:
animation_data = data['animations'][clone_of]
animation_data['clone_of'] = clone_of
self.animations[animation_name] = Animation(animation_data, animation_name)
# Load images and frames
for frame_name in data['frames']:
frame = data['frames'][frame_name]
if frame['image_file'] not in self.images:
    self.images[frame['image_file']] = Texture.load_from_file(
        os.path.join(path, frame['image_file']))
frame['image'] = self.images[frame['image_file']]
self.frames[frame_name] = Frame(frame, frame_name)
def get_frame(self, frame_name):
return self.frames[frame_name]
def get_animation(self, animation_name):
return self.animations[animation_name]
def get_animations(self):
return self.animations
def to_dictionary(self):
d = OrderedDict()
d['animations'] = {}
for a_name in self.animations.keys():
a = self.animations[a_name]
d['animations'][a.name] = a.to_dictionary()
d['frames'] = {}
for f_name in self.frames:
f = self.frames[f_name]
d['frames'][f.name] = f.to_dictionary()
return d
class Frame:
def __init__(self, data, frame_name):
self.name = frame_name
self.image_file = data['image_file']
self.image = data['image']
anchor = data.get('anchor')
if anchor:
self.anchor = Vector2(anchor['x'], anchor['y'])
else:
self.anchor = Vector2(0, 0)
rect = data.get('rect')
if rect:
self.rect = Rect(rect['x'], rect['y'], rect['width'], rect['height'])
else:
self.rect = Rect(0, 0, self.image.width, self.image.height)
attack_box = data.get('attack_box')
if attack_box:
if attack_box['width'] < 0:
attack_box['x'] += attack_box['width']
attack_box['width'] = -attack_box['width']
if attack_box['height'] < 0:
attack_box['y'] += attack_box['height']
attack_box['height'] = -attack_box['height']
self.attack_box = Rect(attack_box['x'], attack_box['y'], attack_box['width'], attack_box['height'])
else:
self.attack_box = None
hit_box = data.get('hit_box')
if hit_box:
if hit_box['width'] < 0:
hit_box['x'] += hit_box['width']
hit_box['width'] = -hit_box['width']
if hit_box['height'] < 0:
hit_box['y'] += hit_box['height']
hit_box['height'] = -hit_box['height']
self.hit_box = Rect(hit_box['x'], hit_box['y'], hit_box['width'], hit_box['height'])
else:
self.hit_box = Rect(0, 0, 0, 0)
def to_dictionary(self):
d = OrderedDict()
d['anchor'] = OrderedDict()
d['anchor']['x'] = self.anchor.x
d['anchor']['y'] = self.anchor.y
if not (self.rect.x == self.rect.y == 0 and self.rect.w == self.image.width
and self.rect.h == self.image.height):
# Save the rect only if it is different from the whole image
d['rect'] = OrderedDict()
d['rect']['x'] = self.rect.x
d['rect']['y'] = self.rect.y
d['rect']['width'] = self.rect.w
d['rect']['height'] = self.rect.h
if self.hit_box and self.hit_box.w != 0 and self.hit_box.h != 0:
d['hit_box'] = OrderedDict()
d['hit_box']['x'] = self.hit_box.x
d['hit_box']['y'] = self.hit_box.y
d['hit_box']['width'] = self.hit_box.w
d['hit_box']['height'] = self.hit_box.h
if self.attack_box and self.attack_box.w != 0 and self.attack_box.h != 0:
d['attack_box'] = OrderedDict()
d['attack_box']['x'] = self.attack_box.x
d['attack_box']['y'] = self.attack_box.y
d['attack_box']['width'] = self.attack_box.w
d['attack_box']['height'] = self.attack_box.h
d['image_file'] = self.image_file
return d
class AnimationFrame:
def __init__(self, data):
self.frame_name = data['frame']
self.flip_x = int(data.get('flip_x', 0))
self.flip_y = int(data.get('flip_y', 0))
self.delay = int(data.get('delay', 1))
def to_dictionary(self):
d = OrderedDict()
d['frame'] = self.frame_name
if self.flip_x == 1:
d['flip_x'] = 1
if self.flip_y == 1:
d['flip_y'] = 1
if self.delay > 1:
d['delay'] = self.delay
return d
class Animation:
def __init__(self, data, name):
self.name = name
self.clone_of = data.get('clone_of')
self.frames = []
if data.get('frames'):
for frame in data['frames']:
self.frames.append(AnimationFrame(frame))
def to_dictionary(self):
d = OrderedDict()
if self.clone_of:
d['clone_of'] = self.clone_of
else:
d['frames'] = [f.to_dictionary() for f in self.frames]
return d
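# Hedged usage sketch (not part of the original module). The loader expects a JSON
# document shaped roughly like the one below (field names are inferred from the
# parsing code above; the paths and frame names are illustrative only):
#
#   {
#     "meta": {"fps": 30},
#     "animations": {"walk": {"frames": [{"frame": "walk_0", "delay": 2}]}},
#     "frames": {"walk_0": {"image_file": "walk_0.png", "anchor": {"x": 24, "y": 48}}}
#   }
#
#   store = FramesStore()
#   store.load("assets/hero", "hero.json")
#   walk_cycle = store.get_animation("walk")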
|
the-stack_106_21254
|
import asyncio
import discord
from discord import HTTPException, InvalidArgument, Embed, Role, Emoji
from discord.ext import commands
from discord.ext.commands import Greedy
from Cogs.BaseCog import BaseCog
from Util import Permissioncheckers, MessageUtils, Translator, Pages, Utils
from Util.Converters import EmojiName
class Emoji(BaseCog):
def __init__(self, bot):
super().__init__(bot, {
"min": 2,
"max": 6,
"required": 3,
"commands": {
"emoji": {
"min": 2,
"max": 6,
"required": 3,
"commands": {
"list": {
"min": 0,
"max": 6,
"required": 3
}
}
}
}
})
Pages.register("emoji", self.emoji_list_init, self.emoji_list_update)
def cog_unload(self):
Pages.unregister("emoji")
async def cog_check(self, ctx):
return Permissioncheckers.check_permission(ctx) or ctx.channel.permissions_for(ctx.author).manage_emojis
@commands.group(aliases=["emote"])
@commands.guild_only()
async def emoji(self, ctx):
"""emoji_help"""
if ctx.subcommand_passed is None:
await ctx.invoke(self.bot.get_command("help"), query="emoji")
@emoji.command("list")
async def emoji_list(self, ctx):
await Pages.create_new(self.bot, "emoji", ctx)
async def emoji_list_init(self, ctx):
return None, self.gen_emoji_page(ctx.guild, 0), len(ctx.guild.emojis) > 0
async def emoji_list_update(self, ctx, message, page_num, action, data):
page_count = len(message.guild.emojis) + 1
if action == "PREV":
page_num -= 1
elif action == "NEXT":
page_num += 1
if page_num < 0:
page_num = page_count - 1
if page_num >= page_count:
page_num = 0
data["page"] = page_num
return None, self.gen_emoji_page(message.guild, page_num), data
def gen_emoji_page(self, guild, page):
se = sorted(guild.emojis, key=lambda e: e.name)
embed = Embed(color=0x2db1f3)
embed.set_author(name=Translator.translate('emoji_server', guild, server=guild.name, page=page + 1,
pages=len(guild.emojis) + 1), url=guild.icon_url)
if page == 0:
for chunk in Utils.chunks(se, 18):
embed.add_field(name="\u200b", value=" ".join(str(e) for e in chunk))
animated = set()
static = set()
for e in guild.emojis:
(animated if e.animated else static).add(str(e))
max_emoji = 200 if "MORE_EMOJI" in guild.features else 50
embed.add_field(name=Translator.translate('static_emoji', guild), value=f"{len(static)} / {max_emoji}")
embed.add_field(name=Translator.translate('animated_emoji', guild), value=f"{len(animated)} / {max_emoji}")
else:
self.add_emoji_info(guild, embed, se[page - 1])
return embed
@staticmethod
def add_emoji_info(location, embed, emoji):
embed.set_image(url=emoji.url)
embed.add_field(name=Translator.translate('id', location), value=emoji.id)
embed.add_field(name=Translator.translate('name', location), value=emoji.name)
for t in ["require_colons", "animated", "managed"]:
v = str(getattr(emoji, t)).lower()
embed.add_field(name=Translator.translate(f'emoji_{t}', location),
value=MessageUtils.assemble(location, 'YES' if v == 'true' else 'NO', v))
if len(emoji.roles) > 0:
roles = ", ".join(r.mention for r in emoji.roles)
else:
roles = Translator.translate("emoji_role_no_restrictions", location)
embed.add_field(name=Translator.translate("emoji_role_restrictions", location), value=roles)
@emoji.command("info")
async def emoji_info(self, ctx, emoji: Emoji):
embed = Embed(color=0x2db1f3)
self.add_emoji_info(ctx, embed, emoji)
await ctx.send(embed=embed)
@emoji.command("add", aliases=["upload", "create"])
@commands.bot_has_permissions(manage_emojis=True)
async def emoji_add(self, ctx, name: EmojiName, roles: Greedy[Role] = None):
"""emoji_upload_help"""
if len(ctx.message.attachments) == 0:
    return await MessageUtils.send_to(ctx, "NO", "emoji_upload_no_attachments")
use_counter = len(ctx.message.attachments) > 1
counter = 1
for attachment in ctx.message.attachments:
message = await MessageUtils.send_to(ctx, "YES", "emoji_upload_downloading")
async with self.bot.aiosession.get(attachment.proxy_url) as resp:
data = await resp.read()
if len(data) > 256000:
return await MessageUtils.try_edit(message, emoji="NO", string_name="emoji_upload_invalid_filesize",
filesize=round(len(data) / 1000))
try:
emote = await ctx.guild.create_custom_emoji(name=f"{name}{counter}" if use_counter else name, image=data, roles=roles)
counter += 1
embed = Embed(color=0x2db1f3)
self.add_emoji_info(ctx, embed, emote)
return await MessageUtils.try_edit(message, emoji="YES", string_name="emoji_upload_success",
emote=emote, embed=embed)
except HTTPException as msg:
if msg.code == 50035:
await MessageUtils.send_to(ctx, 'NO', 'emoji_upload_rejected')
else:
return await ctx.send(msg.text)
except InvalidArgument as msg:
return await MessageUtils.try_edit(message, emoji="NO", string_name="emoji_upload_invalid_file")
@emoji.command(aliases=["change", "rename", "redefine"])
@commands.bot_has_permissions(manage_emojis=True)
async def update(self, ctx, emote: discord.Emoji, new_name: EmojiName):
"""emoji_update_help"""
try:
await emote.edit(name=new_name, roles=emote.roles,
reason=Translator.translate("emoji_update_reason", ctx.guild.id,
user=str(ctx.author)))
except HTTPException as msg:
await ctx.send(msg.text)
else:
await asyncio.sleep(1) # sleep so the cache can update
embed = Embed(color=0x2db1f3)
self.add_emoji_info(ctx, embed, emote)
await MessageUtils.send_to(ctx, "YES", "emoji_update_success", new_name=new_name,
embed=embed)
@emoji.command(aliases=["remove", "nuke", "rmv", "del", "👋", "🗑"])
@commands.bot_has_permissions(manage_emojis=True)
async def delete(self, ctx, emote: discord.Emoji):
"""emoji_delete_help"""
try:
await emote.delete()
return await MessageUtils.send_to(ctx, "YES", "emoji_delete_success")
except HTTPException as msg:
return await ctx.send(msg.text)
@emoji.group("roles", aliases=["role"])
async def emoji_roles(self, ctx):
"""emoji_roles_help"""
if ctx.invoked_subcommand is self.emoji_roles:
await ctx.invoke(self.bot.get_command("help"), query="emoji roles")
@emoji_roles.command("add")
@commands.bot_has_permissions(manage_emojis=True)
async def emoji_roles_add(self, ctx, emote: discord.Emoji, roles: Greedy[discord.Role] = None):
if roles is None:
return await MessageUtils.send_to(ctx, 'NO', 'roles_no_roles')
todo = set()
refused = set()
for role in roles:
(refused if role in emote.roles else todo).add(role)
new_roles = list(emote.roles)
new_roles.extend(todo)
await emote.edit(name=emote.name, roles=new_roles)
await asyncio.sleep(1) # sleep so the cache can update
embed = Embed(color=0x2db1f3)
self.add_emoji_info(ctx, embed, emote)
if len(todo) > 0:
message = MessageUtils.assemble(ctx, "YES", "emoji_roles_add_success", roles=self.pretty_role_list(todo, ctx))
else:
message = ""
if len(refused) > 0:
message += "\n" + MessageUtils.assemble(ctx, "NO", "emoji_roles_add_roles_already_in_list",
roles=self.pretty_role_list(refused, ctx))
await ctx.send(message)
@emoji_roles.command("remove")
@commands.bot_has_permissions(manage_emojis=True)
async def emoji_roles_remove(self, ctx, emote: discord.Emoji, roles: Greedy[discord.Role]):
if roles is None:
return await MessageUtils.send_to(ctx, 'NO', 'roles_no_roles')
todo = set()
refused = set()
for role in roles:
(refused if role not in emote.roles else todo).add(role)
new_roles = list(emote.roles)
for role in todo:
new_roles.remove(role)
await emote.edit(name=emote.name, roles=new_roles)
await asyncio.sleep(1) # sleep so the cache can update
embed = Embed(color=0x2db1f3)
self.add_emoji_info(ctx, embed, emote)
message = MessageUtils.assemble(ctx, "YES", "emoji_roles_remove_success",
roles=self.pretty_role_list(todo, ctx))
if len(refused) > 0:
message += "\n" + MessageUtils.assemble(ctx, "NO", "emoji_roles_remove_role_not_in_list",
roles=self.pretty_role_list(refused, ctx))
await ctx.send(message)
def pretty_role_list(self, roles, destination):
out = ", ".join(f"`{role.name}`" for role in roles)
if len(out) > 900:
out = Translator.translate('too_many_roles_to_list', destination)
return out
def setup(bot):
bot.add_cog(Emoji(bot))
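# Hedged usage sketch (not part of the original cog): in the surrounding bot this file
# would normally be loaded as an extension; the module path below is an assumption.
#
#   bot.load_extension("Cogs.Emoji")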
|
the-stack_106_21256
|
from util import *
import matplotlib.pyplot as plt
plt.ion()
def distmat(p, q):
"""Computes pair-wise L2-distance between columns of p and q."""
d, pn = p.shape
d, qn = q.shape
pmag = np.sum(p**2, axis=0).reshape(1, -1)
qmag = np.sum(q**2, axis=0).reshape(1, -1)
dist = qmag + pmag.T - 2 * np.dot(p.T, q)
dist = (dist >= 0) * dist # Avoid small negatives due to numerical errors.
return np.sqrt(dist)
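# Hedged worked example (not in the original): with p = np.array([[0., 3.], [0., 4.]])
# the two column vectors are the points (0, 0) and (3, 4), so distmat(p, p) returns
# [[0., 5.], [5., 0.]] -- the pairwise Euclidean distances between the columns.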
def KMeans(x, K, iters):
"""Cluster x into K clusters using K-Means.
Inputs:
x: Data matrix, with one data vector per column.
K: Number of clusters.
iters: Number of iterations of K-Means to run.
Outputs:
means: Cluster centers, with one cluster center in each column.
"""
N = x.shape[1]
perm = np.arange(N)
np.random.shuffle(perm)
means = x[:, perm[:K]]
dist = np.zeros((K, N))
for ii in range(iters):
print('Kmeans iteration = %04d' % (ii+1))
for k in range(K):
dist[k, :] = distmat(means[:, k].reshape(-1, 1), x)
assigned_class = np.argmin(dist, axis=0)
for k in range(K):
means[:, k] = np.mean(x[:, (assigned_class == k).nonzero()[0]], axis=1)
return means
def ShowMeans(means, number=0):
"""Show the cluster centers as images."""
plt.figure(number)
plt.clf()
for i in range(means.shape[1]):
plt.subplot(1, means.shape[1], i+1)
plt.imshow(means[:, i].reshape(48, 48), cmap=plt.cm.gray)
plt.draw()
plt.pause(1)
#raw_input('Press Enter.')
def main():
K = 7
iters = 200
inputs_train, inputs_valid, inputs_test, target_train, target_valid, target_test = LoadData('toronto_face.npz')
means = KMeans(inputs_train, K, iters)
ShowMeans(means, 0)
if __name__ == '__main__':
main()
|
the-stack_106_21257
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl.testing import parameterized
import numpy
import six
from tensorflow.contrib.estimator.python.estimator import extenders
from tensorflow.contrib.timeseries.examples import lstm as lstm_example
from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import head as ts_head_lib
from tensorflow.contrib.timeseries.python.timeseries import input_pipeline
from tensorflow.contrib.timeseries.python.timeseries import model
from tensorflow.contrib.timeseries.python.timeseries import state_management
from tensorflow.core.example import example_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import adam
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import training as train
class HeadTest(test.TestCase):
def test_labels_provided_error(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL,
estimator_lib.ModeKeys.PREDICT]:
with self.assertRaisesRegexp(ValueError, "received a `labels`"):
model_fn(features={}, labels={"a": "b"}, mode=mode)
with self.assertRaisesRegexp(ValueError, "received a `labels`"):
model_fn(features={}, labels=array_ops.zeros([]), mode=mode)
def test_unknown_mode(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError, "Unknown mode 'Not a mode'"):
model_fn(features={}, labels={}, mode="Not a mode")
class _TickerModel(object):
num_features = 1
dtype = dtypes.float32
def initialize_graph(self, input_statistics):
pass
def define_loss(self, features, mode):
del mode # unused
return model.ModelOutputs(
loss=features["ticker"],
end_state=(features["ticker"], features["ticker"]),
prediction_times=array_ops.zeros(()),
predictions={"ticker": features["ticker"]})
class EvaluationMetricsTests(test.TestCase):
def test_metrics_consistent(self):
# Tests that the identity metrics used to report in-sample predictions match
# the behavior of standard metrics.
g = ops.Graph()
with g.as_default():
features = {
feature_keys.TrainEvalFeatures.TIMES:
array_ops.zeros((1, 1)),
feature_keys.TrainEvalFeatures.VALUES:
array_ops.zeros((1, 1, 1)),
"ticker":
array_ops.reshape(
math_ops.cast(
variables.VariableV1(
name="ticker",
initial_value=0,
dtype=dtypes.int64,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
.count_up_to(10),
dtype=dtypes.float32), (1, 1, 1))
}
model_fn = ts_head_lib.TimeSeriesRegressionHead(
model=_TickerModel(),
state_manager=state_management.PassthroughStateManager(),
optimizer=train.GradientDescentOptimizer(0.001)).create_estimator_spec
outputs = model_fn(
features=features, labels=None, mode=estimator_lib.ModeKeys.EVAL)
metric_update_ops = [
metric[1] for metric in outputs.eval_metric_ops.values()]
loss_mean, loss_update = metrics.mean(outputs.loss)
metric_update_ops.append(loss_update)
with self.cached_session() as sess:
coordinator = coordinator_lib.Coordinator()
queue_runner_impl.start_queue_runners(sess, coord=coordinator)
variables.local_variables_initializer().run()
sess.run(metric_update_ops)
loss_evaled, metric_evaled, nested_metric_evaled = sess.run(
(loss_mean, outputs.eval_metric_ops["ticker"][0],
outputs.eval_metric_ops[feature_keys.FilteringResults.STATE_TUPLE][
0][0]))
# The custom model_utils metrics for in-sample predictions should be in
# sync with the Estimator's mean metric for model loss.
self.assertAllClose(0., loss_evaled)
self.assertAllClose((((0.,),),), metric_evaled)
self.assertAllClose((((0.,),),), nested_metric_evaled)
coordinator.request_stop()
coordinator.join()
def test_custom_metrics(self):
"""Tests that the custom metrics can be applied to the estimator."""
model_dir = self.get_temp_dir()
estimator = ts_estimators.TimeSeriesRegressor(
model=lstm_example._LSTMModel(num_features=1, num_units=4),
optimizer=adam.AdamOptimizer(0.001),
config=estimator_lib.RunConfig(tf_random_seed=4),
model_dir=model_dir)
def input_fn():
return {
feature_keys.TrainEvalFeatures.TIMES: [[1, 2, 3], [7, 8, 9]],
feature_keys.TrainEvalFeatures.VALUES:
numpy.array([[[0.], [1.], [0.]], [[2.], [3.], [2.]]])
}
def metrics_fn(predictions, features):
# checking that the inputs are properly passed.
predict = predictions["mean"]
target = features[feature_keys.TrainEvalFeatures.VALUES][:, -1, 0]
return {
"plain_boring_metric386":
(math_ops.reduce_mean(math_ops.abs(predict - target)),
control_flow_ops.no_op()),
"fun_metric101": (math_ops.reduce_sum(predict + target),
control_flow_ops.no_op()),
}
# Evaluation without training is enough for testing custom metrics.
estimator = extenders.add_metrics(estimator, metrics_fn)
evaluation = estimator.evaluate(input_fn, steps=1)
self.assertIn("plain_boring_metric386", evaluation)
self.assertIn("fun_metric101", evaluation)
self.assertIn("average_loss", evaluation)
# The values are deterministic because of fixed tf_random_seed.
# However, if they become flaky, remove such exact comparisons.
self.assertAllClose(evaluation["plain_boring_metric386"], 1.130380)
self.assertAllClose(evaluation["fun_metric101"], 10.435442)
class _StubModel(object):
num_features = 3
dtype = dtypes.float64
def initialize_graph(self, input_statistics):
del input_statistics # unused
def _stub_model_fn():
return ts_head_lib.TimeSeriesRegressionHead(
model=_StubModel(),
state_manager=state_management.PassthroughStateManager(),
optimizer=train.AdamOptimizer(0.001)).create_estimator_spec
class TrainEvalFeatureCheckingTests(test.TestCase):
def test_no_time_feature(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.TrainEvalFeatures.TIMES)):
model_fn(
features={feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]},
labels=None,
mode=mode)
def test_no_value_feature(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.TrainEvalFeatures.VALUES)):
model_fn(
features={feature_keys.TrainEvalFeatures.TIMES: [[1]]},
labels=None,
mode=mode)
def test_bad_time_rank(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError,
"Expected shape.*for feature '{}'".format(
feature_keys.TrainEvalFeatures.TIMES)):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[[1]]],
feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]
},
labels=None,
mode=mode)
def test_bad_value_rank(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(ValueError,
"Expected shape.*for feature '{}'".format(
feature_keys.TrainEvalFeatures.VALUES)):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[1]],
feature_keys.TrainEvalFeatures.VALUES: [[1.]]
},
labels=None,
mode=mode)
def test_bad_value_num_features(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(
ValueError, "Expected shape.*, 3.*for feature '{}'".format(
feature_keys.TrainEvalFeatures.VALUES)):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[1]],
feature_keys.TrainEvalFeatures.VALUES: [[[1.]]]
},
labels=None,
mode=mode)
def test_bad_exogenous_shape(self):
model_fn = _stub_model_fn()
for mode in [estimator_lib.ModeKeys.TRAIN, estimator_lib.ModeKeys.EVAL]:
with self.assertRaisesRegexp(
ValueError,
"Features must have shape.*for feature 'exogenous'"):
model_fn(
features={
feature_keys.TrainEvalFeatures.TIMES: [[1]],
feature_keys.TrainEvalFeatures.VALUES: [[[1., 2., 3.]]],
"exogenous": [[1], [2]]
},
labels=None,
mode=mode)
class PredictFeatureCheckingTests(test.TestCase):
def test_no_time_feature(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.PredictionFeatures.TIMES)):
model_fn(
features={
feature_keys.PredictionFeatures.STATE_TUPLE: ([[[1.]]], 1.)
},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def test_no_start_state_feature(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError, "Expected a '{}' feature".format(
feature_keys.PredictionFeatures.STATE_TUPLE)):
model_fn(
features={feature_keys.PredictionFeatures.TIMES: [[1]]},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def test_bad_time_rank(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(ValueError,
"Expected shape.*for feature '{}'".format(
feature_keys.PredictionFeatures.TIMES)):
model_fn(
features={
feature_keys.PredictionFeatures.TIMES: 1,
feature_keys.PredictionFeatures.STATE_TUPLE: (1, (2, 3.))
},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def test_bad_exogenous_shape(self):
model_fn = _stub_model_fn()
with self.assertRaisesRegexp(
ValueError,
"Features must have shape.*for feature 'exogenous'"):
model_fn(
features={
feature_keys.PredictionFeatures.TIMES: [[1]],
feature_keys.PredictionFeatures.STATE_TUPLE: (1, (2, 3.)),
"exogenous": 1.
},
labels=None,
mode=estimator_lib.ModeKeys.PREDICT)
def _custom_time_series_regressor(
model_dir, head_type, exogenous_feature_columns):
return ts_estimators.TimeSeriesRegressor(
model=lstm_example._LSTMModel(
num_features=5, num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=adam.AdamOptimizer(0.001),
config=estimator_lib.RunConfig(tf_random_seed=4),
state_manager=state_management.ChainingStateManager(),
head_type=head_type,
model_dir=model_dir)
def _structural_ensemble_regressor(
model_dir, head_type, exogenous_feature_columns):
return ts_estimators.StructuralEnsembleRegressor(
periodicities=None,
num_features=5,
exogenous_feature_columns=exogenous_feature_columns,
head_type=head_type,
model_dir=model_dir)
def _ar_lstm_regressor(
model_dir, head_type, exogenous_feature_columns):
return ts_estimators.TimeSeriesRegressor(
model=ar_model.ARModel(
periodicities=10, input_window_size=10, output_window_size=6,
num_features=5,
exogenous_feature_columns=exogenous_feature_columns,
prediction_model_factory=functools.partial(
ar_model.LSTMPredictionModel,
num_units=10)),
head_type=head_type,
model_dir=model_dir)
class OneShotTests(parameterized.TestCase):
@parameterized.named_parameters(
{"testcase_name": "ar_lstm_regressor",
"estimator_factory": _ar_lstm_regressor},
{"testcase_name": "custom_time_series_regressor",
"estimator_factory": _custom_time_series_regressor},
{"testcase_name": "structural_ensemble_regressor",
"estimator_factory": _structural_ensemble_regressor})
def test_one_shot_prediction_head_export(self, estimator_factory):
def _new_temp_dir():
return os.path.join(test.get_temp_dir(), str(ops.uid()))
model_dir = _new_temp_dir()
categorical_column = feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = estimator_factory(
model_dir=model_dir,
exogenous_feature_columns=exogenous_feature_columns,
head_type=ts_head_lib.OneShotPredictionHead)
train_features = {
feature_keys.TrainEvalFeatures.TIMES: numpy.arange(
20, dtype=numpy.int64),
feature_keys.TrainEvalFeatures.VALUES: numpy.tile(numpy.arange(
20, dtype=numpy.float32)[:, None], [1, 5]),
"2d_exogenous_feature": numpy.ones([20, 2]),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 20)[:, None]
}
train_input_fn = input_pipeline.RandomWindowInputFn(
input_pipeline.NumpyReader(train_features), shuffle_seed=2,
num_threads=1, batch_size=16, window_size=16)
estimator.train(input_fn=train_input_fn, steps=5)
result = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertIn("average_loss", result)
self.assertNotIn(feature_keys.State.STATE_TUPLE, result)
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_savedmodel(_new_temp_dir(),
input_receiver_fn)
graph = ops.Graph()
with graph.as_default():
with session_lib.Session() as session:
signatures = loader.load(
session, [tag_constants.SERVING], export_location)
self.assertEqual([feature_keys.SavedModelLabels.PREDICT],
list(signatures.signature_def.keys()))
predict_signature = signatures.signature_def[
feature_keys.SavedModelLabels.PREDICT]
six.assertCountEqual(
self,
[feature_keys.FilteringFeatures.TIMES,
feature_keys.FilteringFeatures.VALUES,
"2d_exogenous_feature",
"categorical_exogenous_feature"],
predict_signature.inputs.keys())
features = {
feature_keys.TrainEvalFeatures.TIMES: numpy.tile(
numpy.arange(35, dtype=numpy.int64)[None, :], [2, 1]),
feature_keys.TrainEvalFeatures.VALUES: numpy.tile(numpy.arange(
20, dtype=numpy.float32)[None, :, None], [2, 1, 5]),
"2d_exogenous_feature": numpy.ones([2, 35, 2]),
"categorical_exogenous_feature": numpy.tile(numpy.array(
["strkey"] * 35)[None, :, None], [2, 1, 1])
}
feeds = {
graph.as_graph_element(input_value.name): features[input_key]
for input_key, input_value in predict_signature.inputs.items()}
fetches = {output_key: graph.as_graph_element(output_value.name)
for output_key, output_value
in predict_signature.outputs.items()}
output = session.run(fetches, feed_dict=feeds)
self.assertEqual((2, 15, 5), output["mean"].shape)
# Build a parsing input function, then make a tf.Example for it to parse.
export_location = estimator.export_savedmodel(
_new_temp_dir(),
estimator.build_one_shot_parsing_serving_input_receiver_fn(
filtering_length=20, prediction_length=15))
graph = ops.Graph()
with graph.as_default():
with session_lib.Session() as session:
example = example_pb2.Example()
times = example.features.feature[feature_keys.TrainEvalFeatures.TIMES]
values = example.features.feature[feature_keys.TrainEvalFeatures.VALUES]
times.int64_list.value.extend(range(35))
for i in range(20):
values.float_list.value.extend(
[float(i) * 2. + feature_number
for feature_number in range(5)])
real_feature = example.features.feature["2d_exogenous_feature"]
categorical_feature = example.features.feature[
"categorical_exogenous_feature"]
for i in range(35):
real_feature.float_list.value.extend([1, 1])
categorical_feature.bytes_list.value.append(b"strkey")
# Serialize the tf.Example for feeding to the Session
examples = [example.SerializeToString()] * 2
signatures = loader.load(
session, [tag_constants.SERVING], export_location)
predict_signature = signatures.signature_def[
feature_keys.SavedModelLabels.PREDICT]
((_, input_value),) = predict_signature.inputs.items()
feeds = {graph.as_graph_element(input_value.name): examples}
fetches = {output_key: graph.as_graph_element(output_value.name)
for output_key, output_value
in predict_signature.outputs.items()}
output = session.run(fetches, feed_dict=feeds)
self.assertEqual((2, 15, 5), output["mean"].shape)
if __name__ == "__main__":
test.main()
|
the-stack_106_21258
|
from pystac import Extensions
from pystac.item import Item
from pystac.extensions.base import (ItemExtension, ExtensionDefinition, ExtendedObject)
class EOItemExt(ItemExtension):
"""EOItemExt is the extension of the Item in the eo extension which
represents a snapshot of the earth for a single date and time.
Args:
item (Item): The item to be extended.
Attributes:
item (Item): The Item that is being extended.
Note:
Using EOItemExt to directly wrap an item will add the 'eo' extension ID to
the item's stac_extensions.
"""
def __init__(self, item):
if item.stac_extensions is None:
item.stac_extensions = [Extensions.EO]
elif Extensions.EO not in item.stac_extensions:
item.stac_extensions.append(Extensions.EO)
self.item = item
def apply(self, bands, cloud_cover=None):
"""Applies label extension properties to the extended Item.
Args:
bands (List[Band]): a list of :class:`~pystac.Band` objects that represent
the available bands.
cloud_cover (float or None): The estimate of cloud cover as a percentage (0-100) of the
entire scene. If not available the field should not be provided.
"""
self.bands = bands
self.cloud_cover = cloud_cover
@property
def bands(self):
"""Get or sets a list of :class:`~pystac.Band` objects that represent
the available bands.
Returns:
List[Band]
"""
return self.get_bands()
@bands.setter
def bands(self, v):
self.set_bands(v)
def get_bands(self, asset=None):
"""Gets an Item or an Asset bands.
If an Asset is supplied and the bands property exists on the Asset,
returns the Asset's value. Otherwise returns the Item's value or
all the asset's eo bands
Returns:
List[Band]
"""
if asset is not None and 'eo:bands' in asset.properties:
bands = asset.properties.get('eo:bands')
else:
bands = self.item.properties.get('eo:bands')
# get assets with eo:bands even if not in item
if asset is None and bands is None:
bands = []
for (key, value) in self.item.get_assets().items():
if 'eo:bands' in value.properties:
bands.extend(value.properties.get('eo:bands'))
if bands is not None:
bands = [Band(b) for b in bands]
return bands
def set_bands(self, bands, asset=None):
"""Set an Item or an Asset bands.
If an Asset is supplied, sets the property on the Asset.
Otherwise sets the Item's value.
"""
band_dicts = [b.to_dict() for b in bands]
if asset is not None:
asset.properties['eo:bands'] = band_dicts
else:
self.item.properties['eo:bands'] = band_dicts
@property
def cloud_cover(self):
"""Get or sets the estimate of cloud cover as a percentage (0-100) of the
entire scene. If not available the field should not be provided.
Returns:
float or None
"""
return self.get_cloud_cover()
@cloud_cover.setter
def cloud_cover(self, v):
self.set_cloud_cover(v)
def get_cloud_cover(self, asset=None):
"""Gets an Item or an Asset cloud_cover.
If an Asset is supplied and the Item property exists on the Asset,
returns the Asset's value. Otherwise returns the Item's value
Returns:
float
"""
if asset is None or 'eo:cloud_cover' not in asset.properties:
return self.item.properties.get('eo:cloud_cover')
else:
return asset.properties.get('eo:cloud_cover')
def set_cloud_cover(self, cloud_cover, asset=None):
"""Set an Item or an Asset cloud_cover.
If an Asset is supplied, sets the property on the Asset.
Otherwise sets the Item's value.
"""
if asset is None:
self.item.properties['eo:cloud_cover'] = cloud_cover
else:
asset.properties['eo:cloud_cover'] = cloud_cover
def __repr__(self):
return '<EOItemExt Item id={}>'.format(self.item.id)
@classmethod
def _object_links(cls):
return []
@classmethod
def from_item(cls, item):
return cls(item)
class Band:
"""Represents Band information attached to an Item that implements the eo extension.
Use Band.create to create a new Band.
"""
def __init__(self, properties):
self.properties = properties
def apply(self,
name,
common_name=None,
description=None,
center_wavelength=None,
full_width_half_max=None):
"""
Sets the properties for this Band.
Args:
name (str): The name of the band (e.g., "B01", "B02", "B1", "B5", "QA").
common_name (str): The name commonly used to refer to the band to make it easier
to search for bands across instruments. See the `list of accepted common names
<https://github.com/radiantearth/stac-spec/tree/v0.8.1/extensions/eo#common-band-names>`_.
description (str): Description to fully explain the band.
center_wavelength (float): The center wavelength of the band, in micrometers (μm).
full_width_half_max (float): Full width at half maximum (FWHM). The width of the band,
as measured at half the maximum transmission, in micrometers (μm).
"""
self.name = name
self.common_name = common_name
self.description = description
self.center_wavelength = center_wavelength
self.full_width_half_max = full_width_half_max
@classmethod
def create(cls,
name,
common_name=None,
description=None,
center_wavelength=None,
full_width_half_max=None):
"""
Creates a new band.
Args:
name (str): The name of the band (e.g., "B01", "B02", "B1", "B5", "QA").
common_name (str): The name commonly used to refer to the band to make it easier
to search for bands across instruments. See the `list of accepted common names
<https://github.com/radiantearth/stac-spec/tree/v0.8.1/extensions/eo#common-band-names>`_.
description (str): Description to fully explain the band.
center_wavelength (float): The center wavelength of the band, in micrometers (μm).
full_width_half_max (float): Full width at half maximum (FWHM). The width of the band,
as measured at half the maximum transmission, in micrometers (μm).
"""
b = cls({})
b.apply(name=name,
common_name=common_name,
description=description,
center_wavelength=center_wavelength,
full_width_half_max=full_width_half_max)
return b
@property
def name(self):
"""Get or sets the name of the band (e.g., "B01", "B02", "B1", "B5", "QA").
Returns:
str
"""
return self.properties.get('name')
@name.setter
def name(self, v):
self.properties['name'] = v
@property
def common_name(self):
"""Get or sets the name commonly used to refer to the band to make it easier
to search for bands across instruments. See the `list of accepted common names
<https://github.com/radiantearth/stac-spec/tree/v0.8.1/extensions/eo#common-band-names>`_.
Returns:
str
"""
return self.properties.get('common_name')
@common_name.setter
def common_name(self, v):
if v is not None:
self.properties['common_name'] = v
else:
self.properties.pop('common_name', None)
@property
def description(self):
"""Get or sets the description to fully explain the band. CommonMark 0.29 syntax MAY be
used for rich text representation.
Returns:
str
"""
return self.properties.get('description')
@description.setter
def description(self, v):
if v is not None:
self.properties['description'] = v
else:
self.properties.pop('description', None)
@property
def center_wavelength(self):
"""Get or sets the center wavelength of the band, in micrometers (μm).
Returns:
float
"""
return self.properties.get('center_wavelength')
@center_wavelength.setter
def center_wavelength(self, v):
if v is not None:
self.properties['center_wavelength'] = v
else:
self.properties.pop('center_wavelength', None)
@property
def full_width_half_max(self):
"""Get or sets the full width at half maximum (FWHM). The width of the band,
as measured at half the maximum transmission, in micrometers (μm).
Returns:
[float]
"""
return self.properties.get('full_width_half_max')
@full_width_half_max.setter
def full_width_half_max(self, v):
if v is not None:
self.properties['full_width_half_max'] = v
else:
self.properties.pop('full_width_half_max', None)
def __repr__(self):
return '<Band name={}>'.format(self.name)
def to_dict(self):
"""Returns the dictionary representing the JSON of this Band.
Returns:
dict: The wrapped dict of the Band that can be written out as JSON.
"""
return self.properties
@staticmethod
def band_range(common_name):
"""Gets the band range for a common band name.
Args:
common_name (str): The common band name. Must be one of the `list of accepted common names <https://github.com/radiantearth/stac-spec/tree/v0.8.1/extensions/eo#common-band-names>`_.
Returns:
Tuple[float, float] or None: The band range for this name as (min, max), or
None if this is not a recognized common name.
""" # noqa E501
name_to_range = {
'coastal': (0.40, 0.45),
'blue': (0.45, 0.50),
'green': (0.50, 0.60),
'red': (0.60, 0.70),
'yellow': (0.58, 0.62),
'pan': (0.50, 0.70),
'rededge': (0.70, 0.75),
'nir': (0.75, 1.00),
'nir08': (0.75, 0.90),
'nir09': (0.85, 1.05),
'cirrus': (1.35, 1.40),
'swir16': (1.55, 1.75),
'swir22': (2.10, 2.30),
'lwir': (10.5, 12.5),
'lwir11': (10.5, 11.5),
'lwir12': (11.5, 12.5)
}
return name_to_range.get(common_name)
@staticmethod
def band_description(common_name):
"""Returns a description of the band for one with a common name.
Args:
common_name (str): The common band name. Must be one of the `list of accepted common names <https://github.com/radiantearth/stac-spec/tree/v0.8.1/extensions/eo#common-band-names>`_.
Returns:
str or None: If a recognized common name, returns a description including the
band range. Otherwise returns None.
""" # noqa E501
r = Band.band_range(common_name)
if r is not None:
r = "Common name: {}, Range: {} to {}".format(common_name, r[0], r[1])
return r
EO_EXTENSION_DEFINITION = ExtensionDefinition(Extensions.EO, [ExtendedObject(Item, EOItemExt)])
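# Hedged usage sketch (not part of the original module). Band.create and EOItemExt.apply
# are defined above; the pystac Item constructor arguments shown are assumptions about
# the surrounding pystac 0.x API, and geometry/bbox/datetime values are placeholders.
#
#   item = Item(id="scene-1", geometry=geom, bbox=bbox, datetime=dt, properties={})
#   eo = EOItemExt(item)
#   eo.apply(bands=[Band.create(name="B04", common_name="red",
#                               center_wavelength=0.665)],
#            cloud_cover=12.5)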
|
the-stack_106_21259
|
import abc, json
from .yaml_parser import SimpleRegex, NestedRegex, ParamsRegex, Dictionary, List
class CodeWriter:
def __init__(self, name):
self.name = name
@abc.abstractmethod
def write(self):
pass
class DefaultWriter(CodeWriter):
def __init__(self, name, definition):
CodeWriter.__init__(self, name)
self.definition = sanitize(definition)
def write(self):
return f'{self.name} = \'{self.definition}\''
class BooleanWriter(CodeWriter):
def __init__(self, name, definition):
CodeWriter.__init__(self, name)
self.definition = definition
def write(self):
return f'{self.name} = {self.definition}'
class SimpleRegexWriter(CodeWriter):
def __init__(self, name, definition):
CodeWriter.__init__(self, name)
self.definition = sanitize(definition)
def write(self):
return f'{self.name} = f\'{self.definition}\''
class NestedRegexWriter(SimpleRegexWriter):
def __init__(self, name, definition, references):
CodeWriter.__init__(self, name)
self.definition = sanitize(definition, None, references)
class ParamsRegexWriter(SimpleRegexWriter):
def __init__(self, name, definition, params):
CodeWriter.__init__(self, name)
self.definition = sanitize(definition, None, params)
self.params = ', '.join(params)
def write(self):
return f'{self.name} = lambda {self.params}: f\'{self.definition}\''
class DictionaryWriter(CodeWriter):
def __init__(self, name, key_type, value_type, entries):
CodeWriter.__init__(self, name)
self.entries = []
for key, value in entries.items():
key = create_entry(key, key_type)
if isinstance(value, list):
value = f"[{', '.join(map(lambda x: json.dumps(x.value), value))}]"
else:
value = create_entry(value, value_type)
self.entries.append(f'({key}, {value})')
def write(self):
spaces = ' ' * (len(f'{self.name} = dict([') + 4)
joined_entries = f',\n{spaces}'.join(self.entries)
return f'{self.name} = dict([{joined_entries}])'
class ArrayWriter(CodeWriter):
def __init__(self, name, value_type, entries):
CodeWriter.__init__(self, name)
self.entries = []
value_type = to_python_type(value_type)
value_quote = '\'' if value_type == 'string' else ''
for value in entries:
value = value.replace('\'', '\\\'')
self.entries.append(f'r{value_quote}{value}{value_quote}')
def write(self):
joined_entries = ', '.join(self.entries)
return f'{self.name} = [{joined_entries}]'
def sanitize(value: str, value_type=None, tokens=None):
value = value.replace('{', '{{').replace('}', '}}')
if tokens:
for token in tokens:
value = value.replace(f'{{{token}}}', token)
try:
stringified = json.dumps(value, ensure_ascii=False)
except:
stringified = '"' + value + '"'
return stringified[1:len(stringified) - 1].replace("'", r"\'")
def create_entry(entry, entry_type: str) -> str:
p_type = to_python_type(entry_type)
if p_type == 'string':
quote = '"'
entry = entry.replace('\\', r'\\').replace('"', r'\"')
elif p_type == 'bool':
quote = ""
entry = bool(entry)
else:
quote = ""
return f'{quote}{entry}{quote}'
def to_python_type(type_: str) -> str:
if type_ == 'long':
return 'float'
elif type_ == 'char':
return 'string'
elif type_ == 'bool':
return 'bool'
else:
return type_
def generate_code(root):
lines = []
for token_name in root:
token = root[token_name]
if type(token) is SimpleRegex:
lines.append(SimpleRegexWriter(token_name, token.def_))
elif type(token) is NestedRegex:
lines.append(NestedRegexWriter(token_name, token.def_, token.references))
elif type(token) is ParamsRegex:
lines.append(ParamsRegexWriter(token_name, token.def_, token.params))
elif type(token) is Dictionary:
lines.append(DictionaryWriter(token_name, token.key_type, token.value_type, token.entries))
elif type(token) is List:
lines.append(ArrayWriter(token_name, token.type_, token.entries))
elif type(token) is list:
inferred_type = 'string'
lines.append(ArrayWriter(token_name, inferred_type, token))
elif type(token) is bool:
lines.append(BooleanWriter(token_name, token))
else:
lines.append(DefaultWriter(token_name, str(token)))
return lines
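if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): exercises a few writers
    # directly. The token names and patterns are illustrative only, and because this
    # module uses relative imports it must be run as part of its package
    # (python -m <package>.<module>) for the import at the top to resolve.
    print(SimpleRegexWriter("WORD", r"\w+").write())
    print(ParamsRegexWriter("EXACTLY", "^{pattern}$", ["pattern"]).write())
    print(ArrayWriter("KEYWORDS", "string", ["if", "else"]).write())
    print(BooleanWriter("CASE_SENSITIVE", True).write())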
|
the-stack_106_21261
|
# Code from Chapter 15 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
# The Box-Muller algorithm for constructing pseudo-random Gaussian-distributed numbers
import pylab as pl
import numpy as np
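# The transform implemented below is Marsaglia's polar variant of Box-Muller: draw
# (x1, x2) uniformly from the unit disc, let s = x1**2 + x2**2, and then
# y_i = x_i * sqrt(-2 * ln(s) / s) gives two independent N(0, 1) samples.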
def boxmuller(n):
x = np.zeros((n, 2))
y = np.zeros((n, 2))
for i in range(n):
x[i, :] = np.array([2, 2])
x2 = x[i, 0] * x[i, 0] + x[i, 1] * x[i, 1]
while (x2) > 1:
x[i, :] = np.random.rand(2) * 2 - 1
x2 = x[i, 0] * x[i, 0] + x[i, 1] * x[i, 1]
y[i, :] = x[i, :] * np.sqrt((-2 * np.log(x2)) / x2)
y = np.reshape(y, 2 * n)
return y
y = boxmuller(1000)
pl.hist(y, density=True, fc='k')
x = np.arange(-4, 4, 0.1)
pl.plot(x, 1 / np.sqrt(2 * np.pi) * np.exp(-0.5 * x ** 2), 'k', lw=6)
pl.xlabel('x', fontsize=24)
pl.ylabel('p(x)', fontsize=24)
pl.show()
|
the-stack_106_21265
|
from itertools import repeat, zip_longest
from typing import Iterator, Iterable
import tensorflow as tf
K = tf.keras
DISCARD_REMAINDER = 'DISCARD_REMAINDER'
def next_n(it: Iterator, n: int):
return list(map(next, repeat(it, n)))
def longest_grouper(iterable: Iterable, group_size: int, fillvalue=None):
"""
Collect data into fixed-length chunks or blocks, filling `fillvalue` for when the shorter iterables stop
:param iterable:
:param group_size:
:param fillvalue:
:return:
>>> list(longest_grouper('ABCDEFG', 3, 'x'))
[('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
"""
args = [iter(iterable)] * group_size
return zip_longest(*args, fillvalue=fillvalue)
def shortest_grouper(iterable: Iterable, group_size: int):
"""
Collect data into fixed-length chunks or blocks, stopping with the shortest iterable
:param iterable:
:param group_size:
:return:
>>> list(shortest_grouper('ABCDEFG', 3))
[('A', 'B', 'C'), ('D', 'E', 'F')]
"""
return zip(*[iter(iterable)] * group_size)
def grouper(iterable: Iterable, group_size: int, fill_value=DISCARD_REMAINDER):
if fill_value == DISCARD_REMAINDER:
return shortest_grouper(iterable, group_size)
else:
return longest_grouper(iterable, group_size, fill_value)
def with_device(device, op):
with device:
return op()
to_keras_lambda = lambda fn, name, *args: K.layers.Lambda(lambda args: fn(*args), name=name)(args)
index_by = lambda tensor, indices, axis=-1: tf.reduce_sum(
tensor * tf.one_hot(tf.cast(indices, tf.int32), tf.shape(tensor)[axis]), axis)
def extend_model(model, extension):
if isinstance(extension, list):
extension = K.Sequential([
K.layers.InputLayer(),
*extension
])
return K.Model(inputs=model.input, outputs=extension(model.output))
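if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): the grouping helpers are
    # pure-Python, but importing this module still requires TensorFlow to be installed.
    print(list(grouper("ABCDEFG", 3)))                    # [('A','B','C'), ('D','E','F')]
    print(list(grouper("ABCDEFG", 3, fill_value="x")))    # ... plus ('G', 'x', 'x')
    print(next_n(iter(range(10)), 4))                     # [0, 1, 2, 3]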
|
the-stack_106_21268
|
import setuptools
with open("README.md") as fp:
long_description = fp.read()
setuptools.setup(
name="cdk_aws_cookbook_206",
version="0.0.1",
description="An empty CDK Python app",
long_description=long_description,
long_description_content_type="text/markdown",
author="author",
package_dir={"": "cdk_aws_cookbook_206"},
packages=setuptools.find_packages(where="cdk_aws_cookbook_206"),
install_requires=[
"aws-cdk-lib>=2.0.0rc1",
"constructs>=10.0.0",
],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Code Generators",
"Topic :: Utilities",
"Typing :: Typed",
],
)
|
the-stack_106_21270
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Create a base docker image for building pyflann
"""
from __future__ import absolute_import, division, print_function
import os
from os.path import join
import ubelt as ub
def main():
# TODO: find a better place for root
ROOT = join(os.getcwd())
# ROOT = '.'
os.chdir(ROOT)
NAME = 'pyhesaff'
VERSION = '0.1.2'
DOCKER_TAG = '{}-{}'.format(NAME, VERSION)
QUAY_REPO = 'quay.io/erotemic/manylinux-for'
DOCKER_URI = '{QUAY_REPO}:{DOCKER_TAG}'.format(**locals())
dockerfile_fpath = join(ROOT, 'Dockerfile')
# This docker code is very specific for building linux binaries.
# We will need to do a bit of refactoring to handle OSX and windows.
# But the goal is to get at least one OS working end-to-end.
"""
Notes:
docker run --rm -it quay.io/pypa/manylinux2010_x86_64 /bin/bash
---
ls /opt/python
"""
BASE_IMAGE = 'quay.io/pypa/manylinux2010_x86_64'
docker_code = ub.codeblock(
f'''
FROM {BASE_IMAGE}
RUN yum install lz4-devel -y
RUN MB_PYTHON_TAG=cp27-cp27m && \
/opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
/opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
source ./venv-$MB_PYTHON_TAG/bin/activate && \
pip install scikit-build cmake ninja
RUN MB_PYTHON_TAG=cp27-cp27mu && \
/opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
/opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
source ./venv-$MB_PYTHON_TAG/bin/activate && \
pip install scikit-build cmake ninja
RUN MB_PYTHON_TAG=cp35-cp35m && \
/opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
/opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
source ./venv-$MB_PYTHON_TAG/bin/activate && \
pip install scikit-build cmake ninja
RUN MB_PYTHON_TAG=cp36-cp36m && \
/opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
/opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
source ./venv-$MB_PYTHON_TAG/bin/activate && \
pip install scikit-build cmake ninja
RUN MB_PYTHON_TAG=cp37-cp37m && \
/opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
/opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
source ./venv-$MB_PYTHON_TAG/bin/activate && \
pip install scikit-build cmake ninja
RUN MB_PYTHON_TAG=cp38-cp38 && \
/opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
/opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
source ./venv-$MB_PYTHON_TAG/bin/activate && \
pip install scikit-build cmake ninja
'''
)
docker_code2 = '\n\n'.join([ub.paragraph(p) for p in docker_code.split('\n\n')])
try:
print(ub.color_text('\n--- DOCKER CODE ---', 'white'))
print(ub.highlight_code(docker_code2, 'docker'))
print(ub.color_text('--- END DOCKER CODE ---\n', 'white'))
except Exception:
pass
with open(dockerfile_fpath, 'w') as file:
file.write(docker_code2)
docker_build_cli = ' '.join(
[
'docker',
'build',
'--tag {}'.format(DOCKER_TAG),
'-f {}'.format(dockerfile_fpath),
'.',
]
)
print('docker_build_cli = {!r}'.format(docker_build_cli))
if ub.argflag('--dry'):
print('DRY RUN')
print('WOULD RUN')
print(docker_build_cli)
else:
info = ub.cmd(docker_build_cli, verbose=3, shell=True)
if info['ret'] != 0:
print(ub.color_text('\n--- FAILURE ---', 'red'))
print('Failed command:')
print(info['command'])
print(info['err'])
print('NOTE: sometimes reruning the command manually works')
raise Exception(
'Building docker failed with exit code {}'.format(info['ret'])
)
else:
print(ub.color_text('\n--- SUCCESS ---', 'green'))
print(
ub.highlight_code(
ub.codeblock(
r'''
# Finished creating the docker image.
# To test / export / publish you can do something like this:
# Test that we can get a bash terminal
docker run -it {DOCKER_TAG} /bin/bash
# Create a tag for the docker image
docker tag {DOCKER_TAG} {DOCKER_URI}
# Export your docker image to a file
                        docker save -o {ROOT}/{DOCKER_TAG}.docker.tar {DOCKER_TAG}
# Login to a docker registry (we are using quay)
# In some cases this works,
docker login
# But you may need to specify secret credentials
load_secrets
echo "QUAY_USERNAME = $QUAY_USERNAME"
docker login -u $QUAY_USERNAME -p $QUAY_PASSWORD quay.io
unload_secrets
# Upload the docker image to quay.io
docker push {DOCKER_URI}
'''
).format(NAME=NAME, ROOT=ROOT, DOCKER_TAG=DOCKER_TAG, DOCKER_URI=DOCKER_URI,),
'bash',
)
)
PUBLISH = 0
if PUBLISH:
cmd1 = 'docker tag {DOCKER_TAG} {DOCKER_URI}'.format(**locals())
cmd2 = 'docker push {DOCKER_URI}'.format(**locals())
print('-- <push cmds> ---')
print(cmd1)
print(cmd2)
print('-- </push cmds> ---')
if __name__ == '__main__':
"""
CommandLine:
python ~/code/flann/dev/docker/make_base_image.py --dry
python ~/code/flann/dev/docker/make_base_image.py
"""
main()
|
the-stack_106_21271
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class NotificationTemplateParameter(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
NotificationTemplateParameter - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'text': 'str'
}
self.attribute_map = {
'name': 'name',
'text': 'text'
}
self._name = None
self._text = None
@property
def name(self):
"""
Gets the name of this NotificationTemplateParameter.
Parameter name
:return: The name of this NotificationTemplateParameter.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this NotificationTemplateParameter.
Parameter name
:param name: The name of this NotificationTemplateParameter.
:type: str
"""
self._name = name
@property
def text(self):
"""
Gets the text of this NotificationTemplateParameter.
Parameter text value
:return: The text of this NotificationTemplateParameter.
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""
Sets the text of this NotificationTemplateParameter.
Parameter text value
:param text: The text of this NotificationTemplateParameter.
:type: str
"""
self._text = text
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
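# Illustrative usage sketch (not part of the generated model; the attribute
# values below are made-up examples):
#
#   param = NotificationTemplateParameter()
#   param.name = "agentName"
#   param.text = "Jane Doe"
#   param.to_dict()   # {'name': 'agentName', 'text': 'Jane Doe'}
#   param.to_json()   # same content serialized as a JSON string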
|
the-stack_106_21272
|
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
HORIZONTAL MARGIN
Horizontal box margin.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2021 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
__all__ = ['HMargin']
import pygame
from pygame_menu.widgets.widget.none import NoneWidget
from pygame_menu._types import NumberType
# noinspection PyMissingOrEmptyDocstring
class HMargin(NoneWidget):
"""
Horizontal margin widget.
.. note::
HMargin does not accept any transformation.
:param margin: Horizontal margin in px
:param widget_id: ID of the widget
"""
def __init__(
self,
margin: NumberType,
widget_id: str = ''
) -> None:
super(HMargin, self).__init__(widget_id=widget_id)
self._rect.width = int(margin)
self._rect.height = 0
def get_rect(self, *args, **kwargs) -> 'pygame.Rect':
return self._rect.copy()
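# Minimal usage sketch (illustrative only): the widget just reserves
# horizontal space, so its rect width equals the requested margin and its
# height stays zero.
#
#   margin = HMargin(30)
#   assert margin.get_rect().width == 30
#   assert margin.get_rect().height == 0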
|
the-stack_106_21273
|
#!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
# - Task Assistance
#
# Author: Zhuo Chen <[email protected]>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from base64 import b64encode
import cv2
import numpy as np
from gabriel_lego.cv import zhuocv3 as zc
from gabriel_lego.lego_engine import config
def bitmap2syn_img(bitmap):
'''
Convert a bitmap to colored single-pixel-per-brick image
'''
palette = np.array(
[[128, 128, 128], [255, 255, 255], [0, 255, 0], [0, 255, 255],
[0, 0, 255], [255, 0, 0], [0, 0, 0], [255, 0, 255]], dtype=np.uint8)
img_syn = palette[bitmap]
# img_syn = cv2.resize(img_syn, (150,150), interpolation =
# cv2.INTER_NEAREST)
# img_syn_large = np.zeros([img_syn.shape[0] + 10, img_syn.shape[1] + 10,
# img_syn.shape[2]])
# img_syn_large[5:-5, 5:-5, :] = img_syn
return img_syn
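# Illustrative sketch: each bitmap entry indexes the palette above, so a 2x2
# label bitmap becomes a 2x2x3 uint8 (BGR) image with one pixel per brick.
#
#   bitmap = np.array([[1, 2], [0, 6]])
#   img = bitmap2syn_img(bitmap)   # shape (2, 2, 3)
#   # img[0, 0] is white ([255, 255, 255]); img[1, 1] is black ([0, 0, 0])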
def bitmap2guidance_img(bitmap, diff_piece, action, max_height=100,
max_width=100):
'''
Generate single image guidance based on the target bitmap and operating
piece (a piece that has been added/removed/moved)
Marks the operating piece using coloed boxes if it's add/remove operation
'''
    # get dimensions in "lego bricks"
brick_h, brick_w = bitmap.shape
img_syn = bitmap2syn_img(bitmap)
scale1 = max_height // img_syn.shape[0]
scale2 = max_width // img_syn.shape[1]
scale = min(scale1, scale2)
scaled_w = img_syn.shape[1] * scale
scaled_h = img_syn.shape[0] * scale
img_guidance = cv2.resize(img_syn, (scaled_w, scaled_h),
interpolation=cv2.INTER_NEAREST)
# calculate anchors for guidelines, then draw them
col_anchors = np.rint(np.linspace(0, scaled_w, brick_w + 1))
row_anchors = np.rint(np.linspace(0, scaled_h, brick_h + 1))
for col in map(lambda x: int(x), col_anchors):
cv2.line(img_guidance, (col, 0), (col, scaled_h), (128, 128, 128, 1))
for row in map(lambda x: int(x), row_anchors):
cv2.line(img_guidance, (0, row), (scaled_w, row), (128, 128, 128, 1))
## highlight the new piece(s)
if diff_piece is not None:
row_idx, col_idx_start, col_idx_end, _, _ = diff_piece
row_idx_start = row_idx * scale
row_idx_end = row_idx_start + scale - 1
col_idx_start = col_idx_start * scale
col_idx_end = (col_idx_end + 1) * scale - 1
if action == config.ACTION_ADD:
cv2.line(img_guidance, (col_idx_start, row_idx_start),
(col_idx_start, row_idx_end), (255, 0, 255), 2)
cv2.line(img_guidance, (col_idx_end, row_idx_start),
(col_idx_end, row_idx_end), (255, 0, 255), 2)
cv2.line(img_guidance, (col_idx_start, row_idx_start),
(col_idx_end, row_idx_start), (255, 0, 255), 2)
cv2.line(img_guidance, (col_idx_start, row_idx_end),
(col_idx_end, row_idx_end), (255, 0, 255), 2)
elif action == config.ACTION_REMOVE:
            # previous implementation drew a yellow outline, but I prefer a
            # magenta cross to indicate removal (it's much easier to see)
cv2.line(img_guidance,
(col_idx_start, row_idx_start),
(col_idx_end, row_idx_end),
(255, 0, 255), 2)
cv2.line(img_guidance,
(col_idx_start, row_idx_end),
(col_idx_end, row_idx_start),
(255, 0, 255), 2)
# cv2.line(img_guidance, (col_idx_start, row_idx_start),
# (col_idx_start, row_idx_end), (0, 255, 255), 2)
# cv2.line(img_guidance, (col_idx_end, row_idx_start),
# (col_idx_end, row_idx_end), (0, 255, 255), 2)
# cv2.line(img_guidance, (col_idx_start, row_idx_start),
# (col_idx_end, row_idx_start), (0, 255, 255), 2)
# cv2.line(img_guidance, (col_idx_start, row_idx_end),
# (col_idx_end, row_idx_end), (0, 255, 255), 2)
return img_guidance
def bitmap2guidance_animation(bitmap, action, diff_piece=None, diff_piece2=None,
max_height=200, max_width=200):
def enlarge_and_shift(img, row_idx, col_idx_start, col_idx_end, direction,
ratio):
height = img.shape[0]
width = img.shape[1]
shift_color = img[row_idx, col_idx_start, :]
scale1 = float(max_width) / width
scale2 = float(max_height) / height
scale = min(scale1, scale2)
width_large = int(width * scale)
height_large = int(height * scale)
img_large = np.ones((max_height, max_width, img.shape[2]),
dtype=np.uint8) * 128
        row_off = (max_height - height_large) // 2
        col_off = (max_width - width_large) // 2
        img_stuff = img_large[row_off: row_off + height_large,
                              col_off: col_off + width_large]
img_resized = cv2.resize(img, (width_large, height_large),
interpolation=cv2.INTER_NEAREST)
img_stuff[:, :, :] = img_resized # this is like copyTo in c++
if direction == config.DIRECTION_UP:
img_stuff[int(row_idx * scale): int((row_idx + 1) * scale),
int(col_idx_start * scale): int((col_idx_end + 1) * scale), :] = [
128, 128, 128]
img_stuff[
int((row_idx - ratio) * scale): int((row_idx + 1 - ratio) * scale),
int(col_idx_start * scale): int((col_idx_end + 1) * scale),
:] = shift_color
elif direction == config.DIRECTION_DOWN:
img_stuff[int(row_idx * scale): int((row_idx + 1) * scale),
int(col_idx_start * scale): int((col_idx_end + 1) * scale), :] = [
128, 128, 128]
img_stuff[
int((row_idx + ratio) * scale): int((row_idx + 1 + ratio) * scale),
int(col_idx_start * scale): int((col_idx_end + 1) * scale),
:] = shift_color
return img_large
def encode_images(img_animation):
img_animation_ret = []
for cv_img, duration in img_animation:
img_animation_ret.append(
(b64encode(zc.cv_image2raw(cv_img)), duration))
return img_animation_ret
img_animation = []
if diff_piece is not None:
row_idx, col_idx_start, col_idx_end, direction, label = diff_piece
if diff_piece2 is not None:
row_idx2, col_idx_start2, col_idx_end2, direction2, label2 = diff_piece2
if diff_piece is not None:
height = bitmap.shape[0]
width = bitmap.shape[1]
if (
row_idx == 0 or row_idx == height - 1) and direction != \
config.DIRECTION_NONE:
bitmap_new = np.zeros((bitmap.shape[0] + 1, bitmap.shape[1]),
dtype=int)
if row_idx == 0:
bitmap_new[1:, :] = bitmap
row_idx += 1
diff_piece = shift_piece(diff_piece, (1, 0))
if diff_piece2 is not None:
row_idx2 += 1
diff_piece2 = shift_piece(diff_piece2, (1, 0))
else:
bitmap_new[:-1, :] = bitmap
bitmap = bitmap_new
if diff_piece2 is not None:
height = bitmap.shape[0]
width = bitmap.shape[1]
if (
row_idx2 == 0 or row_idx2 == height - 1) and direction2 != \
config.DIRECTION_NONE:
            bitmap_new = np.zeros((bitmap.shape[0] + 1, bitmap.shape[1]),
                                  dtype=int)
if row_idx2 == 0:
bitmap_new[1:, :] = bitmap
row_idx += 1
diff_piece = shift_piece(diff_piece, (1, 0))
row_idx2 += 1
diff_piece2 = shift_piece(diff_piece2, (1, 0))
else:
bitmap_new[:-1, :] = bitmap
bitmap = bitmap_new
AUTM = 800 # animation_update_time_min
if action == config.ACTION_ADD:
img_show = bitmap2syn_img(bitmap)
img_animation.append((
enlarge_and_shift(img_show, row_idx, col_idx_start,
col_idx_end, direction, 1),
AUTM))
img_animation.append((
enlarge_and_shift(img_show, row_idx, col_idx_start,
col_idx_end, direction, 0.5),
AUTM))
img_animation.append((
enlarge_and_shift(img_show, row_idx, col_idx_start,
col_idx_end, direction, 0),
3 * AUTM))
elif action == config.ACTION_REMOVE:
img_show = bitmap2syn_img(bitmap)
img_animation.append((
enlarge_and_shift(img_show, row_idx, col_idx_start,
col_idx_end, direction, 0),
AUTM))
img_animation.append((
enlarge_and_shift(img_show, row_idx, col_idx_start,
col_idx_end, direction, 0.5),
AUTM))
img_animation.append((
enlarge_and_shift(img_show, row_idx, col_idx_start,
col_idx_end, direction, 1),
3 * AUTM))
elif action == config.ACTION_MOVE:
bitmap_tmp = bitmap.copy()
bitmap_tmp = remove_piece(bitmap_tmp, diff_piece2, do_shrink=False)
bitmap_tmp = add_piece(bitmap_tmp, diff_piece)
img_show = bitmap2syn_img(bitmap_tmp)
img_animation.append((
enlarge_and_shift(img_show, row_idx, col_idx_start,
col_idx_end, direction, 0),
AUTM))
img_animation.append((
enlarge_and_shift(img_show, row_idx, col_idx_start,
col_idx_end, direction, 0.25),
AUTM))
img_animation.append((
enlarge_and_shift(img_show, row_idx, col_idx_start,
col_idx_end, direction, 0.5),
AUTM))
bitmap_tmp = bitmap.copy()
bitmap_tmp = remove_piece(bitmap_tmp, diff_piece, do_shrink=False)
bitmap_tmp = add_piece(bitmap_tmp, diff_piece2)
img_show = bitmap2syn_img(bitmap_tmp)
img_animation.append((enlarge_and_shift(img_show, row_idx2,
col_idx_start2, col_idx_end2,
direction2, 0.5), AUTM))
img_animation.append((enlarge_and_shift(img_show, row_idx2,
col_idx_start2, col_idx_end2,
direction2, 0.25), AUTM))
img_animation.append((enlarge_and_shift(img_show, row_idx2,
col_idx_start2, col_idx_end2,
direction2, 0), 3 * AUTM))
else:
img_show = bitmap2syn_img(bitmap)
img_animation.append(
(enlarge_and_shift(img_show, 0, 0, 0, 0, 0), 5 * AUTM))
return encode_images(img_animation)
def get_piece_position(bm, piece):
row_idx, col_idx_start, col_idx_end, _, label = piece
is_top = row_idx == 0 or not np.any(
bm[row_idx - 1, col_idx_start: col_idx_end + 1])
is_bottom = row_idx == bm.shape[0] - 1 or not np.any(
bm[row_idx + 1, col_idx_start: col_idx_end + 1])
is_left = col_idx_start == 0 or not np.any(bm[row_idx, 0: col_idx_start])
is_right = col_idx_end == bm.shape[1] - 1 or not np.any(
bm[row_idx, col_idx_end + 1:])
position = None
if is_top:
if is_left and not is_right:
position = "top left"
elif is_right and not is_left:
position = "top right"
else:
position = "top"
elif is_bottom:
if is_left and not is_right:
position = "bottom left"
elif is_right and not is_left:
position = "bottom right"
else:
position = "bottom"
return position
def generate_message(bm_old, bm_new, action, diff_piece, diff_piece2=None,
step_time=0, good_word_idx=0):
row_idx, col_idx_start, col_idx_end, _, label = diff_piece
if action == config.ACTION_ADD:
message = "Now find a 1x%d %s piece and add it to " % (
(col_idx_end - col_idx_start + 1), config.COLOR_ORDER[label])
position = get_piece_position(bm_new, diff_piece)
if position is not None:
message += "the %s of the current model." % position
else:
message += "the current model."
p = 0.2
if step_time > 10: # magic number
p = 0.8
if random.random() < p:
message = config.GOOD_WORDS[good_word_idx] + message
elif action == config.ACTION_REMOVE:
message = "Remove the 1x%d %s piece from " % ((col_idx_end -
col_idx_start + 1),
config.COLOR_ORDER[label])
position = get_piece_position(bm_old, diff_piece)
if position is not None:
message += "the %s of the current model." % position
else:
message += "the current model."
elif action == config.ACTION_MOVE:
row_idx2, col_idx_start2, col_idx_end2, _, _ = diff_piece2
if row_idx == row_idx2:
if col_idx_start < col_idx_start2:
if (col_idx_start2 <= col_idx_end + 1 or np.all(bm_old[row_idx,
col_idx_end +
1:
col_idx_start2] == 0)) and \
col_idx_start2 - col_idx_start <= 3:
message = "Now slightly move the 1x%d %s piece to the " \
"right by %d brick size." % (
(col_idx_end - col_idx_start + 1),
config.COLOR_ORDER[label],
col_idx_start2 - col_idx_start)
if random.random() < 0.5:
message = "You are quite close. " + message
else:
message = "This is incorrect. The 1x%d %s piece should be " \
"" \
"placed more to the right." % (
(col_idx_end - col_idx_start + 1),
config.COLOR_ORDER[label])
else:
if (col_idx_start <= col_idx_end2 + 1 or np.all(bm_old[row_idx,
col_idx_end2
+ 1:
col_idx_start] == 0)) and \
col_idx_start - col_idx_start2 <= 3:
message = "Now slightly move the 1x%d %s piece to the " \
"left by %d brick size." % (
(col_idx_end - col_idx_start + 1),
config.COLOR_ORDER[label],
col_idx_start - col_idx_start2)
if random.random() < 0.5:
message = "You are quite close. " + message
else:
message = "This is incorrect. The 1x%d %s piece should be " \
"" \
"placed more to the left." % (
(col_idx_end - col_idx_start + 1),
config.COLOR_ORDER[label])
else:
message = "Now move the 1x%d %s piece " % (
(col_idx_end - col_idx_start + 1), config.COLOR_ORDER[label])
position = get_piece_position(bm_old, diff_piece)
position2 = get_piece_position(bm_new, diff_piece2)
if position is None or position2 is None: # shouldn't happen
message += "as shown on the screen"
elif position[0] == position2[0]: # remain on the top or bottom
message += "to the %s of the current model." % position2
else:
message += "from the %s to the %s of the current model." % (
position, position2)
return message
def get_piece_direction(bm, piece):
row_idx, col_idx_start, col_idx_end = piece
direction = config.DIRECTION_NONE
if row_idx == 0 or np.all(
bm[row_idx - 1, col_idx_start: col_idx_end + 1] == 0):
direction = config.DIRECTION_UP
elif row_idx == bm.shape[0] - 1 or np.all(
bm[row_idx + 1, col_idx_start: col_idx_end + 1] == 0):
direction = config.DIRECTION_DOWN
return direction
def bitmap_same(bm1, bm2):
'''
Detect if two bitmaps @bm1 and @bm2 are exactly the same
Return True if yes, False if no
'''
return np.array_equal(bm1, bm2)
def bitmap_more_equalsize(bm1, bm2):
'''
Detect the difference of bitmaps @bm1 and @bm2 which are of the same size
(and aligned right)
Only consider the case where bm2 has more pieces than bm1
Returns None if cannot find the more pieces
'''
shape = bm1.shape
if shape != bm2.shape:
return None
bm_diff = np.not_equal(bm1, bm2)
if not np.all(
bm1[bm_diff] == 0): # can only be the case that bm2 has more pieces
return None
# initialize
bm_more = None
bm_more_pieces = np.zeros(shape, dtype=int)
bm_more_labels = np.zeros(shape, dtype=int)
# now start...
i = 0
j = 0
n_diff_pieces = 0
while i < shape[0]:
if not bm_diff[i, j]:
j += 1
if j == shape[1]:
i += 1
j = 0
continue
n_diff_pieces += 1
current_label = bm2[i, j]
while j < shape[1] and bm2[i, j] == current_label and bm_diff[i, j]:
bm_more_pieces[i, j] = n_diff_pieces
bm_more_labels[i, j] = current_label
j += 1
if j == shape[1]:
i += 1
j = 0
bm_more = {'pieces' : bm_more_pieces,
'labels' : bm_more_labels,
'n_diff_pieces': n_diff_pieces}
# some info about the first piece
if n_diff_pieces >= 1:
row_idxs, col_idxs = np.where(bm_more['pieces'] == 1)
row_idx = row_idxs[0]
col_idx_start = col_idxs.min()
col_idx_end = col_idxs.max()
direction = get_piece_direction(bm2,
(row_idx, col_idx_start, col_idx_end))
bm_more['first_piece'] = [row_idx, col_idx_start, col_idx_end,
direction,
bm_more['labels'][row_idx, col_idx_start]]
else:
bm_more['first_piece'] = None
return bm_more
def bitmap_more(bm1, bm2):
'''
Assuming bitmap @bm2 has more pieces than bitmap @bm1, try to detect it
Returns None if cannot find the more pieces
'''
shape1 = bm1.shape
shape2 = bm2.shape
if shape1[0] > shape2[0] or shape1[1] > shape2[1]:
return None
for row_shift in range(shape2[0] - shape1[0] + 1):
for col_shift in range(shape2[1] - shape1[1] + 1):
bm1_large = shift_bitmap(bm1, (row_shift, col_shift), shape2)
bm_more = bitmap_more_equalsize(bm1_large, bm2)
if bm_more is not None:
bm_more['shift'] = (row_shift, col_shift)
return bm_more
return None
def bitmap_diff(bm1, bm2):
'''
Detect how the two bitmaps @bm1 and @bm2 differ
Currently can only detect the difference if one bitmap is strictly larger
than the other one
Returns @bm_diff = {
'pieces' : an array with size equal to the bitmap showing which parts are
new
'labels' : an array with size equal to the bitmap showing what the new
parts are
'n_diff_pieces' : an integer. the number of new pieces
'first_piece' : info about the first new piece in format [row_idx,
col_idx_start, col_idx_end, direction, label]
direction = 0 (in the middle of some pieces), 1 (on top) or 2 (on the
bottom)
This field is None if bm1 equals bm2
'shift' : number of rows and columns the smaller bitmaps to shift to best
match the big one
'larger' : an integer of either 1 or 2. 1 means bm2 is part of bm1,
and vice versa
'''
# if arrays are the same return None
if np.array_equal(bm1, bm2):
return None
# case 1: bm2 has one more piece
bm_diff = bitmap_more(bm1, bm2)
if bm_diff is not None:
bm_diff['larger'] = 2
return bm_diff
# case 2: bm1 has one more piece
bm_diff = bitmap_more(bm2, bm1)
if bm_diff is not None:
bm_diff['larger'] = 1
return bm_diff
return None
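# Illustrative sketch: detecting that one piece was added on top of a model.
# config.DIRECTION_UP below refers to the constant from the imported config
# module.
#
#   bm_old = np.array([[3, 3, 3]])
#   bm_new = np.array([[2, 2, 0],
#                      [3, 3, 3]])
#   diff = bitmap_diff(bm_old, bm_new)
#   # diff['larger'] == 2, diff['n_diff_pieces'] == 1, diff['shift'] == (1, 0)
#   # diff['first_piece'] == [0, 0, 1, config.DIRECTION_UP, 2]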
def shift_bitmap(bm, shift, final_shape):
shape = bm.shape
bm_shift = np.zeros(final_shape, dtype=int)
bm_shift[shift[0]: shift[0] + shape[0], shift[1]: shift[1] + shape[1]] = bm
return bm_shift
def shrink_bitmap(bm):
'''
Remove the all zero lines at the four sides of the bitmap
'''
shape = bm.shape
i_start = 0
while i_start <= shape[0] - 1 and np.all(bm[i_start, :] == 0):
i_start += 1
i_end = shape[0] - 1
while i_end >= 0 and np.all(bm[i_end, :] == 0):
i_end -= 1
j_start = 0
while j_start <= shape[1] - 1 and np.all(bm[:, j_start] == 0):
j_start += 1
j_end = shape[1] - 1
while j_end >= 0 and np.all(bm[:, j_end] == 0):
j_end -= 1
return bm[i_start: i_end + 1, j_start: j_end + 1]
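# Illustrative sketch: all-zero border rows and columns are stripped.
#
#   bm = np.array([[0, 0, 0],
#                  [0, 5, 0],
#                  [0, 0, 0]])
#   shrink_bitmap(bm)   # -> array([[5]])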
def extend_piece(piece, bm):
'''
Given a piece and a bitmap, find if the piece can be part of a longer piece.
'''
row_idx, col_idx_start, col_idx_end, direction, label = piece
# extend toward left
j = col_idx_start - 1
while j >= 0 and bm[row_idx, j] == label and get_piece_direction(bm, (
row_idx, j, col_idx_end)) != config.DIRECTION_NONE:
j -= 1
col_idx_start = j + 1
# extend toward right
j = col_idx_end + 1
while j <= bm.shape[1] - 1 and bm[
row_idx, j] == label and get_piece_direction(bm, (
row_idx, col_idx_start, j)) != config.DIRECTION_NONE:
j += 1
col_idx_end = j - 1
return (row_idx, col_idx_start, col_idx_end,
get_piece_direction(bm, (row_idx, col_idx_start, col_idx_end)),
label)
def add_piece(bm, piece):
row_idx, col_idx_start, col_idx_end, direction, label = piece
bm_ret = bm.copy()
for j in range(col_idx_start, col_idx_end + 1):
bm_ret[row_idx, j] = label
return bm_ret
def remove_piece(bm, piece, do_shrink=True):
row_idx, col_idx_start, col_idx_end, direction, label = piece
bm_ret = bm.copy()
for j in range(col_idx_start, col_idx_end + 1):
bm_ret[row_idx, j] = 0
if do_shrink:
bm_ret = shrink_bitmap(bm_ret)
return bm_ret
def piece_same(piece1, piece2):
row_idx1, col_idx_start1, col_idx_end1, direction1, label1 = piece1
row_idx2, col_idx_start2, col_idx_end2, direction2, label2 = piece2
return col_idx_start1 - col_idx_end1 == col_idx_start2 - col_idx_end2 and\
label1 == label2
def shift_piece(piece, shift):
row_idx, col_idx_start, col_idx_end, direction, label = piece
return (
row_idx + shift[0], col_idx_start + shift[1], col_idx_end + shift[1],
direction, label)
def equalize_size(bm1, bm2, common_shift1, common_shift2):
shift1 = [0, 0]
shift2 = [0, 0]
if common_shift1[0] > common_shift2[0]:
shift2[0] = common_shift1[0] - common_shift2[0]
else:
shift1[0] = common_shift2[0] - common_shift1[0]
if common_shift1[1] > common_shift2[1]:
shift2[1] = common_shift1[1] - common_shift2[1]
else:
shift1[1] = common_shift2[1] - common_shift1[1]
final_shape = (max(bm1.shape[0] + shift1[0], bm2.shape[0] + shift2[0]),
max(bm1.shape[1] + shift1[1], bm2.shape[1] + shift2[1]))
return (shift_bitmap(bm1, shift1, final_shape),
shift_bitmap(bm2, shift2, final_shape), shift1, shift2)
|
the-stack_106_21275
|
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
import pandas
import SimpleITK as sitk
import radiomics
from radiomics import featureextractor
def main():
outPath = r''
inputCSV = os.path.join(outPath, 'testCases.csv')
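  # Assumed layout of testCases.csv, inferred from the column access further
  # below (not an official specification):
  #
  #   Image,Mask,Label
  #   /path/to/case1_image.nrrd,/path/to/case1_mask.nrrd,1
  #   /path/to/case2_image.nrrd,/path/to/case2_mask.nrrd,1
  #
  # 'Label' is optional; any extra columns are carried through to the output.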
outputFilepath = os.path.join(outPath, 'radiomics_features.csv')
progress_filename = os.path.join(outPath, 'pyrad_log.txt')
params = os.path.join(outPath, 'exampleSettings', 'Params.yaml')
# Configure logging
rLogger = logging.getLogger('radiomics')
# Set logging level
# rLogger.setLevel(logging.INFO) # Not needed, default log level of logger is INFO
# Create handler for writing to log file
handler = logging.FileHandler(filename=progress_filename, mode='w')
handler.setFormatter(logging.Formatter('%(levelname)s:%(name)s: %(message)s'))
rLogger.addHandler(handler)
# Initialize logging for batch log messages
logger = rLogger.getChild('batch')
# Set verbosity level for output to stderr (default level = WARNING)
radiomics.setVerbosity(logging.INFO)
logger.info('pyradiomics version: %s', radiomics.__version__)
logger.info('Loading CSV')
# ####### Up to this point, this script is equal to the 'regular' batchprocessing script ########
try:
# Use pandas to read and transpose ('.T') the input data
# The transposition is needed so that each column represents one test case. This is easier for iteration over
# the input cases
flists = pandas.read_csv(inputCSV).T
except Exception:
logger.error('CSV READ FAILED', exc_info=True)
exit(-1)
logger.info('Loading Done')
logger.info('Patients: %d', len(flists.columns))
if os.path.isfile(params):
extractor = featureextractor.RadiomicsFeatureExtractor(params)
else: # Parameter file not found, use hardcoded settings instead
settings = {}
settings['binWidth'] = 25
settings['resampledPixelSpacing'] = None # [3,3,3]
settings['interpolator'] = sitk.sitkBSpline
settings['enableCExtensions'] = True
extractor = featureextractor.RadiomicsFeatureExtractor(**settings)
# extractor.enableInputImages(wavelet= {'level': 2})
logger.info('Enabled input images types: %s', extractor.enabledImagetypes)
logger.info('Enabled features: %s', extractor.enabledFeatures)
logger.info('Current settings: %s', extractor.settings)
# Instantiate a pandas data frame to hold the results of all patients
results = pandas.DataFrame()
for entry in flists: # Loop over all columns (i.e. the test cases)
logger.info("(%d/%d) Processing Patient (Image: %s, Mask: %s)",
entry + 1,
len(flists),
flists[entry]['Image'],
flists[entry]['Mask'])
imageFilepath = flists[entry]['Image']
maskFilepath = flists[entry]['Mask']
label = flists[entry].get('Label', None)
if str(label).isdigit():
label = int(label)
else:
label = None
if (imageFilepath is not None) and (maskFilepath is not None):
featureVector = flists[entry] # This is a pandas Series
featureVector['Image'] = os.path.basename(imageFilepath)
featureVector['Mask'] = os.path.basename(maskFilepath)
try:
# PyRadiomics returns the result as an ordered dictionary, which can be easily converted to a pandas Series
# The keys in the dictionary will be used as the index (labels for the rows), with the values of the features
# as the values in the rows.
result = pandas.Series(extractor.execute(imageFilepath, maskFilepath, label))
featureVector = featureVector.append(result)
except Exception:
logger.error('FEATURE EXTRACTION FAILED:', exc_info=True)
      # To add the calculated features for this case to our data frame, the series must have a name (which will be the
      # name of the column).
featureVector.name = entry
# By specifying an 'outer' join, all calculated features are added to the data frame, including those not
# calculated for previous cases. This also ensures we don't end up with an empty frame, as for the first patient
# it is 'joined' with the empty data frame.
results = results.join(featureVector, how='outer') # If feature extraction failed, results will be all NaN
logger.info('Extraction complete, writing CSV')
# .T transposes the data frame, so that each line will represent one patient, with the extracted features as columns
results.T.to_csv(outputFilepath, index=False, na_rep='NaN')
logger.info('CSV writing complete')
if __name__ == '__main__':
main()
|
the-stack_106_21276
|
"""Test time schema implementation."""
import numpy as np
import pandas as pd
import pytest
from asdf import ValidationError
from weldx.asdf.util import _write_buffer, _write_read_buffer
@pytest.mark.parametrize(
"inputs",
[
pd.Timedelta("5m3ns"),
pd.Timedelta("106751 days 23:47:16.854775"),
pd.timedelta_range(start="-5s", end="25s", freq="3s"),
pd.TimedeltaIndex([0, 1e9, 5e9, 3e9]),
pd.Timestamp("2020-04-15T16:47:00.000000001"),
pd.Timestamp("2020-04-15T16:47:00.000000001", tz="Europe/Berlin"),
pd.date_range(start="2020-01-01", periods=5, freq="1D"),
pd.date_range(start="2020-01-01", periods=5, freq="1D", tz="Europe/Berlin"),
pd.DatetimeIndex(["2020-01-01", "2020-01-02", "2020-01-03", "2020-01-04"]),
pd.DatetimeIndex(["2020-01-01", "2020-01-02", "2020-01-04", "2020-01-05"]),
],
)
def test_time_classes(inputs):
data = _write_read_buffer({"root": inputs})
assert np.all(data["root"] == inputs)
def test_time_classes_max_inline():
with pytest.raises(ValidationError):
# cannot store large ints >52 bits inline in asdf
dti = pd.DatetimeIndex(["2020-01-01", "2020-01-02", "2020-01-04", "2020-01-05"])
_write_buffer(
{"root": dti},
write_kwargs={"all_array_storage": "inline"},
)
|
the-stack_106_21277
|
"""Print a summary of specialization stats for all files in the
default stats folders.
"""
import collections
import os.path
import opcode
from datetime import date
import itertools
import argparse
import sys
if os.name == "nt":
DEFAULT_DIR = "c:\\temp\\py_stats\\"
else:
DEFAULT_DIR = "/tmp/py_stats/"
# Create list of all instruction names
specialized = iter(opcode._specialized_instructions)
opname = ["<0>"]
for name in opcode.opname[1:]:
if name.startswith("<"):
try:
name = next(specialized)
except StopIteration:
pass
opname.append(name)
# opcode_name --> opcode
# Sort alphabetically.
opmap = {name: i for i, name in enumerate(opname)}
opmap = dict(sorted(opmap.items()))
TOTAL = "specialization.deferred", "specialization.hit", "specialization.miss", "execution_count"
def print_specialization_stats(name, family_stats, defines):
if "specializable" not in family_stats:
return
total = sum(family_stats.get(kind, 0) for kind in TOTAL)
if total == 0:
return
with Section(name, 3, f"specialization stats for {name} family"):
rows = []
for key in sorted(family_stats):
if key.startswith("specialization.failure_kinds"):
continue
if key in ("specialization.hit", "specialization.miss"):
label = key[len("specialization."):]
elif key == "execution_count":
label = "unquickened"
elif key in ("specialization.success", "specialization.failure", "specializable"):
continue
elif key.startswith("pair"):
continue
else:
label = key
rows.append((f"{label:>12}", f"{family_stats[key]:>12}", f"{100*family_stats[key]/total:0.1f}%"))
emit_table(("Kind", "Count", "Ratio"), rows)
print_title("Specialization attempts", 4)
total_attempts = 0
for key in ("specialization.success", "specialization.failure"):
total_attempts += family_stats.get(key, 0)
rows = []
for key in ("specialization.success", "specialization.failure"):
label = key[len("specialization."):]
label = label[0].upper() + label[1:]
val = family_stats.get(key, 0)
rows.append((label, val, f"{100*val/total_attempts:0.1f}%"))
emit_table(("", "Count:", "Ratio:"), rows)
total_failures = family_stats.get("specialization.failure", 0)
failure_kinds = [ 0 ] * 30
for key in family_stats:
if not key.startswith("specialization.failure_kind"):
continue
_, index = key[:-1].split("[")
index = int(index)
failure_kinds[index] = family_stats[key]
failures = [(value, index) for (index, value) in enumerate(failure_kinds)]
failures.sort(reverse=True)
rows = []
for value, index in failures:
if not value:
continue
rows.append((kind_to_text(index, defines, name), value, f"{100*value/total_failures:0.1f}%"))
emit_table(("Failure kind", "Count:", "Ratio:"), rows)
def gather_stats():
stats = collections.Counter()
for filename in os.listdir(DEFAULT_DIR):
with open(os.path.join(DEFAULT_DIR, filename)) as fd:
for line in fd:
try:
key, value = line.split(":")
except ValueError:
print (f"Unparsable line: '{line.strip()}' in {filename}", file=sys.stderr)
continue
key = key.strip()
value = int(value)
stats[key] += value
return stats
def extract_opcode_stats(stats):
opcode_stats = [ {} for _ in range(256) ]
for key, value in stats.items():
if not key.startswith("opcode"):
continue
n, _, rest = key[7:].partition("]")
opcode_stats[int(n)][rest.strip(".")] = value
return opcode_stats
def parse_kinds(spec_src, prefix="SPEC_FAIL"):
defines = collections.defaultdict(list)
start = "#define " + prefix + "_"
for line in spec_src:
line = line.strip()
if not line.startswith(start):
continue
line = line[len(start):]
name, val = line.split()
defines[int(val.strip())].append(name.strip())
return defines
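# Illustrative sketch of the input parse_kinds consumes (the exact macro
# names are only examples): lines in specialize.c such as
#
#   #define SPEC_FAIL_OTHER 0
#   #define SPEC_FAIL_OUT_OF_RANGE 4
#
# produce {0: ["OTHER"], 4: ["OUT_OF_RANGE"]}, i.e. a mapping from the kind
# number to the macro name(s) with the "SPEC_FAIL_" prefix stripped.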
def pretty(defname):
return defname.replace("_", " ").lower()
def kind_to_text(kind, defines, opname):
if kind < 7:
return pretty(defines[kind][0])
if opname.endswith("ATTR"):
opname = "ATTR"
if opname.endswith("SUBSCR"):
opname = "SUBSCR"
for name in defines[kind]:
if name.startswith(opname):
return pretty(name[len(opname)+1:])
return "kind " + str(kind)
def categorized_counts(opcode_stats):
basic = 0
specialized = 0
not_specialized = 0
specialized_instructions = {
op for op in opcode._specialized_instructions
if "__" not in op and "ADAPTIVE" not in op}
adaptive_instructions = {
op for op in opcode._specialized_instructions
if "ADAPTIVE" in op}
for i, opcode_stat in enumerate(opcode_stats):
if "execution_count" not in opcode_stat:
continue
count = opcode_stat['execution_count']
name = opname[i]
if "specializable" in opcode_stat:
not_specialized += count
elif name in adaptive_instructions:
not_specialized += count
elif name in specialized_instructions:
miss = opcode_stat.get("specialization.miss", 0)
not_specialized += miss
specialized += count - miss
else:
basic += count
return basic, not_specialized, specialized
def print_title(name, level=2):
print("#"*level, name)
print()
class Section:
def __init__(self, title, level=2, summary=None):
self.title = title
self.level = level
if summary is None:
self.summary = title.lower()
else:
self.summary = summary
def __enter__(self):
print_title(self.title, self.level)
print("<details>")
print("<summary>", self.summary, "</summary>")
print()
return self
def __exit__(*args):
print()
print("</details>")
print()
def emit_table(header, rows):
width = len(header)
header_line = "|"
under_line = "|"
for item in header:
under = "---"
if item.endswith(":"):
item = item[:-1]
under += ":"
header_line += item + " | "
under_line += under + "|"
print(header_line)
print(under_line)
for row in rows:
if width is not None and len(row) != width:
raise ValueError("Wrong number of elements in row '" + str(rows) + "'")
print("|", " | ".join(str(i) for i in row), "|")
print()
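# Illustrative sketch: a trailing ':' in a header cell right-aligns that
# column in the emitted Markdown. For example,
#
#   emit_table(("Name", "Count:"), [("LOAD_FAST", 10)])
#
# prints roughly
#
#   |Name | Count |
#   |---|---:|
#   | LOAD_FAST | 10 |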
def emit_execution_counts(opcode_stats, total):
with Section("Execution counts", summary="execution counts for all instructions"):
counts = []
for i, opcode_stat in enumerate(opcode_stats):
if "execution_count" in opcode_stat:
count = opcode_stat['execution_count']
miss = 0
if "specializable" not in opcode_stat:
miss = opcode_stat.get("specialization.miss")
counts.append((count, opname[i], miss))
counts.sort(reverse=True)
cumulative = 0
rows = []
for (count, name, miss) in counts:
cumulative += count
if miss:
miss = f"{100*miss/count:0.1f}%"
else:
miss = ""
rows.append((name, count, f"{100*count/total:0.1f}%",
f"{100*cumulative/total:0.1f}%", miss))
emit_table(
("Name", "Count:", "Self:", "Cumulative:", "Miss ratio:"),
rows
)
def emit_specialization_stats(opcode_stats):
spec_path = os.path.join(os.path.dirname(__file__), "../../Python/specialize.c")
with open(spec_path) as spec_src:
defines = parse_kinds(spec_src)
with Section("Specialization stats", summary="specialization stats by family"):
for i, opcode_stat in enumerate(opcode_stats):
name = opname[i]
print_specialization_stats(name, opcode_stat, defines)
def emit_specialization_overview(opcode_stats, total):
basic, not_specialized, specialized = categorized_counts(opcode_stats)
with Section("Specialization effectiveness"):
emit_table(("Instructions", "Count:", "Ratio:"), (
("Basic", basic, f"{basic*100/total:0.1f}%"),
("Not specialized", not_specialized, f"{not_specialized*100/total:0.1f}%"),
("Specialized", specialized, f"{specialized*100/total:0.1f}%"),
))
def emit_call_stats(stats):
stats_path = os.path.join(os.path.dirname(__file__), "../../Include/pystats.h")
with open(stats_path) as stats_src:
defines = parse_kinds(stats_src, prefix="EVAL_CALL")
with Section("Call stats", summary="Inlined calls and frame stats"):
total = 0
for key, value in stats.items():
if "Calls to" in key:
total += value
rows = []
for key, value in stats.items():
if "Calls to" in key:
rows.append((key, value, f"{100*value/total:0.1f}%"))
elif key.startswith("Calls "):
name, index = key[:-1].split("[")
index = int(index)
label = name + " (" + pretty(defines[index][0]) + ")"
rows.append((label, value, f"{100*value/total:0.1f}%"))
for key, value in stats.items():
if key.startswith("Frame"):
rows.append((key, value, f"{100*value/total:0.1f}%"))
emit_table(("", "Count:", "Ratio:"), rows)
def emit_object_stats(stats):
with Section("Object stats", summary="allocations, frees and dict materializatons"):
total_materializations = stats.get("Object new values")
total_allocations = stats.get("Object allocations")
total_increfs = stats.get("Object interpreter increfs") + stats.get("Object increfs")
total_decrefs = stats.get("Object interpreter decrefs") + stats.get("Object decrefs")
rows = []
for key, value in stats.items():
if key.startswith("Object"):
if "materialize" in key:
ratio = f"{100*value/total_materializations:0.1f}%"
elif "allocations" in key:
ratio = f"{100*value/total_allocations:0.1f}%"
elif "increfs" in key:
ratio = f"{100*value/total_increfs:0.1f}%"
elif "decrefs" in key:
ratio = f"{100*value/total_decrefs:0.1f}%"
else:
ratio = ""
label = key[6:].strip()
label = label[0].upper() + label[1:]
rows.append((label, value, ratio))
emit_table(("", "Count:", "Ratio:"), rows)
def get_total(opcode_stats):
total = 0
for opcode_stat in opcode_stats:
if "execution_count" in opcode_stat:
total += opcode_stat['execution_count']
return total
def emit_pair_counts(opcode_stats, total):
pair_counts = []
for i, opcode_stat in enumerate(opcode_stats):
if i == 0:
continue
for key, value in opcode_stat.items():
if key.startswith("pair_count"):
x, _, _ = key[11:].partition("]")
if value:
pair_counts.append((value, (i, int(x))))
with Section("Pair counts", summary="Pair counts for top 100 pairs"):
pair_counts.sort(reverse=True)
cumulative = 0
rows = []
for (count, pair) in itertools.islice(pair_counts, 100):
i, j = pair
cumulative += count
rows.append((opname[i] + " " + opname[j], count, f"{100*count/total:0.1f}%",
f"{100*cumulative/total:0.1f}%"))
emit_table(("Pair", "Count:", "Self:", "Cumulative:"),
rows
)
with Section("Predecessor/Successor Pairs", summary="Top 5 predecessors and successors of each opcode"):
predecessors = collections.defaultdict(collections.Counter)
successors = collections.defaultdict(collections.Counter)
total_predecessors = collections.Counter()
total_successors = collections.Counter()
for count, (first, second) in pair_counts:
if count:
predecessors[second][first] = count
successors[first][second] = count
total_predecessors[second] += count
total_successors[first] += count
for name, i in opmap.items():
total1 = total_predecessors[i]
total2 = total_successors[i]
if total1 == 0 and total2 == 0:
continue
pred_rows = succ_rows = ()
if total1:
pred_rows = [(opname[pred], count, f"{count/total1:.1%}")
for (pred, count) in predecessors[i].most_common(5)]
if total2:
succ_rows = [(opname[succ], count, f"{count/total2:.1%}")
for (succ, count) in successors[i].most_common(5)]
with Section(name, 3, f"Successors and predecessors for {name}"):
emit_table(("Predecessors", "Count:", "Percentage:"),
pred_rows
)
emit_table(("Successors", "Count:", "Percentage:"),
succ_rows
)
def main():
stats = gather_stats()
opcode_stats = extract_opcode_stats(stats)
total = get_total(opcode_stats)
emit_execution_counts(opcode_stats, total)
emit_pair_counts(opcode_stats, total)
emit_specialization_stats(opcode_stats)
emit_specialization_overview(opcode_stats, total)
emit_call_stats(stats)
emit_object_stats(stats)
print("---")
print("Stats gathered on:", date.today())
if __name__ == "__main__":
main()
|
the-stack_106_21279
|
"""
PRACTICE Exam 1, problem 0.
These problems illustrate concepts that previous problems have not emphasized:
-- determining whether a number is odd or even (Problem 0a)
-- returning True or False (Problem 0a)
-- is_prime (Problem 0b)
-- animation (Problem 0c)
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Montgomery Winslow.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
import testing_helper
import time
def main():
""" Calls the TEST functions in this module. """
run_test_problem0a()
run_test_problem0b()
run_test_problem0c()
###############################################################################
# DONE: 2. READ the green doc-string for the:
# - is_prime
# - sum_of_digits
# functions defined below. You do NOT need to understand their
# implementations, just their specification (per the doc-string).
# You should ** CALL ** those functions as needed in implementing the
# other functions. After you have READ this, change its _TODO_ to DONE.
###############################################################################
def is_prime(n):
"""
What comes in: An integer n >= 2.
What goes out:
-- Returns True if the given integer is prime,
else returns False.
Side effects: None.
Examples:
-- is_prime(11) returns True
-- is_prime(12) returns False
-- is_prime(2) returns True
Note: The algorithm used here is simple and clear but slow.
"""
for k in range(2, (n // 2) + 1):
if n % k == 0:
return False
return True
# -------------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no _TODO_.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# -------------------------------------------------------------------------
def sum_of_digits(number):
"""
What comes in: An integer.
What goes out: Returns the sum of the digits in the given integer.
Side effects: None.
Example:
If the integer is 83135,
this function returns (8 + 3 + 1 + 3 + 5), which is 20.
"""
# -------------------------------------------------------------------------
# Students:
# Do NOT touch the above sum_of_digits function - it has no _TODO_.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# -------------------------------------------------------------------------
if number < 0:
number = -number
digit_sum = 0
while True:
if number == 0:
break
digit_sum = digit_sum + (number % 10)
number = number // 10
return digit_sum
def run_test_problem0a():
""" Tests the problem0a function. """
print()
print('--------------------------------------------------')
print('Testing the problem0a function:')
print('--------------------------------------------------')
format_string = ' problem0a( {} )'
test_results = [0, 0] # Number of tests passed, failed.
# Test 1:
expected = False
print_expected_result_of_test([83135], expected, test_results,
format_string)
actual = problem0a(83135) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
if actual == 'False':
print('Your function returned the STRING "False",')
print('which is WRONG. It should have returned')
print('the built-in constant False.')
print('Ask for help as needed.')
# Test 2:
expected = True
print_expected_result_of_test([306], expected, test_results, format_string)
actual = problem0a(306) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
if actual == 'True':
print('Your function returned the STRING "True",')
print('which is WRONG. It should have returned')
print('the built-in constant True.')
print('Ask for help as needed.')
# Test 3:
expected = False
print_expected_result_of_test([246], expected, test_results, format_string)
actual = problem0a(246) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 4:
expected = False
print_expected_result_of_test([830931], expected, test_results,
format_string)
actual = problem0a(830931) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 5:
expected = True
print_expected_result_of_test([730931], expected, test_results,
format_string)
actual = problem0a(730931) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 6:
expected = False
print_expected_result_of_test([200], expected, test_results, format_string)
actual = problem0a(200) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 7:
expected = True
print_expected_result_of_test([562], expected, test_results,
format_string)
actual = problem0a(562) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 8:
expected = True
print_expected_result_of_test([555], expected, test_results,
format_string)
actual = problem0a(555) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 9:
expected = False
print_expected_result_of_test([13], expected, test_results,
format_string)
actual = problem0a(13) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
print_summary_of_test_results(test_results)
def problem0a(n):
"""
What comes in: An integer.
What goes out:
-- Returns True if the sum of the digits in the given integer
is odd, else returns False.
Side effects: None.
Examples:
-- If the given integer is 83135, this function returns False,
since (8 + 3 + 1 + 3 + 5) is 20, which is NOT odd.
-- If the given integer is 306, this function returns True,
since (3 + 0 + 6) is 9, which IS odd.
-- If the given integer is 246, this function returns False,
since (2 + 4 + 6) is 12, which is NOT odd.
"""
# -------------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# Tests have been written for you (above).
#
###########################################################################
# IMPORTANT:
# ** For full credit you must appropriately
# ** use (call) the sum_of_digits function
# ** that is DEFINED ABOVE.
###########################################################################
#
# HINT: To test whether a number m is even or odd,
# compute m % 2, i.e., the REMAINDER from m // 2.
# If that remainder is 0, the number is even.
# If that remainder is 1, the number is odd.
# Simply try a few examples to convince yourself of this.
# ASK FOR HELP if you do not understand this hint.
# -------------------------------------------------------------------------
if sum_of_digits(n) % 2 == 1:
return True
return False
def run_test_problem0b():
""" Tests the problem0b function. """
print()
print('--------------------------------------------------')
print('Testing the problem0b function:')
print('--------------------------------------------------')
format_string = ' problem0b( {} )'
test_results = [0, 0] # Number of tests passed, failed.
# Test 1:
expected = 6
print_expected_result_of_test([13], expected, test_results, format_string)
actual = problem0b(13) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 2:
expected = 1
print_expected_result_of_test([2], expected, test_results, format_string)
actual = problem0b(2) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 3:
expected = 46
print_expected_result_of_test([200], expected, test_results, format_string)
actual = problem0b(200) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
# Test 4:
expected = 168
print_expected_result_of_test([997], expected, test_results, format_string)
actual = problem0b(997) # Run the code to test
print_actual_result_of_test(expected, actual, test_results)
print_summary_of_test_results(test_results)
def problem0b(n):
"""
What comes in: An integer n >= 2.
What goes out:
-- Returns the number of integers from 2 to n, inclusive,
that are prime.
Side effects: None.
Examples:
-- If n is 13, this function returns 6,
since there are 6 primes -- namely, 2, 3, 5, 7, 11, and 13 --
between 2 and 13.
-- If n is 2, this function returns 1,
since there is one prime (namely, 2) between 2 and 2.
-- If n is 200, the correct answer is 46,
since there are 46 primes between 2 and 200.
"""
# -------------------------------------------------------------------------
# DONE: 4. Implement and test this function.
# Tests have been written for you (above).
#
###########################################################################
# IMPORTANT:
# ** For full credit you must appropriately
# ** use (call) the is_prime function that is DEFINED ABOVE.
###########################################################################
# ------------------------------------------------------------------
total = 0
    for k in range(2, n + 1):
        if is_prime(k):
total = total + 1
return total
def run_test_problem0c():
""" Tests the problem0c function. """
print()
print('--------------------------------------------------')
print('Testing the problem0c function:')
print(' See the graphics windows that pop up.')
print('--------------------------------------------------')
# TWO tests on ONE window.
title = 'Tests 1 & 2 of problem0c: blue circle + 6 circles;'
title += ' then green circle + 3 circles'
window1 = rg.RoseWindow(650, 300, title)
circle1 = rg.Circle(rg.Point(100, 50), 30)
circle1.fill_color = 'blue'
problem0c(circle1, 6, window1)
window1.continue_on_mouse_click()
circle2 = rg.Circle(rg.Point(75, 200), 75)
circle2.fill_color = 'green'
problem0c(circle2, 3, window1)
window1.close_on_mouse_click()
# A third test on ANOTHER window.
title = 'Test 3 of problem0c: red circle + 10 circles'
window2 = rg.RoseWindow(600, 200, title)
circle3 = rg.Circle(rg.Point(50, 50), 20)
circle3.fill_color = 'red'
problem0c(circle3, 10, window2)
window2.close_on_mouse_click()
def problem0c(circle, n, window):
"""
See problem0c_picture.pdf in this project for pictures
that may help you better understand the following specification:
What comes in:
-- An rg.Circle.
-- A positive integer n.
-- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws the given rg.Circle and n additional rg.Circles
on the given rg.RoseWindow such that:
-- The circles form a row of touching rg.Circles with the
leftmost circle being the given rg.Circle.
-- There is a 0.5 second pause after each rg.Circle is drawn.
Must ** NOT close ** the window.
Type hints:
:type circle: rg.Circle
:type n: int
:type window: rg.RoseWindow
"""
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Tests have been written for you (above).
#
###########################################################################
# HINT: render(0.5)
# renders with a half-second pause after rendering.
###########################################################################
# -------------------------------------------------------------------------
circle.attach_to(window)
window.render(.5)
radius = circle.radius
centerx = circle.center.x + radius * 2
centery = circle.center.y
for k in range(n):
point = rg.Point(centerx,centery)
circle2 = rg.Circle(point, radius)
circle2.attach_to(window)
window.render(0.5)
centerx = circle2.center.x + radius * 2
###############################################################################
# Our tests use the following to print error messages in red.
# Do NOT change it. You do NOT have to do anything with it.
###############################################################################
def print_expected_result_of_test(arguments, expected,
test_results, format_string):
testing_helper.print_expected_result_of_test(arguments, expected,
test_results, format_string)
def print_actual_result_of_test(expected, actual, test_results,
precision=None):
testing_helper.print_actual_result_of_test(expected, actual,
test_results, precision)
def print_summary_of_test_results(test_results):
testing_helper.print_summary_of_test_results(test_results)
# To allow color-coding the output to the console:
USE_COLORING = True # Change to False to revert to OLD style coloring
testing_helper.USE_COLORING = USE_COLORING
if USE_COLORING:
# noinspection PyShadowingBuiltins
print = testing_helper.print_colored
else:
# noinspection PyShadowingBuiltins
print = testing_helper.print_uncolored
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# The try .. except prevents error messages on the console from being
# intermingled with ordinary output to the console.
# -----------------------------------------------------------------------------
try:
main()
except Exception:
print('ERROR - While running this test,', color='red')
print('your code raised the following exception:', color='red')
print()
time.sleep(1)
raise
|
the-stack_106_21280
|
#!/Users/harukii/PycharmProjects/InterfaceAutoTest/venv/bin/python
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None
if __name__ == "__main__":
PSYCO = 0
import xlrd
import sys
import time
import glob
import traceback
import gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
sh.row_types(rowx)[nc-1]
sh.row_values(rowx)[nc-1]
sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows = sh.nrows
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options, PSYCO
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n",
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
if PSYCO:
import psyco
psyco.full()
PSYCO = 0
try:
t0 = time.time()
bk = xlrd.open_workbook(
fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot
import hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
PSYCO = 1
main(av[1:])
else:
main(av)
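# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): assuming this file is
# saved as runxlrd.py and a workbook "sample.xls" exists, typical invocations
# look like:
#
#   python runxlrd.py ov sample.xls        # overview of the workbook
#   python runxlrd.py 3rows sample.xls     # first, second and last row per sheet
#   python runxlrd.py -f 1 xfc sample.xls  # XF counts and cell-type counts
# ---------------------------------------------------------------------------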
|
the-stack_106_21281
|
from functools import lru_cache
import logging
import re
from lona import default_settings
ABSTRACT_ROUTE_RE = re.compile(r'<(?P<name>[^:>]+)(:(?P<pattern>[^>]+))?>')
ROUTE_PART_FORMAT_STRING = r'(?P<{}>{})'
DEFAULT_PATTERN = r'[^/]+'
OPTIONAL_TRAILING_SLASH_PATTERN = r'(/)'
MATCH_ALL = 1
logger = logging.getLogger('lona.routing')
class Route:
def __init__(self, raw_pattern, view, name='', interactive=True,
http_pass_through=False, frontend_view=None):
self.raw_pattern = raw_pattern
self.view = view
self.name = name
self.interactive = interactive
self.http_pass_through = http_pass_through
self.frontend_view = frontend_view
self.path = None
self.format_string = ''
self.optional_trailing_slash = False
# match all
if self.raw_pattern == MATCH_ALL:
self.path = MATCH_ALL
# string or regex
else:
raw_pattern = self.raw_pattern
if raw_pattern.endswith(OPTIONAL_TRAILING_SLASH_PATTERN):
self.optional_trailing_slash = True
raw_pattern = \
raw_pattern[:-len(OPTIONAL_TRAILING_SLASH_PATTERN)]
groups = ABSTRACT_ROUTE_RE.findall(raw_pattern)
            # the path is not a pattern, just a plain string
if not groups:
self.path = raw_pattern
self.format_string = raw_pattern
return
pattern_names = [i[0] for i in groups]
patterns = [(i[0], i[2] or DEFAULT_PATTERN) for i in groups]
cleaned_pattern = ABSTRACT_ROUTE_RE.sub('{}', raw_pattern)
# setup format string
self.format_string = cleaned_pattern.format(
*['{' + i + '}' for i in pattern_names])
# compile pattern
self.pattern = re.compile(
r'^{}{}$'.format( # NOQA: FS002
cleaned_pattern.format(
*[ROUTE_PART_FORMAT_STRING.format(*i)
for i in patterns],
),
(r'(/)?'
if self.optional_trailing_slash else ''),
),
)
def match(self, path):
# match all
if self.path == MATCH_ALL:
return True, {}
# simple string
if self.path:
if self.optional_trailing_slash and path.endswith('/'):
path = path[:-1]
return path == self.path, {}
# pattern
match_object = self.pattern.match(path)
if not match_object:
return False, {}
return True, match_object.groupdict()
def __repr__(self):
raw_pattern = self.raw_pattern
if raw_pattern == MATCH_ALL:
raw_pattern = 'MATCH_ALL'
return f'<Route({raw_pattern}, {self.view})>'
class Router:
def __init__(self):
self.routes = []
self.resize_resolve_cache(
default_settings.ROUTING_RESOLVE_CACHE_MAX_SIZE,
)
self.resize_reverse_cache(
default_settings.ROUTING_REVERSE_CACHE_MAX_SIZE,
)
# caches ##################################################################
def resize_resolve_cache(self, max_size):
self._resolve_lru_cache = lru_cache(max_size)(self._resolve)
def resize_reverse_cache(self, max_size):
self._reverse_lru_cache = lru_cache(max_size)(self._reverse)
def get_resolve_cache_info(self):
return self._resolve_lru_cache.cache_info()
def get_reverse_cache_info(self):
return self._reverse_lru_cache.cache_info()
def clear_resolve_cache_info(self):
return self._resolve_lru_cache.cache_clear()
def clear_reverse_cache_info(self):
return self._reverse_lru_cache.cache_clear()
# routes ##################################################################
def add_route(self, route):
# check if route name already exists
if route.name:
for _route in self.routes:
if route.name == _route.name:
logger.warning(
"route name '%s' already exists",
route.name,
)
self.routes.append(route)
def add_routes(self, *routes):
for route in routes:
self.add_route(route)
# resolve #################################################################
def _resolve(self, path):
logger.debug("resolving '%s'", path)
for route in self.routes:
match, match_info = route.match(path)
if match:
logger.debug('%s matched', route)
return True, route, match_info
logger.debug("no match for '%s'", path)
return False, None, {}
def resolve(self, *args, **kwargs):
return self._resolve_lru_cache(*args, **kwargs)
# reverse #################################################################
def _reverse(self, route_name, *args, **kwargs):
route = None
for _route in self.routes:
if _route.name == route_name:
route = _route
break
if not route:
raise ValueError(f"no route named '{route_name}' found")
if route.path:
return route.path
try:
return route.format_string.format(*args, **kwargs)
except KeyError as e:
key_error = e
# raise is outside of except block to avoid stacking tracebacks
raise ValueError(f'missing URL arg: {key_error.args[0]}')
def reverse(self, *args, **kwargs):
return self._reverse_lru_cache(*args, **kwargs)
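# Hedged usage sketch (not part of the original module): the view callable and the
# route below are made up for illustration; only Route and Router are real here.
if __name__ == '__main__':  # pragma: no cover
    def user_view(request, user_id):  # hypothetical view callable
        return f'user {user_id}'

    router = Router()
    router.add_routes(
        Route('/users/<user_id:[0-9]+>(/)', user_view, name='user_detail'),
    )

    # resolve() returns (matched, route, match_info); reverse() rebuilds a URL
    print(router.resolve('/users/42/'))              # (True, <Route(...)>, {'user_id': '42'})
    print(router.reverse('user_detail', user_id=7))  # '/users/7'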
|
the-stack_106_21282
|
import pandas as pd
from eventstudy.naivemodel import EventStudyNaiveModel
from eventstudy.dpyahoo import DataProviderYahoo
import datetime as dt
def read_events(file_name, start_date, end_date, value_threshold=7):
"""Read a csv and return a list of events as a pandas DataFrame."""
event_list_df = pd.read_csv(file_name,
usecols=['ticker','eps_pct_diff_surp','asof_date','act_rpt_code'],
parse_dates=['asof_date'],
)
# Add index and sort by date
event_list_df = event_list_df.set_index('asof_date')
event_list_df = event_list_df.sort_index()
#print(event_list_df)
#print(event_list_df.loc['2001':'2002'])
# Select between certain dates.
event_list_df = event_list_df.loc[start_date:end_date]
# Drop events that don't meet a certain threshold
event_list_df = event_list_df[event_list_df['eps_pct_diff_surp'] <= value_threshold]
event_list_df = event_list_df.drop(['eps_pct_diff_surp'], axis=1)
# Reset index so day_0_date is a column again
event_list_df = event_list_df.reset_index()
event_list_df = event_list_df.rename(index=str, columns={'asof_date': 'day_0_date'})
#print(event_list_df)
#print(event_list_df.loc['2017':'2018'])
return event_list_df
def main():
start_date = dt.datetime(2017, 1, 1)
end_date = dt.datetime(2017, 12, 31)
value_threshold = -100.0
event_list_df = read_events('earnings_surprises_2017.csv', start_date, end_date, value_threshold)
#print('The event list:\n {}'.format(event_list_df))
data_provider = DataProviderYahoo()
event_study = EventStudyNaiveModel(data_provider, event_list_df)
# Run the event study looking 6 periods before the event and 6 periods after the event
num_pre_event_window_periods = num_post_event_window_periods = 6
market_ticker = 'SPY'
results = event_study.run_naive_model(market_ticker, num_pre_event_window_periods, num_post_event_window_periods)
print('\nStarted with {} events and processed {} events.'.format(results.num_starting_events,
results.num_events_processed))
print('\nAAR (%) for all the securities over the event window:\n{}'.format(
(results.aar * 100).round(2).to_frame().T.to_string(index=False)))
print('\nCAAR (%) for all the securities over the event window:\n{}'.format(
(results.caar * 100).round(2).to_frame().T.to_string(index=False)))
results.plot("Negative earning surprises and their impact on stock returns", False, 'negative_earnings_surprises.pdf')
if __name__ == '__main__':
main()
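# Hedged data sketch (not part of the original script): the input CSV is assumed to
# contain at least the columns selected by read_events above, e.g.
#
#   ticker,eps_pct_diff_surp,asof_date,act_rpt_code
#   AAPL,-112.5,2017-02-01,Q1
#   MSFT,-105.0,2017-04-27,Q3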
|
the-stack_106_21285
|
#!/usr/bin/env python
import os
import time
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import sys
if len(sys.argv) < 2:
print("Usage: killswitch.py /path/to/kill_script")
exit(1)
full_kill_script_path = ""
if os.path.isfile(sys.argv[1]):
full_kill_script_path = os.path.abspath(sys.argv[1])
print ("Killswitch script found at " + full_kill_script_path)
else:
print ("No script found at " + sys.argv[1])
exit(2)
RED_PIN = 15
GREEN_PIN = 13
BLUE_PIN = 11
INPUT_PIN = 10
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BOARD) # Use physical pin numbering
GPIO.setup(RED_PIN, GPIO.OUT)
GPIO.setup(GREEN_PIN, GPIO.OUT)
GPIO.setup(BLUE_PIN, GPIO.OUT)
GPIO.setup(INPUT_PIN, GPIO.IN)
def lights_off():
GPIO.output(RED_PIN, GPIO.LOW)
GPIO.output(GREEN_PIN, GPIO.LOW)
GPIO.output(BLUE_PIN, GPIO.LOW)
def light_on(chosenLight):
GPIO.output(chosenLight, GPIO.HIGH)
def lights_cycle():
lights_off()
light_on(RED_PIN)
time.sleep(2)
lights_off()
light_on(GREEN_PIN)
time.sleep(2)
lights_off()
light_on(BLUE_PIN)
time.sleep(2)
lights_off()
def on_button_pushed(channel):
if GPIO.input(channel) == GPIO.HIGH:
print("Killswitch pushed!")
lights_off()
light_on(RED_PIN)
print("Running the kill script...")
os.system(full_kill_script_path) # User-defined executable script here which can do anything
print("Kill script completed.")
time.sleep(5)
lights_off()
light_on(BLUE_PIN)
time.sleep(2)
lights_off()
lights_cycle()
GPIO.add_event_detect(INPUT_PIN, GPIO.BOTH, callback=on_button_pushed) # On any kind of event for the input pin, trigger the callback
try:
    while True:
        time.sleep(1)  # Idle; the GPIO event callback does the work
finally:
    GPIO.cleanup()
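# Hedged sketch (not part of the original script): a minimal companion kill script
# this watcher could be pointed at; the path and commands are placeholders.
#
#   #!/bin/sh
#   # e.g. /home/pi/kill.sh  (pass its path as the first argument to killswitch.py)
#   systemctl stop my-sensitive-service
#   shutdown -h now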
|
the-stack_106_21286
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""CIFAR10 dataset."""
import numpy as np
import os
import pickle
import torch
import torch.utils.data
from sscls.core.config import cfg
import sscls.datasets.transforms as transforms
import sscls.utils.logging as lu
logger = lu.get_logger(__name__)
# Per-channel mean and SD values in BGR order
_MEAN = [125.3, 123.0, 113.9]
_SD = [63.0, 62.1, 66.7]
class Cifar10(torch.utils.data.Dataset):
"""CIFAR-10 dataset."""
def __init__(self, data_path, split):
assert os.path.exists(data_path), \
'Data path \'{}\' not found'.format(data_path)
assert split in ['train', 'test'], \
'Split \'{}\' not supported for cifar'.format(split)
logger.info('Constructing CIFAR-10 {}...'.format(split))
self._data_path = data_path
self._split = split
# Data format:
# self._inputs - (split_size, 3, im_size, im_size) ndarray
# self._labels - split_size list
self._inputs, self._labels = self._load_data()
def _load_batch(self, batch_path):
with open(batch_path, 'rb') as f:
d = pickle.load(f, encoding='bytes')
return d[b'data'], d[b'labels']
def _load_data(self):
"""Loads data in memory."""
logger.info('{} data path: {}'.format(self._split, self._data_path))
# Compute data batch names
if self._split == 'train':
batch_names = ['data_batch_{}'.format(i) for i in range(1, 6)]
else:
batch_names = ['test_batch']
# Load data batches
inputs, labels = [], []
for batch_name in batch_names:
batch_path = os.path.join(self._data_path, batch_name)
inputs_batch, labels_batch = self._load_batch(batch_path)
inputs.append(inputs_batch)
labels += labels_batch
# Combine and reshape the inputs
inputs = np.vstack(inputs).astype(np.float32)
inputs = inputs.reshape((-1, 3, cfg.TRAIN.IM_SIZE, cfg.TRAIN.IM_SIZE))
return inputs, labels
def _prepare_im(self, im):
"""Prepares the image for network input."""
im = transforms.color_norm(im, _MEAN, _SD)
if self._split == 'train':
im = transforms.horizontal_flip(im=im, p=0.5)
im = transforms.random_crop(
im=im, size=cfg.TRAIN.IM_SIZE, pad_size=4
)
return im
def __getitem__(self, index):
im, label = self._inputs[index, ...].copy(), self._labels[index]
im = self._prepare_im(im)
return im, label
def __len__(self):
return self._inputs.shape[0]
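# Hedged usage sketch (not part of the original module): the data path is a
# placeholder and cfg must already be configured; shown only to illustrate how
# this dataset plugs into a standard PyTorch DataLoader.
#
#   from torch.utils.data import DataLoader
#   train_set = Cifar10('/path/to/cifar-10-batches-py', 'train')
#   loader = DataLoader(train_set, batch_size=128, shuffle=True, num_workers=4)
#   ims, labels = next(iter(loader))  # ims: (128, 3, IM_SIZE, IM_SIZE) float tensor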
|
the-stack_106_21288
|
import cv2
import numpy as np
from mmpose.core.post_processing import transform_preds
def _calc_distances(preds, targets, mask, normalize):
"""Calculate the normalized distances between preds and target.
Note:
batch_size: N
num_keypoints: K
Args:
preds (np.ndarray[N, K, 2]): Predicted keypoint location.
targets (np.ndarray[N, K, 2]): Groundtruth keypoint location.
        mask (np.ndarray[N, K]): Visibility mask of the target keypoints.
        normalize (np.ndarray[N, 2]): Typical value is heatmap_size/10
Returns:
np.ndarray[K, N]: The normalized distances.
If target keypoints are missing, the distance is -1.
"""
N, K, _ = preds.shape
distances = np.full((K, N), -1, dtype=np.float32)
distances[mask.T] = np.linalg.norm(
((preds - targets) / normalize[:, None, :])[mask], axis=-1)
return distances
def _distance_acc(distances, thr=0.5):
"""Return the percentage below the distance threshold, while ignoring
distances values with -1.
Note:
batch_size: N
Args:
distances (np.ndarray[N, ]): The normalized distances.
thr (float): Threshold of the distances.
Returns:
float: Percentage of distances below the threshold.
If all target keypoints are missing, return -1.
"""
distance_valid = distances != -1
num_distance_valid = distance_valid.sum()
if num_distance_valid > 0:
return (distances[distance_valid] < thr).sum() / num_distance_valid
return -1
def _get_max_preds(heatmaps):
"""Get keypoint predictions from score maps.
Note:
batch_size: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps.
Returns:
tuple: A tuple containing aggregated results.
- preds (np.ndarray[N, K, 2]): Predicted keypoint location.
- maxvals (np.ndarray[N, K, 1]): Scores (confidence) of the keypoints.
"""
assert isinstance(heatmaps,
np.ndarray), ('heatmaps should be numpy.ndarray')
assert heatmaps.ndim == 4, 'batch_images should be 4-ndim'
N, K, _, W = heatmaps.shape
heatmaps_reshaped = heatmaps.reshape((N, K, -1))
idx = np.argmax(heatmaps_reshaped, 2).reshape((N, K, 1))
maxvals = np.amax(heatmaps_reshaped, 2).reshape((N, K, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = preds[:, :, 0] % W
preds[:, :, 1] = preds[:, :, 1] // W
preds = np.where(np.tile(maxvals, (1, 1, 2)) > 0.0, preds, -1)
return preds, maxvals
def pose_pck_accuracy(output, target, mask, thr=0.5, normalize=None):
"""Calculate the pose accuracy of PCK for each individual keypoint and the
averaged accuracy across all keypoints from heatmaps.
Note:
The PCK performance metric is the percentage of joints with
predicted locations that are no further than a normalized
distance of the ground truth. Here we use [w,h]/10.
batch_size: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
output (np.ndarray[N, K, H, W]): Model output heatmaps.
target (np.ndarray[N, K, H, W]): Groundtruth heatmaps.
mask (np.ndarray[N, K]): Visibility of the target. False for invisible
joints, and True for visible. Invisible joints will be ignored for
accuracy calculation.
thr (float): Threshold of PCK calculation.
normalize (np.ndarray[N, 2]): Normalization factor for H&W.
Returns:
tuple: A tuple containing keypoint accuracy.
- np.ndarray[K]: Accuracy of each keypoint.
- float: Averaged accuracy across all keypoints.
- int: Number of valid keypoints.
"""
N, K, H, W = output.shape
if K == 0:
return None, 0, 0
if normalize is None:
normalize = np.tile(np.array([[H, W]]) / 10, (N, 1))
pred, _ = _get_max_preds(output)
gt, _ = _get_max_preds(target)
return keypoint_pck_accuracy(pred, gt, mask, thr, normalize)
def keypoint_pck_accuracy(pred, gt, mask, thr, normalize):
"""Calculate the pose accuracy of PCK for each individual keypoint and the
averaged accuracy across all keypoints for coordinates.
Note:
batch_size: N
num_keypoints: K
Args:
pred (np.ndarray[N, K, 2]): Predicted keypoint location.
gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
mask (np.ndarray[N, K]): Visibility of the target. False for invisible
joints, and True for visible. Invisible joints will be ignored for
accuracy calculation.
thr (float): Threshold of PCK calculation.
normalize (np.ndarray[N, 2]): Normalization factor.
Returns:
tuple: A tuple containing keypoint accuracy.
- acc (np.ndarray[K]): Accuracy of each keypoint.
- avg_acc (float): Averaged accuracy across all keypoints.
- cnt (int): Number of valid keypoints.
"""
distances = _calc_distances(pred, gt, mask, normalize)
acc = np.array([_distance_acc(d, thr) for d in distances])
valid_acc = acc[acc >= 0]
cnt = len(valid_acc)
avg_acc = valid_acc.mean() if cnt > 0 else 0
return acc, avg_acc, cnt
def keypoint_auc(pred, gt, mask, normalize, num_step=20):
"""Calculate the pose accuracy of PCK for each individual keypoint and the
averaged accuracy across all keypoints for coordinates.
Note:
batch_size: N
num_keypoints: K
Args:
pred (np.ndarray[N, K, 2]): Predicted keypoint location.
gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
mask (np.ndarray[N, K]): Visibility of the target. False for invisible
joints, and True for visible. Invisible joints will be ignored for
accuracy calculation.
normalize (float): Normalization factor.
Returns:
float: Area under curve.
"""
nor = np.tile(np.array([[normalize, normalize]]), (pred.shape[0], 1))
x = [1.0 * i / num_step for i in range(num_step)]
y = []
for thr in x:
_, avg_acc, _ = keypoint_pck_accuracy(pred, gt, mask, thr, nor)
y.append(avg_acc)
auc = 0
for i in range(num_step):
auc += 1.0 / num_step * y[i]
return auc
def keypoint_epe(pred, gt, mask):
"""Calculate the end-point error.
Note:
batch_size: N
num_keypoints: K
Args:
pred (np.ndarray[N, K, 2]): Predicted keypoint location.
gt (np.ndarray[N, K, 2]): Groundtruth keypoint location.
mask (np.ndarray[N, K]): Visibility of the target. False for invisible
joints, and True for visible. Invisible joints will be ignored for
accuracy calculation.
Returns:
float: Average end-point error.
"""
distances = _calc_distances(
pred, gt, mask, np.tile(np.array([[1, 1]]), (pred.shape[0], 1)))
distance_valid = distances[distances != -1]
valid_num = len(distance_valid)
return distance_valid.sum() / valid_num
def _taylor(heatmap, coord):
"""Distribution aware coordinate decoding method.
Note:
heatmap height: H
heatmap width: W
Args:
heatmap (np.ndarray[H, W]): Heatmap of a particular joint type.
coord (np.ndarray[2,]): Coordinates of the predicted keypoints.
Returns:
np.ndarray[2,]: Updated coordinates.
"""
H, W = heatmap.shape[:2]
px, py = int(coord[0]), int(coord[1])
if 1 < px < W - 2 and 1 < py < H - 2:
dx = 0.5 * (heatmap[py][px + 1] - heatmap[py][px - 1])
dy = 0.5 * (heatmap[py + 1][px] - heatmap[py - 1][px])
dxx = 0.25 * (
heatmap[py][px + 2] - 2 * heatmap[py][px] + heatmap[py][px - 2])
dxy = 0.25 * (
heatmap[py + 1][px + 1] - heatmap[py - 1][px + 1] -
heatmap[py + 1][px - 1] + heatmap[py - 1][px - 1])
dyy = 0.25 * (
heatmap[py + 2 * 1][px] - 2 * heatmap[py][px] +
heatmap[py - 2 * 1][px])
derivative = np.array([[dx], [dy]])
hessian = np.array([[dxx, dxy], [dxy, dyy]])
if dxx * dyy - dxy**2 != 0:
hessianinv = np.linalg.inv(hessian)
offset = -hessianinv @ derivative
offset = np.squeeze(np.array(offset.T), axis=0)
coord += offset
return coord
def _gaussian_blur(heatmaps, kernel=11):
"""Modulate heatmap distribution with Gaussian.
sigma = 0.3*((kernel_size-1)*0.5-1)+0.8
sigma~=3 if k=17
sigma=2 if k=11;
sigma~=1.5 if k=7;
sigma~=1 if k=3;
Note:
batch_size: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps.
kernel (int): Gaussian kernel size (K) for modulation, which should
match the heatmap gaussian sigma when training.
K=17 for sigma=3 and k=11 for sigma=2.
Returns:
np.ndarray[N, K, H, W]: Modulated heatmap distribution.
"""
assert kernel % 2 == 1
border = (kernel - 1) // 2
batch_size = heatmaps.shape[0]
num_joints = heatmaps.shape[1]
height = heatmaps.shape[2]
width = heatmaps.shape[3]
for i in range(batch_size):
for j in range(num_joints):
origin_max = np.max(heatmaps[i, j])
dr = np.zeros((height + 2 * border, width + 2 * border),
dtype=np.float32)
dr[border:-border, border:-border] = heatmaps[i, j].copy()
dr = cv2.GaussianBlur(dr, (kernel, kernel), 0)
heatmaps[i, j] = dr[border:-border, border:-border].copy()
heatmaps[i, j] *= origin_max / np.max(heatmaps[i, j])
return heatmaps
def keypoints_from_heatmaps(heatmaps,
center,
scale,
post_process=True,
unbiased=False,
kernel=11):
"""Get final keypoint predictions from heatmaps and transform them back to
the image.
Note:
batch_size: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
heatmaps (np.ndarray[N, K, H, W]): model predicted heatmaps.
center (np.ndarray[N, 2]): Center of the bounding box (x, y).
scale (np.ndarray[N, 2]): Scale of the bounding box
wrt height/width.
post_process (bool): Option to use post processing or not.
unbiased (bool): Option to use unbiased decoding.
Paper ref: Zhang et al. Distribution-Aware Coordinate
Representation for Human Pose Estimation (CVPR 2020).
kernel (int): Gaussian kernel size (K) for modulation, which should
match the heatmap gaussian sigma when training.
K=17 for sigma=3 and k=11 for sigma=2.
Returns:
tuple: A tuple containing keypoint predictions and scores.
- preds (np.ndarray[N, K, 2]): Predicted keypoint location in images.
- maxvals (np.ndarray[N, K, 1]): Scores (confidence) of the keypoints.
"""
preds, maxvals = _get_max_preds(heatmaps)
N, K, H, W = heatmaps.shape
if post_process:
if unbiased: # alleviate biased coordinate
assert kernel > 0
# apply Gaussian distribution modulation.
heatmaps = _gaussian_blur(heatmaps, kernel)
heatmaps = np.maximum(heatmaps, 1e-10)
heatmaps = np.log(heatmaps)
for n in range(N):
for k in range(K):
preds[n][k] = _taylor(heatmaps[n][k], preds[n][k])
else:
# add +/-0.25 shift to the predicted locations for higher acc.
for n in range(N):
for k in range(K):
heatmap = heatmaps[n][k]
px = int(preds[n][k][0])
py = int(preds[n][k][1])
if 1 < px < W - 1 and 1 < py < H - 1:
diff = np.array([
heatmap[py][px + 1] - heatmap[py][px - 1],
heatmap[py + 1][px] - heatmap[py - 1][px]
])
preds[n][k] += np.sign(diff) * .25
# Transform back to the image
for i in range(N):
preds[i] = transform_preds(preds[i], center[i], scale[i], [W, H])
return preds, maxvals
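# Hedged usage sketch (not part of the original module): random heatmaps, only to
# illustrate the expected shapes; with identical prediction and target maps the
# PCK accuracy is trivially 1.0 for every keypoint.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    output = rng.rand(2, 17, 64, 48).astype(np.float32)   # N=2 samples, K=17 keypoints
    target = output.copy()
    mask = np.ones((2, 17), dtype=bool)
    acc, avg_acc, cnt = pose_pck_accuracy(output, target, mask)
    print(acc.shape, avg_acc, cnt)                         # (17,) 1.0 17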
|
the-stack_106_21290
|
number = int(input("Enter a number to check whether it is prime: "))
n = int(number / 2)
flag = 0
for i in range(2, n + 1):
    if number % i == 0:  # found a divisor, so the number is not prime
        flag = 1
        break
if flag == 1:
    print("not a prime number")
else:
    print("prime number")
|
the-stack_106_21295
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, annotations, division, print_function
import argparse
import logging
import os
import re
import time
from enum import Enum, unique
from typing import Any, Callable, Dict, List, Optional, Tuple
from rich.console import Console
from rich.logging import RichHandler
from rich.progress import BarColumn, Progress, SpinnerColumn, TimeRemainingColumn
listStr = List[str]
RENAME_DELAY = 0.1
__author__ = "Marcus Bruno Fernandes Silva"
__maintainer__ = __author__
__email__ = "[email protected]"
__version__ = "1.5.3"
console = Console()
cesp_logger = logging.getLogger("cesp")
root_logger = logging.getLogger("main")
@unique
class ChangeItemMode(Enum):
all = 1
files = 2
dirs = 3
class cesp:
_special_chars = {
"?": "_",
"$": "_",
"%": "_",
"°": "o",
"!": "_",
"@": "_",
'"': "_",
"´": "",
"'": "",
"¨": "_",
"#": "_",
"|": "_",
"<": "_",
">": "_",
"/": "_",
"§": "_",
"\\": "_",
"&": "_and_",
"*": "_",
":": "_",
";": "_",
",": "_",
"+": "_",
"=": "_",
"~": "",
"^": "",
"ª": "a",
"º": "o",
"°": "o",
}
_utf_chars = {
"ç": "c",
"ä": "a",
"ã": "a",
"â": "a",
"á": "a",
"à": "a",
"é": "e",
"ê": "e",
"è": "e",
"í": "i",
"î": "i",
"ì": "i",
"ó": "o",
"ô": "o",
"ò": "o",
"õ": "o",
"ú": "u",
"ü": "u",
"û": "u",
"ù": "u",
}
def __init__(self) -> None:
self.logger = logging.getLogger("cesp")
self.logger.debug("Constructing object")
self._path = os.path.realpath(os.getcwd())
self._recursive = False
self._ignored_dirs: listStr = []
self._ignored_exts: listStr = []
self._convert_utf = False
self._convert_dots = False
self._convert_brackets = False
self._remove_special_chars = False
self._quiet = False
self._no_change = True
self._change: ChangeItemMode = ChangeItemMode.files
self._print: Callable[[Any], None] = lambda x: None
self._update_print()
self.original_path = os.getcwd()
self._append_upper_to_dict(self._utf_chars)
# Commands
def fetch(
self, callback: Optional[Callable[[str, int], None]] = None
) -> Tuple[listStr, listStr]:
self.logger.debug('"fetch" called')
original_files = []
renamed_files = []
if not os.path.isdir(self._path):
raise ValueError("Invalid path.")
self.logger.debug('Original path "{}"'.format(self.original_path))
self.logger.debug('Changing path to "{}"'.format(self._path))
os.chdir(self._path)
self.logger.debug("Walking directory tree and collecting names to be renamed")
total = 0
for root, dirs, files in os.walk(".", topdown=True):
files = [
f
for f in files
if not f.startswith(".") and self._isPathGood(os.path.join(root, f))
]
dirs = [
d
for d in dirs
if not d.startswith(".") and self._isPathGood(os.path.join(root, d))
]
if self._change == ChangeItemMode.files:
wd = files
elif self._change == ChangeItemMode.dirs:
wd = dirs
else:
wd = files + dirs
for f in wd:
new_f = self._get_converted_name(f)
if f != new_f:
original_files.append(os.path.join(root, f))
renamed_files.append(os.path.join(root, new_f))
total += 1
if callback is not None:
callback(f, total)
if not self._recursive:
break
self.logger.debug("Collected {} files to be renamed".format(len(renamed_files)))
return list(reversed(original_files)), list(reversed(renamed_files))
def return_to_original_path(self) -> None:
self.logger.debug('Returning to path "{}"'.format(self.original_path))
os.chdir(self.original_path)
def rename_item(self, f: str, new_f: str, print_rename: bool = True) -> None:
if os.path.exists(new_f):
self._print(f"[bold]{new_f}[/] already exists")
else:
if print_rename:
base_new_f = os.path.basename(new_f)
base_old_f = os.path.basename(f)
sep = r"\\"
fmt_old_f = (
f"[dim]{os.path.dirname(f)}{sep}[/dim][bold red]{base_old_f}[/]"
)
fmt_new_f = f"[dim]{os.path.dirname(new_f)}{sep}[/dim][bold green]{base_new_f}[/]"
self._print(f"{fmt_old_f} -> {fmt_new_f}")
if not self._no_change:
os.rename(f, new_f)
def rename_list(self, original_files: listStr, renamed_files: listStr) -> int:
self.logger.debug("Renaming files")
for f, new_f in zip(original_files, renamed_files):
self.rename_item(f, new_f)
if not self._no_change:
self.logger.debug("Renamed {} files".format(len(renamed_files)))
self.logger.debug("rename_list method finished")
return 0
# helper Functions
def _oslistdir(
self, path: str, ignoredDirs: listStr = [], ignoredExts: listStr = []
) -> listStr:
self.logger.debug("_oslistdir called")
list_dirs = []
ignoredDirs = [f[:-1] if f[-1] == "/" else f for f in ignoredDirs]
ignoredExts = ["." + f for f in ignoredExts if not f.startswith(".")]
list_dirs = [
f
for f in os.listdir(path)
if (self._isPathGood(f) and not f.startswith("."))
]
return list_dirs
def _isPathGood(self, path: str) -> bool:
return self._isDirGood(path) and self._isExtensionGood(path)
def _isDirGood(self, dir: str) -> bool:
full_dir = os.path.realpath(dir)
for ignored_dir in self._ignored_dirs:
if ignored_dir in full_dir:
return False
return True
def _isExtensionGood(self, file: str) -> bool:
ext = os.path.splitext(file)[-1]
return ext not in self._ignored_exts
def _update_print(self) -> None:
self.logger.debug("_update_print called")
if self._quiet:
self._print = lambda *args, **kwargs: None
else:
self._print = lambda *args, **kwargs: console.print(*args, **kwargs)
def _get_converted_name(self, name: str) -> str:
if self._convert_utf:
name = self._convertUTF(name)
if self._convert_dots:
name = self._convertDots(name)
if self._convert_brackets:
name = self._convertBrackets(name)
if self._remove_special_chars:
name = self._removeSpecialChars(name)
name = self._removeBlankSpaces(name)
return name
def _removeBlankSpaces(self, name: str) -> str:
name = re.sub(r"\s+", r"_", name)
name = re.sub(r"_+", r"_", name)
name = re.sub(r"(^_|_$)", r"", name)
name = re.sub(r"_\.", r".", name)
return name
def _removeSpecialChars(self, name: str) -> str:
for char in name:
if char in self._special_chars:
name = name.replace(char, self._special_chars[char])
return name
def _convertUTF(self, name: str) -> str:
for char in name:
if char in self._utf_chars:
name = name.replace(char, self._utf_chars[char])
return name
def _convertDots(self, name: str) -> str:
base_name = os.path.splitext(name)[0]
name_extension = os.path.splitext(name)[-1]
name = base_name.replace(".", "_").replace(",", "_") + name_extension
return name
def _convertBrackets(self, name: str) -> str:
return re.sub(r"\(|\)|\[|\]|\{|\}", r"", name)
def _fixDotInIgnoredExtensions(self) -> None:
for i in range(len(self._ignored_exts)):
ext = self._ignored_exts[i]
if not ext.startswith("."):
self._ignored_exts[i] = "." + ext
def _fullPathIgnoredDirs(self) -> None:
for i in range(len(self._ignored_dirs)):
self._ignored_dirs[i] = os.path.realpath(
os.path.join(self._path, self._ignored_dirs[i])
)
def _append_upper_to_dict(self, d: Dict[str, str]) -> None:
keys = list(self._utf_chars.keys())
for key in keys:
key_upper = key.upper()
value_upper = self._utf_chars[key].upper()
self._utf_chars[key_upper] = value_upper
# Setters
def setRecursive(self, recursive: bool) -> None:
self._recursive = recursive
def setIgnoredDirs(self, ignoredDirs: listStr) -> None:
self._ignored_dirs = ignoredDirs
self._fullPathIgnoredDirs()
def setIgnoredExts(self, ignoredExts: listStr) -> None:
self._ignored_exts = ignoredExts
self._fixDotInIgnoredExtensions()
def setUTF(self, convertUTF: bool) -> None:
self._convert_utf = convertUTF
def setDots(self, convertDots: bool) -> None:
self._convert_dots = convertDots
def setBrackets(self, convertBrackets: bool) -> None:
self._convert_brackets = convertBrackets
def setSpecialChars(self, removeSpecialChars: bool) -> None:
self._remove_special_chars = removeSpecialChars
def setQuiet(self, quiet: bool) -> None:
self._quiet = quiet
self._update_print()
def setNoChange(self, noChange: bool) -> None:
self._no_change = noChange
def setChange(self, changeOption: ChangeItemMode) -> None:
self._change = changeOption
def setPath(self, path: str) -> None:
self._path = os.path.realpath(path)
# Getters
@staticmethod
def getVersion() -> str:
return __version__
def getPath(self) -> str:
return self._path
def isRecursive(self) -> bool:
return self._recursive
def getIgnoredDirs(self) -> listStr:
return self._ignored_dirs
def getIgnoredExtensions(self) -> listStr:
return self._ignored_exts
def whatToChange(self) -> ChangeItemMode:
return self._change
def isNoChange(self) -> bool:
return self._no_change
def main() -> None:
start_time = time.time()
version_message = (
"cesp "
+ cesp.getVersion()
+ os.linesep
+ os.linesep
+ "Author: "
+ __maintainer__
+ os.linesep
+ "email: "
+ __email__
)
desc = (
version_message
+ os.linesep
+ os.linesep
+ "Converts blank space to underscore and other characters to avoid problems"
)
list_of_choices = {
"files": ChangeItemMode.files,
"dirs": ChangeItemMode.dirs,
"all": ChangeItemMode.all,
}
choices_keys = list(list_of_choices.keys())
parser = argparse.ArgumentParser(
description=desc, formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument("path", nargs="?", default=os.getcwd(), help="path")
parser.add_argument(
"-c",
"--change",
dest="change",
nargs=1,
default=[choices_keys[0]],
help="rename files, directories or all",
choices=choices_keys,
)
parser.add_argument(
"-r", dest="recursive", help="recursive action", action="store_true"
)
parser.add_argument(
"-d", "--dots", dest="dots", help="replace dots", action="store_true"
)
parser.add_argument(
"-u", "--UTF", dest="UTF", help="subs. UTF-8 chars", action="store_true"
)
parser.add_argument(
"-b", dest="brackets", help="remove brackets", action="store_true"
)
parser.add_argument(
"-s",
"--special-chars",
dest="special_chars",
help="remove special characters",
action="store_true",
)
parser.add_argument(
"-i",
"--ignore-dirs",
dest="ignoredirs",
default=[],
help="ignore dirs",
nargs="+",
)
parser.add_argument(
"-I",
"--ignore-exts",
dest="ignoreexts",
default=[],
help="ignore exts",
nargs="+",
)
parser.add_argument(
"-q", "--quiet", dest="quiet", help="no verbosity", action="store_true"
)
parser.add_argument(
"-n",
"--no-change",
dest="nochange",
help="do not make actual changes",
action="store_true",
)
parser.add_argument(
"--debug",
help="display debug level information",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument("-v", "--version", action="version", version=version_message)
args = parser.parse_args()
FORMAT = "%(message)s"
logging.basicConfig(
level=args.loglevel,
format=FORMAT,
datefmt="[%X]",
handlers=[RichHandler(console=console)],
)
root_logger.debug("Args passed: {}".format(args))
cesper: cesp = cesp()
root_logger.debug("Passings args to cesper object")
cesper.setRecursive(args.recursive)
cesper.setIgnoredDirs(args.ignoredirs)
cesper.setIgnoredExts(args.ignoreexts)
cesper.setUTF(args.UTF)
cesper.setDots(args.dots)
cesper.setBrackets(args.brackets)
cesper.setQuiet(args.quiet)
cesper.setNoChange(args.nochange)
cesper.setChange(list_of_choices[args.change[0]])
cesper.setSpecialChars(args.special_chars)
cesper.setPath(args.path)
root_logger.debug("Calling cesper.fetch()")
og_files: listStr = []
ren_files: listStr = []
fetching_message = "Fetching files..."
with Progress(
SpinnerColumn(),
fetching_message,
"[dim]{task.fields[extra]}[/]",
transient=True,
) as progress:
task = progress.add_task(description="", start=False, extra="")
og_files, ren_files = cesper.fetch(
lambda f, total: progress.update(task, extra=f"found {total} - latest: {f}")
)
files_num = len(og_files)
console.print(f"[bold green]OK![/] {fetching_message}")
console.print(f"Found {files_num} files")
if files_num > 0:
if args.nochange:
cesper.rename_list(og_files, ren_files)
console.print("[bold red]No changes were made[/]")
else:
with Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
TimeRemainingColumn(),
"{task.fields[file]}",
console=console,
) as progress:
task = progress.add_task(
description="Renaming...", total=files_num, file=""
)
for f, new_f in zip(og_files, ren_files):
f_name = os.path.basename(f)
progress.update(task, file=f"- [dim]{f_name}[/]")
cesper.rename_item(f, new_f)
time.sleep(RENAME_DELAY)
progress.advance(task, 1)
progress.update(task, file="")
cesper.return_to_original_path()
elapsed_time = time.time() - start_time
console.print(f"Finished in {elapsed_time:.2f} seconds")
if __name__ == "__main__":
main()
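# Hedged usage sketch (not part of the original script): example invocations,
# assuming the module is installed as a "cesp" console entry point (otherwise
# substitute a direct script call).
#
#   cesp -r -u -d ~/Downloads   # recursively rename files, replacing UTF-8 chars and dots
#   cesp -n -c all .            # dry run over files and directories in the cwd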
|
the-stack_106_21299
|
from src.loader.LoaderInterface import LoaderInterface
from src.utility.Utility import Utility
class ObjectLoader(LoaderInterface):
""" Just imports the objects for the given file path
The import will load all materials into cycle nodes.
**Configuration**:
.. list-table::
:widths: 25 100 10
:header-rows: 1
* - Parameter
- Description
- Type
* - path
- The path to the 3D data file to load. Can be either path or paths not both.
- string
* - paths
- A list of paths of 3D data files to load. Can be either path or paths not both.
- list
"""
def __init__(self, config):
LoaderInterface.__init__(self, config)
def run(self):
if self.config.has_param('path') and self.config.has_param('paths'):
raise Exception("Objectloader can not use path and paths in the same module!")
if self.config.has_param('path'):
file_path = Utility.resolve_path(self.config.get_string("path"))
loaded_objects = Utility.import_objects(filepath=file_path)
elif self.config.has_param('paths'):
file_paths = self.config.get_list('paths')
loaded_objects = []
# the file paths are mapped here to object names
cache_objects = {}
for file_path in file_paths:
resolved_file_path = Utility.resolve_path(file_path)
current_objects = Utility.import_objects(filepath=resolved_file_path, cached_objects=cache_objects)
loaded_objects.extend(current_objects)
else:
raise Exception("Loader module needs either a path or paths config value")
if not loaded_objects:
raise Exception("No objects have been loaded here, check the config.")
# Set the add_properties of all imported objects
self._set_properties(loaded_objects)
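# Hedged configuration sketch (not part of the original module): the paths are
# placeholders. A BlenderProc config entry of this shape would exercise the
# 'paths' branch of run() above:
#
#   {
#     "module": "loader.ObjectLoader",
#     "config": {
#       "paths": ["<args:0>/scene.obj", "<args:0>/extra.ply"]
#     }
#   }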
|
the-stack_106_21302
|
from dask.distributed import get_client, futures_of
from dask import delayed
from toolz import partition_all
import numpy as np
def _cluster_mode():
try:
get_client()
return True
except ValueError:
return False
def get_futures(lst):
""" Loop through items in list to keep order of delayed objects
when transforming to futures. firect call of futures_of does
not keep the order of the objects
Parameters
----------
lst : array-like
array containing delayed objects
"""
f = []
for i in lst:
f.append(futures_of(i)[0])
return f
@delayed
def delay_func_chunk(func, chunk):
res = []
for x in chunk:
res.append(func(x))
return res
def get_summaries(data, func, chunk_size):
# assumed data is large, make chunks
#assert len(data)/chunk_size > 1.0, "With chunk_size: {0} will only create 1 chunk, choose a sampler chunk_size".format(chunk_size)
data_chunked = partition_all(chunk_size, data)
stats_final = [delayed(func)(chunk) for chunk in data_chunked]
return stats_final
def get_fixed_mean(data, func, chunk_size):
"""
Computes the mean over summary statistics on fixed data
Parameters
----------
chunk_size : int
the partition size when splitting the fixed data. For avoiding many individual tasks
in dask if the data is large
Returns
-------
ndarray
scaled distance
"""
# compute summary stats on fixed data
stats = get_summaries(data, func, chunk_size)
mean = delayed(np.mean)
# reducer 1 mean for each batch
stats_mean = mean(stats, axis=0)
# reducer 2 mean over batches
stats_mean = mean(stats_mean, axis=0, keepdims=True)
return stats_mean
def get_graph_chunked(param_func, sim_func, summaries_func=None,
batch_size=10, chunk_size=2):
"""
Constructs the dask computational graph involving sampling, simulation,
summary statistics and distances.
Parameters
----------
param_func : callable
the parameter sampling function, see sciope.designs, sciope.sampling
and sciope.utilities.priors
sim_func : callable
the simulator function which takes a parameter point as argument
summaries_func : callable, optional
the summaries statistics function which takes a simulation result,
by default None
    batch_size : int, optional
        the number of points being sampled in each batch, by default 10
    chunk_size : int, optional
        the partition size used when splitting each batch of points into
        dask tasks, by default 2
Returns
-------
dict
        with keys 'parameters', 'trajectories' and 'summarystats',
values being dask delayed objects
"""
# worflow sampling with batch size = batch_size
# Draw from the prior/design
trial_param = param_func(batch_size, chunk_size=chunk_size)
#params_chunked = partition_all(chunk_size, trial_param)
params_chunked = trial_param
# Perform the simulation
sim_result = [delay_func_chunk(sim_func, chunk) for chunk in params_chunked]
# Get the statistic(s)
if summaries_func is not None:
stats_final = [delay_func_chunk(summaries_func, chunk) for chunk in sim_result]
# Calculate the distance between the dataset and the simulated result
else:
stats_final = None
return {"parameters": trial_param, "trajectories": sim_result,
"summarystats": stats_final}
def get_distance(dist_func, X, chunked=True):
if chunked:
sim_dist = [delay_func_chunk(dist_func, chunk) for chunk in X]
else:
sim_dist = [delayed(dist_func)(x) for x in X]
return sim_dist
def get_prediction(pred_func, X, chunked=True):
if chunked:
pred = [delay_func_chunk(pred_func, chunk) for chunk in X]
else:
pred = [delayed(pred_func)(x) for x in X]
return pred
def _reshape_chunks(data):
data = np.asarray(data)
if len(data.shape) > 1:
data = data.reshape(-1, data.shape[-1])
return data
else:
new = []
for chunk in data:
for point in chunk:
new.append(point)
new = np.asarray(new)
assert len(new.shape) > 1
return np.asarray(new)
def get_graph_unchunked(param_func, sim_func, summaries_func=None, dist_func=None,
fixed=None, batch_size=10, ensemble_size=1):
"""
Constructs the dask computational graph involving sampling, simulation,
summary statistics and distances.
Parameters
----------
param_func : callable
the parameter sampling function, see sciope.designs, sciope.sampling
and sciope.utilities.priors
sim_func : callable
the simulator function which takes a parameter point as argument
summaries_func : callable, optional
the summaries statistics function which takes a simulation result,
by default None
dist_func : callable, optional
        the distance function between the fixed and simulated summary
        statistics, by default None
    fixed : array-like, optional
        the fixed/observed data statistics compared against simulated
        summaries by dist_func, by default None
    batch_size : int, optional
        the number of points being sampled in each batch, by default 10
    ensemble_size : int, optional
        the number of simulation replicates per parameter point, by default 1
Returns
-------
dict
with keys 'parameters', 'trajectories', 'summarystats' and 'distances'
values being dask delayed objects
"""
if dist_func is not None:
assert fixed is not None, "If using distance function, parameter 'fixed' can not be None"
# worflow sampling with batch size = batch_size
# Draw from the prior/design
trial_param = param_func(batch_size) * ensemble_size
# Perform the simulation
sim_result = [delayed(sim_func)(x) for x in trial_param]
# Get the statistic(s)
if summaries_func is not None:
sim_stats = [delayed(summaries_func)(x) for x in sim_result]
if ensemble_size > 1:
stats_final = [delayed(np.mean)(sim_stats[i:i + ensemble_size],
axis=0) for i in range(0, len(sim_stats), ensemble_size)]
else:
stats_final = sim_stats
# Calculate the distance between the dataset and the simulated result
if dist_func is not None:
sim_dist = [delayed(dist_func)(fixed, stats)
for stats in stats_final]
else:
sim_dist = None
else:
stats_final = None
sim_dist = None
return {"parameters": trial_param[:batch_size], "trajectories": sim_result,
"summarystats": stats_final, "distances": sim_dist}
|
the-stack_106_21305
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.test import TestCase
from ralph.ui.tests.global_utils import login_as_su
from ralph_assets.tests.utils import MessagesTestMixin, UserFactory
from ralph_assets.tests.utils.licences import LicenceFactory
class TestUserListView(MessagesTestMixin, TestCase):
def setUp(self):
self.client = login_as_su()
def test_users_view(self):
user = UserFactory(**{'username': 'test_user'})
user_page_url = reverse('user_view', args=(user.username,))
response = self.client.get(reverse('user_list'))
self.assertEqual(response.status_code, 200)
self.assertTrue(user.username in response.content)
self.assertTrue(user_page_url in response.content)
class TestUserDetailView(MessagesTestMixin, TestCase):
def setUp(self):
self.client = login_as_su()
def test_users_view_not_found(self):
invalid_url = reverse('user_view', args=('invalid_username',))
response = self.client.get(invalid_url, follow=True)
self.assertMessageEqual(
response,
'User {} not found'.format('invalid_username'),
)
class TestUserEditRelations(TestCase):
def setUp(self):
self.client = login_as_su()
def test_users_view(self):
user = UserFactory(**{'username': 'test_user'})
url = reverse('edit_user_relations', args=(user.username,))
self.assertEqual(user.licences.count(), 0)
post_data = {
'licences': '|'.join([
str(LicenceFactory().id) for i in range(5)]
)
}
self.client.post(url, post_data, follow=True)
self.assertEqual(user.licences.count(), 5)
|
the-stack_106_21307
|
import getopt
import os
import sys
import time
import matplotlib.pyplot as plt
import pandas as pd
import netifaces as ni
CONNECTIONS = 'Connections'
REQS_PER_SEC = 'Requests/Second'
DATA_FILENAME = 'benchmark_output.csv'
PLOT_FILENAME = 'benchmark_plot.png'
PORT = 8081
# retrieve the ip address of the swissknife0 interface
ip = list(
filter(lambda ip: "swissknife1" in ip,
(map(lambda ip: ip["addr"],
ni.ifaddresses('swissknife1')[ni.AF_INET6])
)
)
).pop()
URL = f"http://[{ip}]"
def benchmark(data_filename, plot_filename, port):
with open(data_filename, 'w') as f:
f.write(f'{CONNECTIONS},{REQS_PER_SEC}\n')
f.flush()
for i in range(10, 101, 10):
print(f'Running wrk with {i} connections')
f.write(f'{i},')
f.flush()
out = os.popen(
f'wrk -t 10 -c {i} {URL}:{port} | grep Requests/sec')
line = out.read()
recsPerSec = line.split(' ')[-1]
print('requests per second', recsPerSec)
f.write(recsPerSec)
f.flush()
df = pd.read_csv(data_filename)
print(df)
df.plot(x=CONNECTIONS, y=REQS_PER_SEC)
plt.savefig('./plots/' + plot_filename)
def flamegraph(flamegraph_filename, port):
out = os.popen(
f'wrk -t 10 -c 1000 -d 10 {URL}:{port} &')
out = os.popen(
f'sudo profile -F 99 -adf 10 > ./plots/{flamegraph_filename}_folded &')
time.sleep(15)
out = os.popen(
f'perl ./FlameGraph/flamegraph.pl --colors=java ./plots/{flamegraph_filename}_folded > ./plots/{flamegraph_filename}.svg')
def main(argv):
try:
opts, args = getopt.getopt(argv, "p:f:d:",
["port=" "plot_filename=" "data_filename="])
except getopt.GetoptError:
print("Syntax Error.")
sys.exit(2)
for opt, arg in opts:
if opt in ("-p", "--port"):
global port
port = arg
elif opt in ("-f", "--filename_plot"):
global plot_filename
plot_filename = arg + ".png"
flamegraph_filename = arg
elif opt in ("-d", "--filename_data"):
global data_filename
data_filename = './plots/' + arg + ".csv"
benchmark(data_filename, plot_filename, port)
flamegraph(flamegraph_filename, port)
if __name__ == '__main__':
main(sys.argv[1:])
|
the-stack_106_21308
|
from typing import List, Dict
from overrides import overrides
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.data.fields import FlagField, TextField, SequenceLabelField
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
@Predictor.register("sentence_tagger")
class SentenceTaggerPredictor(Predictor):
"""
Predictor for any model that takes in a sentence and returns
a single set of tags for it. In particular, it can be used with
the [`CrfTagger`](https://docs.allennlp.org/models/main/models/tagging/models/crf_tagger/)
model and also the [`SimpleTagger`](../models/simple_tagger.md) model.
Registered as a `Predictor` with name "sentence_tagger".
"""
def __init__(
self, model: Model, dataset_reader: DatasetReader, language: str = "en_core_web_sm"
) -> None:
super().__init__(model, dataset_reader)
self._tokenizer = SpacyTokenizer(language=language, pos_tags=True)
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
"""
Expects JSON that looks like `{"sentence": "..."}`.
Runs the underlying model, and adds the `"words"` to the output.
"""
sentence = json_dict["sentence"]
tokens = self._tokenizer.tokenize(sentence)
return self._dataset_reader.text_to_instance(tokens)
@overrides
def predictions_to_labeled_instances(
self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
"""
This function currently only handles BIOUL tags.
Imagine an NER model predicts three named entities (each one with potentially
multiple tokens). For each individual entity, we create a new Instance that has
the label set to only that entity and the rest of the tokens are labeled as outside.
We then return a list of those Instances.
For example:
```text
Mary went to Seattle to visit Microsoft Research
U-Per O O U-Loc O O B-Org L-Org
```
We create three instances.
```text
Mary went to Seattle to visit Microsoft Research
U-Per O O O O O O O
Mary went to Seattle to visit Microsoft Research
O O O U-LOC O O O O
Mary went to Seattle to visit Microsoft Research
O O O O O O B-Org L-Org
```
We additionally add a flag to these instances to tell the model to only compute loss on
non-O tags, so that we get gradients that are specific to the particular span prediction
that each instance represents.
"""
predicted_tags = outputs["tags"]
predicted_spans = []
i = 0
while i < len(predicted_tags):
tag = predicted_tags[i]
            # if it's a U, add it to the list
if tag[0] == "U":
current_tags = [t if idx == i else "O" for idx, t in enumerate(predicted_tags)]
predicted_spans.append(current_tags)
            # if it's a B, keep going until you hit an L.
elif tag[0] == "B":
begin_idx = i
while tag[0] != "L":
i += 1
tag = predicted_tags[i]
end_idx = i
current_tags = [
t if begin_idx <= idx <= end_idx else "O"
for idx, t in enumerate(predicted_tags)
]
predicted_spans.append(current_tags)
i += 1
# Creates a new instance for each contiguous tag
instances = []
for labels in predicted_spans:
new_instance = instance.duplicate()
text_field: TextField = instance["tokens"] # type: ignore
new_instance.add_field(
"tags", SequenceLabelField(labels, text_field), self._model.vocab
)
new_instance.add_field("ignore_loss_on_o_tags", FlagField(True))
instances.append(new_instance)
return instances
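# A minimal usage sketch (the archive path is a placeholder for any tagging
# model trained with AllenNLP, e.g. a CrfTagger archive):
#
#   from allennlp.predictors.predictor import Predictor
#   predictor = Predictor.from_path("/path/to/model.tar.gz",
#                                   predictor_name="sentence_tagger")
#   predictor.predict(sentence="Mary went to Seattle.")["tags"]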
|
the-stack_106_21309
|
# The MIT License (MIT)
# Copyright (c) 2021-present EQUENOS
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Repsonsible for handling Params for slash commands"""
from __future__ import annotations
import inspect
import math
from enum import Enum, EnumMeta
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
List,
Literal,
Optional,
Type,
TypeVar,
Union,
cast,
get_origin,
get_type_hints,
)
import disnake
from disnake.app_commands import Option, OptionChoice
from disnake.channel import _channel_type_factory
from disnake.enums import ChannelType, OptionType, try_enum_to_int
from . import errors
from .converter import CONVERTER_MAPPING
if TYPE_CHECKING:
from disnake.interactions import ApplicationCommandInteraction as Interaction
from .slash_core import InvokableSlashCommand, SubCommand
AnySlashCommand = Union[InvokableSlashCommand, SubCommand]
T = TypeVar("T", bound=Any)
ChoiceValue = Union[str, int, float]
Choices = Union[List[OptionChoice], List[ChoiceValue], Dict[str, ChoiceValue]]
TChoice = TypeVar("TChoice", bound=ChoiceValue)
__all__ = (
"ParamInfo",
"Param",
"param",
"option_enum",
)
def _xt_to_xe(xe: Optional[float], xt: Optional[float], direction: float = 1) -> Optional[float]:
"""Function for combining xt and xe
* x > xt && x >= xe ; x >= f(xt, xe, 1)
* x < xt && x <= xe ; x <= f(xt, xe, -1)
"""
if xe is not None:
if xt is not None:
raise TypeError("Cannot combine lt and le or gt and le")
return xe
elif xt is not None:
epsilon = math.ldexp(1.0, -1024)
return xt + (epsilon * direction)
else:
return None
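# Worked example of the intent (argument order is (xe, xt, direction)): for an
# option declared with ge=5, _xt_to_xe(5, None) passes 5 through unchanged; for
# gt=5, _xt_to_xe(None, 5, 1) nudges the bound by a tiny epsilon so the
# exclusive "x > 5" can be stored as the inclusive "x >= bound" Discord expects.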
class ParamInfo:
"""
Parameters
----------
default: Union[:class:`str`, Callable[[:class:`ApplicationCommandInteraction`, Any], Any]]
default value or a default value factory
name: :class:`str`
option's name, the parameter name by default
description: :class:`str`
option's description
converter: Callable[[:class:`ApplicationCommandInteraction`, Any], Any]
the option's converter, takes in an interaction and the argument
"""
TYPES: ClassVar[Dict[type, int]] = {
str: 3,
int: 4,
bool: 5,
disnake.abc.User: 6,
disnake.User: 6,
disnake.Member: 6,
# channels handled separately
disnake.abc.GuildChannel: 7,
disnake.Role: 8,
Union[disnake.Member, disnake.Role]: 9,
disnake.abc.Snowflake: 9,
float: 10,
}
def __init__(
self,
default: Any = ...,
*,
name: str = "",
description: str = None,
converter: Callable[[Interaction, Any], Any] = None,
        autocomplete: Callable[[Interaction, str], Any] = None,
choices: Choices = None,
type: type = None,
channel_types: List[ChannelType] = None,
lt: float = None,
le: float = None,
gt: float = None,
ge: float = None,
) -> None:
self.default = default
self.name = name
self.param_name = name
self.description = description
self.converter = converter
        self.autocomplete = autocomplete
self.choices = choices or []
self.type = type or str
self.channel_types = channel_types or []
self.le = _xt_to_xe(le, lt, -1)
self.ge = _xt_to_xe(ge, gt, 1)
@property
def required(self) -> bool:
return self.default is ...
@property
def discord_type(self) -> OptionType:
return OptionType(self.TYPES.get(self.type, OptionType.string.value))
@discord_type.setter
def discord_type(self, discord_type: OptionType) -> None:
value = try_enum_to_int(discord_type)
for t, v in self.TYPES.items():
if value == v:
self.type = t
return
raise TypeError(f"Type {discord_type} is not a valid Param type")
def __repr__(self):
return f"<Param default={self.default!r} name={self.name!r} description={self.description!r}>"
async def get_default(self, inter: Interaction) -> Any:
"""Gets the default for an interaction"""
if not callable(self.default):
return self.default
default = self.default(inter)
if inspect.isawaitable(default):
return await default
return default
async def verify_type(self, inter: Interaction, argument: Any) -> Any:
"""Check if a type of an argument is correct and possibly fix it"""
# these types never need to be verified
if self.discord_type.value in [3, 4, 5, 8, 9, 10]:
return argument
if issubclass(self.type, disnake.Member):
if isinstance(argument, disnake.Member):
return argument
raise errors.MemberNotFound(str(argument.id))
if issubclass(self.type, disnake.abc.GuildChannel):
if isinstance(argument, self.type):
return argument
raise errors.ChannelNotFound(str(argument.id))
# unexpected types may just be ignored
return argument
async def convert_argument(self, inter: Interaction, argument: Any) -> Any:
"""Convert a value if a converter is given"""
if self.converter is None:
return await self.verify_type(inter, argument)
try:
argument = self.converter(inter, argument)
if inspect.isawaitable(argument):
return await argument
return argument
except Exception as e:
raise errors.ConversionError(self.converter, e) from e
def _parse_enum(self, annotation: Any) -> None:
if isinstance(annotation, (EnumMeta, disnake.enums.EnumMeta)):
self.choices = [OptionChoice(name, value.value) for name, value in annotation.__members__.items()] # type: ignore
else:
self.choices = [OptionChoice(str(i), i) for i in annotation.__args__]
self.type = type(self.choices[0].value)
def parse_annotation(self, annotation: Any) -> None: # sourcery no-metrics
# TODO: Clean up whatever the fuck this is
if isinstance(annotation, ParamInfo):
default = "..." if annotation.default is ... else repr(annotation.default)
r = f'Param({default}, description={annotation.description or "description"!r})'
raise TypeError(f'Param must be a parameter default, not an annotation: "option: type = {r}"')
# Get rid of Optionals
if get_origin(annotation) is Union:
args = [i for i in annotation.__args__ if i not in (None, type(None))]
if len(args) == 1:
annotation = args[0]
else:
annotation.__args__ = args
if self.converter is not None:
# try to parse the converter's annotation, fall back on the annotation itself
parameters = list(inspect.signature(self.converter).parameters.values())
parameter = parameters[2] if inspect.ismethod(self.converter) else parameters[1]
conv_annot = get_type_hints(self.converter).get(parameter.name, Any)
if conv_annot in self.TYPES:
self.type = conv_annot
return
elif isinstance(conv_annot, EnumMeta) or get_origin(conv_annot) is Literal:
self._parse_enum(conv_annot)
return
elif conv_annot is not Any:
raise TypeError("Converters cannot use converter annotations")
elif annotation in CONVERTER_MAPPING:
raise TypeError(
"Cannot use an implicit converter annotation and an unnanotated converter at the same time"
)
# otherwise just parse the annotation normally and hope for the best
if annotation is inspect.Parameter.empty or annotation is Any:
pass
elif get_origin(annotation) is list:
if self.converter:
raise TypeError("Converter detected with custom annotation")
arg = annotation.__args__[0] if annotation.__args__ else str
if arg in [str, int, float]:
conv = arg
elif arg in CONVERTER_MAPPING:
# TODO: Define our own converters?
raise TypeError("Discord's api is not mature enough to handle member conversion with models")
else:
raise TypeError(f"{arg!r} is not a valid List subscript for Param")
self.converter = lambda inter, arg: list(map(conv, arg.split()))
elif isinstance(annotation, (EnumMeta, disnake.enums.EnumMeta)) or get_origin(annotation) is Literal:
self._parse_enum(annotation)
elif get_origin(annotation) is Union:
args = annotation.__args__
if all(issubclass(channel, disnake.abc.GuildChannel) for channel in args):
self.type = disnake.abc.GuildChannel
channel_types = set()
for channel in args:
channel_types.union(_channel_type_factory(channel))
self.channel_types = list(channel_types)
elif annotation in self.TYPES:
self.type = annotation
elif any(get_origin(arg) for arg in args):
raise TypeError("Unions do not support nesting")
else:
raise TypeError("Unions for anything else other than channels are not supported")
elif isinstance(annotation, type) and issubclass(annotation, disnake.abc.GuildChannel):
self.type = disnake.abc.GuildChannel
self.channel_types = _channel_type_factory(annotation)
elif annotation in self.TYPES:
self.type = annotation
elif annotation in CONVERTER_MAPPING:
self.converter = CONVERTER_MAPPING[annotation]().convert
else:
raise TypeError(f"{annotation!r} is not a valid Param annotation")
def parse_parameter(self, param: inspect.Parameter) -> None:
self.name = self.name or param.name
self.param_name = param.name
def parse_doc(self, doc_type: Any, doc_description: str) -> None:
self.description = self.description or doc_description
if self.type == str and doc_type is not None:
self.parse_annotation(doc_type)
def to_option(self) -> Option:
if self.name == "":
raise TypeError("Param must be parsed first")
return Option(
name=self.name,
description=self.description or "\u200b",
type=self.discord_type,
required=self.required,
choices=self.choices or None,
channel_types=self.channel_types,
autocomplete=self.autocomplete is not None,
min_value=self.ge,
max_value=self.le,
)
def expand_params(command: AnySlashCommand) -> List[Option]:
"""Update an option with its params *in-place*
Returns the created options
"""
# parse annotations:
sig = inspect.signature(command.callback)
parameters = list(sig.parameters.values())
# hacky I suppose
cog = parameters[0].name == "self" if command.cog is None else True
inter_param = parameters[1] if cog else parameters[0]
parameters = parameters[2:] if cog else parameters[1:]
type_hints = get_type_hints(command.callback)
# extract params:
params = []
for parameter in parameters:
if parameter.kind in [inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD]:
continue
param = parameter.default
if not isinstance(param, ParamInfo):
param = ParamInfo(param if param is not parameter.empty else ...)
doc_param = command.docstring["params"].get(parameter.name)
param.parse_parameter(parameter)
if doc_param:
param.parse_doc(doc_param["type"], doc_param["description"])
param.parse_annotation(type_hints.get(parameter.name, Any))
params.append(param)
# update connectors and autocompleters
for param in params:
if param.name != param.param_name:
command.connectors[param.name] = param.param_name
if param.autocomplete:
command.autocompleters[param.name] = param.autocomplete
# add custom decorators
inter_annot = type_hints.get(inter_param.name, Any)
if isinstance(inter_annot, type) and issubclass(inter_annot, disnake.GuildCommandInteraction):
command.guild_only = True
return [param.to_option() for param in params]
async def resolve_param_kwargs(func: Callable, inter: Interaction, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""Resolves a call with kwargs and transforms into normal kwargs
    Depends on the fact that option params already contain all info.
"""
sig = inspect.signature(func)
type_hints = get_type_hints(func)
for parameter in sig.parameters.values():
if parameter.kind in [inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD]:
continue
param = parameter.default
if not isinstance(param, ParamInfo):
param = ParamInfo(param if param is not parameter.empty else ...)
try:
param.parse_parameter(parameter)
param.parse_annotation(type_hints.get(param.param_name, Any))
except TypeError:
# invalid annotations with old-style options
continue
if param.param_name in kwargs:
kwargs[param.param_name] = await param.convert_argument(inter, kwargs[param.param_name])
elif param.default is not ...:
kwargs[param.param_name] = await param.get_default(inter)
return kwargs
# NOTE: This is not worth overloading anymore unless we take
# an sqlmodel approach and create overloads dynamically using templating
def Param(
default: Any = ...,
*,
name: str = "",
desc: str = None,
description: str = None,
choices: Choices = None,
conv: Callable[[Interaction, Any], Any] = None,
converter: Callable[[Interaction, Any], Any] = None,
autocomp: Callable[[Interaction, str], Any] = None,
autocomplete: Callable[[Interaction, str], Any] = None,
lt: float = None,
le: float = None,
gt: float = None,
ge: float = None,
min_value: float = None,
max_value: float = None,
) -> Any:
return ParamInfo(
default,
name=name,
description=desc or description,
choices=choices,
converter=conv or converter,
        autocomplete=autocomp or autocomplete,
lt=lt,
le=le if max_value is None else max_value,
gt=gt,
ge=ge if min_value is None else min_value,
)
param = Param
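# A minimal usage sketch (the bot, command and option names are illustrative):
#
#   @bot.slash_command()
#   async def repeat(inter, text: str,
#                    times: int = Param(1, description="How many times", ge=1, le=5)):
#       ...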
def option_enum(choices: Union[Dict[str, TChoice], List[TChoice]], **kwargs: TChoice) -> Type[TChoice]:
if isinstance(choices, list):
# invariance issue, please fix
choices = cast(Dict[str, TChoice], {str(i): i for i in choices})
choices = choices or kwargs
first, *_ = choices.values()
return Enum("", choices, type=type(first))
|
the-stack_106_21311
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Core logic for the inverse transformation."""
import functools
from typing import Iterable
import jax
from jax import abstract_arrays
from jax import core as jax_core
from jax import tree_util
from jax import util as jax_util
from jax.interpreters import pxla
import jax.numpy as np
from oryx.core import primitive
from oryx.core import trace_util
from oryx.core.interpreters import propagate
from oryx.core.interpreters.inverse import slice as slc
__all__ = [
'ildj_registry',
'InverseAndILDJ',
'inverse_and_ildj',
'inverse',
'register_elementwise',
'register_binary',
]
safe_map = jax_core.safe_map
safe_zip = jax_core.safe_zip
Cell = propagate.Cell
NDSlice = slc.NDSlice
Slice = slc.Slice
class InverseAndILDJ(Cell):
"""Propagates inverse value slices and their ILDJs.
An InverseAndILDJ instance keeps track of a set of slices of a value. In the
simplest case, the slice's indices capture the entire value, in which case the
cell is "top". Partial information is represented with slices that do not
  capture the entire value. No information, i.e. "bottom", is represented with a
cell that has no slices.
Joining two cells creates set of slices, and if we detect that the slices can
be concatenated, we combine them into a single slice. As propagation
progresses, we hope to accumulate enough slices to concatenate them all into
this cell's `val`. ILDJs are also kept track of in the same way, except we
keep track of the diagonal of the Jacobian since split operations may also
split up the Jacobian.
"""
def __init__(self,
aval: jax_core.AbstractValue,
slices: Iterable[NDSlice]):
super().__init__(aval)
self.slices = frozenset(slices)
def top(self) -> bool:
"""Returns if this cell represents the top of the slice lattice.
An InverseAndILDJ is at the top if its slice represents the entire array.
"""
if len(self.slices) != 1:
return False
return list(self.slices)[0].value.shape == self.aval.shape
def bottom(self) -> bool:
"""Returns if this cell represents the bottom of the slice lattice.
An InverseAndILDJ is at the bottom if we have no slices.
"""
return len(self.slices) == 0 # pylint: disable=g-explicit-length-test
def __lt__(self, other: 'InverseAndILDJ') -> bool:
if self.top() or other.bottom():
return False
return all(any(s1 < s2 for s2 in other.slices) for s1 in self.slices)
def __eq__(self, other: 'InverseAndILDJ') -> bool:
if self.aval != other.aval:
return False
return self.slices == other.slices
def join(self, other: 'InverseAndILDJ') -> 'InverseAndILDJ':
if other.top():
return other
if other.bottom():
return self
if self == other:
return self
if other < self:
return self
if self < other:
return other
all_slices = sorted(self.slices | other.slices,
key=lambda slc: tuple(s.start for s in slc.slices))
new_slices = set()
active = all_slices.pop(0)
while all_slices:
for dim in range(len(self.aval.shape)):
if active.can_concatenate(all_slices[0], dim):
active = active.concatenate(all_slices.pop(0), dim)
break
else:
new_slices.add(active)
active = all_slices.pop(0)
new_slices.add(active)
return InverseAndILDJ(self.aval, new_slices)
@property
def val(self):
if not self.top():
raise AssertionError('Cannot get value from non-top lattice value: ',
f'{self.aval}, {self.slices}')
return list(self.slices)[0].value
@property
def ildj(self):
if not self.top():
raise AssertionError('Cannot get ildj from non-top lattice value: ',
f'{self.aval}, {self.slices}')
return list(self.slices)[0].ildj
@classmethod
def unknown(cls, aval):
return InverseAndILDJ(aval, [])
@classmethod
def new(cls, val):
val = np.array(val)
aval = jax_core.get_aval(val)
aval = abstract_arrays.raise_to_shaped(aval)
ndslice = NDSlice.new(val, np.zeros_like(val))
return InverseAndILDJ(aval, frozenset([ndslice]))
def flatten(self):
slices = list(sorted(self.slices))
return slices, (self.aval,)
@classmethod
def unflatten(cls, data, slices):
return InverseAndILDJ(data[0], frozenset(slices))
def inverse_and_ildj(f, *trace_args, reduce_ildj=True):
"""Inverse and ILDJ function transformation."""
def wrapped(*args, **kwargs):
"""Function wrapper that takes in inverse arguments."""
forward_args = trace_args if len(trace_args) else args
jaxpr, (in_tree, _) = trace_util.stage(f, dynamic=False)(
*forward_args, **kwargs)
flat_forward_args, _ = tree_util.tree_flatten(forward_args)
flat_args, _ = tree_util.tree_flatten(args)
flat_constcells = safe_map(InverseAndILDJ.new, jaxpr.literals)
flat_forward_avals = [
trace_util.get_shaped_aval(arg)
for arg in flat_forward_args]
flat_incells = [InverseAndILDJ.unknown(aval) for aval in flat_forward_avals]
flat_outcells = safe_map(InverseAndILDJ.new, flat_args)
env, _ = propagate.propagate(InverseAndILDJ, ildj_registry, jaxpr.jaxpr,
flat_constcells, flat_incells, flat_outcells) # pytype: disable=wrong-arg-types
flat_incells = [env.read(invar) for invar in jaxpr.jaxpr.invars]
if any(not flat_incell.top() for flat_incell in flat_incells):
raise ValueError('Cannot invert function.')
flat_vals, flat_ildjs = jax_util.unzip2([
(flat_incell.val, flat_incell.ildj) for flat_incell in flat_incells
])
vals = tree_util.tree_unflatten(in_tree, flat_vals)
if reduce_ildj:
ildj_ = sum(np.sum(i) for i in flat_ildjs)
else:
ildj_ = tree_util.tree_unflatten(in_tree, flat_ildjs)
if len(forward_args) == 1:
vals = vals[0]
ildj_ = ildj_ if reduce_ildj else ildj_[0]
return vals, ildj_
return wrapped
def inverse(f, *trace_args, **inverse_kwargs):
def wrapped(*args, **kwargs):
return inverse_and_ildj(f, *trace_args, **inverse_kwargs)(
*args, **kwargs)[0]
return wrapped
def ildj(f, *trace_args, **inverse_kwargs):
def wrapped(*args, **kwargs):
return inverse_and_ildj(f, *trace_args, **inverse_kwargs)(
*args, **kwargs)[1]
return wrapped
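# A minimal usage sketch (the function is illustrative and relies on the
# elementwise/binary rules registered elsewhere in the package):
#
#   def f(x):
#     return np.exp(x) + 2.
#
#   inverse(f)(3.)              # ~= np.log(3. - 2.)
#   ildj(f)(3.)                 # log |d f^-1(y) / dy| evaluated at y = 3.
#   inverse_and_ildj(f)(3.)     # both at once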
def default_rule(prim, invals, outvals, **params):
"""Default inversion rule that only does forward eval."""
if all(outval.bottom() for outval in outvals):
if all(inval.top() for inval in invals):
vals = [inval.val for inval in invals]
ans = prim.bind(*vals, **params)
if not prim.multiple_results:
ans = [ans]
# Propagate can only invert functions that are constructed
# autoregressively, and therefore the Jacobians of propagate-invertible
      # functions are lower-triangular. We are therefore safe to assign outvals an
# ILDJ value of 0 as they are part of forward propagation that will fill
# in an off-diagonal entry of the Jacobian and will not contribute to the
# log-det Jacobian.
outvals = safe_map(InverseAndILDJ.new, ans)
return invals, outvals, None
if any(outval.bottom() for outval in outvals):
return invals, outvals, None
raise NotImplementedError(f'No registered inverse for `{prim}`.')
class InverseDict(object):
"""Default rules dictionary that uses a default rule for inverse."""
def __init__(self):
self.rules = {}
def __getitem__(self, prim):
if prim not in self.rules:
self[prim] = functools.partial(default_rule, prim)
return self.rules[prim]
def __setitem__(self, prim, val):
self.rules[prim] = val
def __contains__(self, prim):
return prim in self.rules
def register_elementwise(prim):
"""Registers an elementwise primitive with ILDJ."""
def make_rule(f):
"""Accepts an inverse function for a primitive."""
def ildj_rule(incells, outcells, **params):
"""General InverseAndILDJ rule for elementwise functions."""
outcell, = outcells
incell, = incells
if not incell.top() and outcell.top():
val = outcell.val
f_sum = lambda x: f(x, **params).sum()
ildj_ = outcell.ildj + np.log(np.abs(jax.grad(f_sum)(val)))
ndslice = NDSlice.new(f(val, **params), ildj_)
incells = [InverseAndILDJ(outcell.aval, [ndslice])]
elif not outcell.top() and incell.top():
outcells = [InverseAndILDJ.new(prim.bind(incell.val, **params))]
return incells, outcells, None
ildj_registry[prim] = ildj_rule
return make_rule
def register_binary(prim):
"""Registers an binary primitive with ILDJ."""
def make_rule(f_left, f_right):
def ildj_rule(incells, outcells, **params):
outcell, = outcells
left, right = incells
if outcell.top():
val, ildj_ = outcell.val, outcell.ildj
if left.top():
right_val, right_ildj = f_left(left.val, val, ildj_)
ndslice = NDSlice.new(right_val, right_ildj)
incells = [left, InverseAndILDJ(right.aval, [ndslice])]
elif right.top():
left_val, left_ildj = f_right(right.val, val, ildj_)
ndslice = NDSlice.new(left_val, left_ildj)
incells = [InverseAndILDJ(left.aval, [ndslice]), right]
elif (not outcell.top() and left.top() and
right.top()):
out_val = prim.bind(left.val, right.val, **params)
outcells = [InverseAndILDJ.new(out_val)]
return incells, outcells, None
ildj_registry[prim] = ildj_rule
return make_rule
ildj_registry = InverseDict()
def hop_inverse_rule(prim):
ildj_registry[prim] = functools.partial(propagate.call_rule, prim)
primitive.register_hop_transformation_rule('inverse', hop_inverse_rule)
def initial_ildj(incells, outcells, *, jaxpr, num_consts, **_):
const_cells, incells = jax_util.split_list(incells, [num_consts])
env, state = propagate.propagate(
InverseAndILDJ, ildj_registry, jaxpr, const_cells,
incells, outcells) # pytype: disable=wrong-arg-types
new_incells = [env.read(invar) for invar in jaxpr.invars]
new_outcells = [env.read(outvar) for outvar in jaxpr.outvars]
return const_cells + new_incells, new_outcells, state
def initial_inverse_rule(prim):
ildj_registry[prim] = initial_ildj
primitive.register_initial_transformation_rule('inverse', initial_inverse_rule)
def map_ildj(prim, incells, outcells, **params):
"""InverseAndILDJ rule for the map primitives."""
f, incells = incells[0], incells[1:]
def slice_aval(aval):
return abstract_arrays.ShapedArray(aval.shape[1:], aval.dtype,
aval.weak_type)
def add_slice(cell, old_cell):
new_slices = [
NDSlice(ndslice.value, ndslice.ildj, Slice(0, old_cell.aval.shape[0]),
*ndslice.slices) for ndslice in cell.slices
]
return InverseAndILDJ(old_cell.aval, new_slices)
def remove_slice(cell):
new_slices = [
NDSlice(ndslice.value, ndslice.ildj, *ndslice.slices[1:])
for ndslice in cell.slices
]
aval = slice_aval(cell.aval)
return InverseAndILDJ(aval, new_slices)
mapped_incells = safe_map(remove_slice, incells)
mapped_outcells = safe_map(remove_slice, outcells)
flat_vals, in_tree = tree_util.tree_flatten((mapped_incells, mapped_outcells))
f, aux = propagate.flat_propagate(f, in_tree)
# Assume all invars as mapped
new_in_axes = (0,) * len(flat_vals)
new_params = dict(params, in_axes=new_in_axes)
if 'donated_invars' in params:
new_params['donated_invars'] = (False,) * len(flat_vals)
if 'out_axes' in params:
assert all(out_axis == 0 for out_axis in params['out_axes'])
new_params['out_axes_thunk'] = jax_util.HashableFunction(
lambda: (0,) * aux().num_leaves,
closure=('ildj', params['out_axes']))
del new_params['out_axes']
flat_out = prim.bind(f, *flat_vals, **new_params)
out_tree = aux()
new_incells, new_outcells, state = tree_util.tree_unflatten(
out_tree, flat_out)
new_incells = [add_slice(v, old_v)
for old_v, v in safe_zip(incells, new_incells)]
new_outcells = [add_slice(v, old_v)
for old_v, v in safe_zip(outcells, new_outcells)]
return new_incells, new_outcells, state
ildj_registry[pxla.xla_pmap_p] = functools.partial(map_ildj, pxla.xla_pmap_p)
|
the-stack_106_21312
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/prismstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
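# For example (illustrative input), parse_po('msgid "Hello"\nmsgstr ""') returns
# [(['"Hello"'], ['""'])]: the msgid/msgstr pieces are kept verbatim, quotes
# included, so they can be re-emitted below unchanged.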
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *prism_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("prism-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
the-stack_106_21313
|
from shablbot.components.chat import Chat
def command(processed_chat: Chat) -> None:
processed_chat.turn_off()
command_settings = {
"code": "bot_off",
"name": "Выключить бота",
"templates": ["выкл бот", "бот выкл"],
"answer": "Теперь бот не читает сообщения в чате",
"description": "Команда для выключения бота в чате (Внутри чата)",
"method": "normal",
"need": ["processed_chat",],
"entry_point": command
}
|
the-stack_106_21314
|
from collections import Counter
# Just stick some data there
with open('email_addresses.txt', 'w') as f:
f.write("[email protected]\n")
f.write("[email protected]\n")
f.write("[email protected]\n")
def get_domain(email_address: str) -> str:
"""Split on '@' and return the last piece"""
return email_address.lower().split("@")[-1]
# A couple of tests
assert get_domain('[email protected]') == 'gmail.com'
assert get_domain('[email protected]') == 'm.datasciencester.com'
with open('email_addresses.txt', 'r') as f:
domain_counts = Counter(get_domain(line.strip())
for line in f
if "@" in line)
print(domain_counts)
|
the-stack_106_21315
|
# day 12 challenge 1
from collections import deque
# get input data
instructions = []
with open('input.txt', 'r') as file:
for line in file:
instructions.append((line[0], int(line[1:])))
# x and y are 0, facing is 0 = east
curr = {'x' : 0, 'y' : 0, 'f' : deque(['E', 'S', 'W', 'N'])}
for instruction in instructions:
op, val = instruction
# handle NSEW just go in direction
if op in 'NSEW':
if op == 'N':
curr['y'] += val
elif op == 'S':
curr['y'] -= val
elif op == 'E':
curr['x'] += val
else: # == W
curr['x'] -= val
# handle changing boat angle
elif op in 'LR':
turns = val // 90
if op == 'L':
curr['f'].rotate(turns)
else: # == R
curr['f'].rotate(-turns)
# handle going forward in whatever the current angle is
else:
facing = curr['f'][0]
if facing == 'N':
curr['y'] += val
elif facing == 'S':
curr['y'] -= val
elif facing == 'E':
curr['x'] += val
else: # == W
curr['x'] -= val
print(abs(curr['x']) + abs(curr['y']))
|
the-stack_106_21318
|
# Default configuration where the problem is regression and the agent is Kalman Filter
import ml_collections
# Local imports
from configs.utils import PriorKnowledge
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.problem = "classification"
config.env_model = "MLP"
config.agent = "KalmanFilter"
config.seed = 0
config.nsteps = 20
config.ntrials = 20
config.train_batch_size = 1
config.test_batch_size = 1
    input_dim, num_train, tau, output_dim, hidden = 20, 100, 1, 1, 10
    config.prior_knowledge = PriorKnowledge(input_dim, num_train, tau, output_dim, hidden=hidden)
return config
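# Typical usage sketch (the experiment runner calling this is hypothetical):
#
#   config = get_config()
#   config.problem   # -> "classification"
#   config.nsteps    # -> 20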
|
the-stack_106_21319
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn.functional as F
import torch
import torch.nn as nn
import util.util as util
from util.Selfpatch import Selfpatch
# SE MODEL
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Conv2d(channel, channel // reduction, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(channel // reduction, channel, kernel_size=1, stride=1, padding=0),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c, 1, 1)
y = self.fc(y)
return x * y.expand_as(x)
class Convnorm(nn.Module):
def __init__(self, in_ch, out_ch, sample='none-3', activ='leaky'):
super().__init__()
self.bn = nn.InstanceNorm2d(out_ch, affine=True)
if sample == 'down-3':
self.conv = nn.Conv2d(in_ch, out_ch, 3, 2, 1, bias=False)
else:
self.conv = nn.Conv2d(in_ch, out_ch, 3, 1)
if activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input):
out = input
out = self.conv(out)
out = self.bn(out)
if hasattr(self, 'activation'):
            out = self.activation(out)  # `out` is a plain tensor here, not a [tensor, mask] pair
return out
class PCBActiv(nn.Module):
def __init__(self, in_ch, out_ch, bn=True, sample='none-3', activ='leaky',
conv_bias=False, innorm=False, inner=False, outer=False):
super().__init__()
if sample == 'same-5':
self.conv = PartialConv(in_ch, out_ch, 5, 1, 2, bias=conv_bias)
elif sample == 'same-7':
self.conv = PartialConv(in_ch, out_ch, 7, 1, 3, bias=conv_bias)
elif sample == 'down-3':
self.conv = PartialConv(in_ch, out_ch, 3, 2, 1, bias=conv_bias)
else:
self.conv = PartialConv(in_ch, out_ch, 3, 1, 1, bias=conv_bias)
if bn:
self.bn = nn.InstanceNorm2d(out_ch, affine=True)
if activ == 'relu':
self.activation = nn.ReLU()
elif activ == 'leaky':
self.activation = nn.LeakyReLU(negative_slope=0.2)
self.innorm = innorm
self.inner = inner
self.outer = outer
def forward(self, input):
out = input
if self.inner:
out[0] = self.bn(out[0])
out[0] = self.activation(out[0])
out = self.conv(out)
out[0] = self.bn(out[0])
out[0] = self.activation(out[0])
elif self.innorm:
out = self.conv(out)
out[0] = self.bn(out[0])
out[0] = self.activation(out[0])
elif self.outer:
out = self.conv(out)
out[0] = self.bn(out[0])
else:
out = self.conv(out)
out[0] = self.bn(out[0])
if hasattr(self, 'activation'):
out[0] = self.activation(out[0])
return out
class ConvDown(nn.Module):
def __init__(self, in_c, out_c, kernel, stride, padding=0, dilation=1, groups=1, bias=False, layers=1, activ=True):
super().__init__()
nf_mult = 1
nums = out_c / 64
sequence = []
for i in range(1, layers + 1):
nf_mult_prev = nf_mult
if nums == 8:
if in_c == 512:
                    nf_mult = 1
else:
nf_mult = 2
else:
nf_mult = min(2 ** i, 8)
if kernel != 1:
if activ == False and layers == 1:
sequence += [
nn.Conv2d(nf_mult_prev * in_c, nf_mult * in_c,
kernel_size=kernel, stride=stride, padding=padding, bias=bias),
nn.InstanceNorm2d(nf_mult * in_c)
]
else:
sequence += [
nn.Conv2d(nf_mult_prev * in_c, nf_mult * in_c,
kernel_size=kernel, stride=stride, padding=padding, bias=bias),
nn.InstanceNorm2d(nf_mult * in_c),
nn.LeakyReLU(0.2, True)
]
else:
sequence += [
nn.Conv2d(in_c, out_c,
kernel_size=kernel, stride=stride, padding=padding, bias=bias),
nn.InstanceNorm2d(out_c),
nn.LeakyReLU(0.2, True)
]
if activ == False:
if i + 1 == layers:
if layers == 2:
sequence += [
nn.Conv2d(nf_mult * in_c, nf_mult * in_c,
kernel_size=kernel, stride=stride, padding=padding, bias=bias),
nn.InstanceNorm2d(nf_mult * in_c)
]
else:
sequence += [
nn.Conv2d(nf_mult_prev * in_c, nf_mult * in_c,
kernel_size=kernel, stride=stride, padding=padding, bias=bias),
nn.InstanceNorm2d(nf_mult * in_c)
]
break
self.model = nn.Sequential(*sequence)
def forward(self, input):
return self.model(input)
class ConvUp(nn.Module):
def __init__(self, in_c, out_c, kernel, stride, padding=0, dilation=1, groups=1, bias=False):
super().__init__()
self.conv = nn.Conv2d(in_c, out_c, kernel,
stride, padding, dilation, groups, bias)
self.bn = nn.InstanceNorm2d(out_c)
self.relu = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input, size):
out = F.interpolate(input=input, size=size, mode='bilinear')
out = self.conv(out)
out = self.bn(out)
out = self.relu(out)
return out
class BASE(nn.Module):
def __init__(self, inner_nc):
super(BASE, self).__init__()
se = SELayer(inner_nc, 16)
model = [se]
gus = util.gussin(1.5).cuda()
self.gus = torch.unsqueeze(gus, 1).double()
self.model = nn.Sequential(*model)
self.down = nn.Sequential(
nn.Conv2d(1024, 512, 1, 1, 0, bias=False),
nn.InstanceNorm2d(512),
nn.LeakyReLU(negative_slope=0.2)
)
def forward(self, x):
Nonparm = Selfpatch()
out_32 = self.model(x)
b, c, h, w = out_32.size()
gus = self.gus.float()
gus_out = out_32[0].expand(h * w, c, h, w)
gus_out = gus * gus_out
gus_out = torch.sum(gus_out, -1)
gus_out = torch.sum(gus_out, -1)
gus_out = gus_out.contiguous().view(b, c, h, w)
csa2_in = F.sigmoid(out_32)
csa2_f = torch.nn.functional.pad(csa2_in, (1, 1, 1, 1))
csa2_ff = torch.nn.functional.pad(out_32, (1, 1, 1, 1))
csa2_fff, csa2_f, csa2_conv = Nonparm.buildAutoencoder(csa2_f[0], csa2_in[0], csa2_ff[0], 3, 1)
csa2_conv = csa2_conv.expand_as(csa2_f)
csa_a = csa2_conv * csa2_f
csa_a = torch.mean(csa_a, 1)
a_c, a_h, a_w = csa_a.size()
csa_a = csa_a.contiguous().view(a_c, -1)
csa_a = F.softmax(csa_a, dim=1)
csa_a = csa_a.contiguous().view(a_c, 1, a_h, a_h)
out = csa_a * csa2_fff
out = torch.sum(out, -1)
out = torch.sum(out, -1)
out_csa = out.contiguous().view(b, c, h, w)
out_32 = torch.cat([gus_out, out_csa], 1)
out_32 = self.down(out_32)
return out_32
class PartialConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__()
self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, False)
torch.nn.init.constant_(self.mask_conv.weight, 1.0)
# mask is not updated
for param in self.mask_conv.parameters():
param.requires_grad = False
def forward(self, inputt):
# http://masc.cs.gmu.edu/wiki/partialconv
# C(X) = W^T * X + b, C(0) = b, D(M) = 1 * M + 0 = sum(M)
# W^T* (M .* X) / sum(M) + b = [C(M .* X) – C(0)] / D(M) + C(0)
input = inputt[0]
mask = inputt[1].float().cuda()
output = self.input_conv(input * mask)
if self.input_conv.bias is not None:
output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(
output)
else:
output_bias = torch.zeros_like(output)
with torch.no_grad():
output_mask = self.mask_conv(mask)
no_update_holes = output_mask == 0
mask_sum = output_mask.masked_fill_(no_update_holes.bool(), 1.0)
output_pre = (output - output_bias) / mask_sum + output_bias
output = output_pre.masked_fill_(no_update_holes.bool(), 0.0)
new_mask = torch.ones_like(output)
new_mask = new_mask.masked_fill_(no_update_holes.bool(), 0.0)
out = []
out.append(output)
out.append(new_mask)
return out
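# Minimal call sketch (shapes are illustrative): the module takes a
# [features, mask] pair and returns [features, updated_mask]; forward() moves
# the mask to CUDA, so the features and the module must be on the GPU as well.
#
#   pconv = PartialConv(3, 16, kernel_size=3, padding=1).cuda()
#   feat, new_mask = pconv([img.cuda(), mask])   # img, mask: (B, 3, H, W), mask holds 0/1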
class PCconv(nn.Module):
def __init__(self):
super(PCconv, self).__init__()
self.down_128 = ConvDown(64, 128, 4, 2, padding=1, layers=2)
self.down_64 = ConvDown(128, 256, 4, 2, padding=1)
self.down_32 = ConvDown(256, 256, 1, 1)
self.down_16 = ConvDown(512, 512, 4, 2, padding=1, activ=False)
self.down_8 = ConvDown(512, 512, 4, 2, padding=1, layers=2, activ=False)
self.down_4 = ConvDown(512, 512, 4, 2, padding=1, layers=3, activ=False)
self.down = ConvDown(768, 256, 1, 1)
self.fuse = ConvDown(512, 512, 1, 1)
self.up = ConvUp(512, 256, 1, 1)
self.up_128 = ConvUp(512, 64, 1, 1)
self.up_64 = ConvUp(512, 128, 1, 1)
self.up_32 = ConvUp(512, 256, 1, 1)
        self.base = BASE(512)
        sequence_3 = []
        sequence_5 = []
        sequence_7 = []
        for i in range(5):
            sequence_3 += [PCBActiv(256, 256, innorm=True)]
            sequence_5 += [PCBActiv(256, 256, sample='same-5', innorm=True)]
            sequence_7 += [PCBActiv(256, 256, sample='same-7', innorm=True)]
        self.cov_3 = nn.Sequential(*sequence_3)
        self.cov_5 = nn.Sequential(*sequence_5)
        self.cov_7 = nn.Sequential(*sequence_7)
self.activation = nn.LeakyReLU(negative_slope=0.2)
def forward(self, input, mask):
mask = util.cal_feat_mask(mask, 3, 1)
# input[2]:256 32 32
b, c, h, w = input[2].size()
mask_1 = torch.add(torch.neg(mask.float()), 1)
mask_1 = mask_1.expand(b, c, h, w)
x_1 = self.activation(input[0])
x_2 = self.activation(input[1])
x_3 = self.activation(input[2])
x_4 = self.activation(input[3])
x_5 = self.activation(input[4])
x_6 = self.activation(input[5])
        # Change the shape of each layer and integrate low-level/high-level features
x_1 = self.down_128(x_1)
x_2 = self.down_64(x_2)
x_3 = self.down_32(x_3)
x_4 = self.up(x_4, (32, 32))
x_5 = self.up(x_5, (32, 32))
x_6 = self.up(x_6, (32, 32))
# The first three layers are Texture/detail
# The last three layers are Structure
x_DE = torch.cat([x_1, x_2, x_3], 1)
x_ST = torch.cat([x_4, x_5, x_6], 1)
x_ST = self.down(x_ST)
x_DE = self.down(x_DE)
x_ST = [x_ST, mask_1]
x_DE = [x_DE, mask_1]
# Multi Scale PConv fill the Details
x_DE_3 = self.cov_3(x_DE)
x_DE_5 = self.cov_5(x_DE)
x_DE_7 = self.cov_7(x_DE)
x_DE_fuse = torch.cat([x_DE_3[0], x_DE_5[0], x_DE_7[0]], 1)
x_DE_fi = self.down(x_DE_fuse)
# Multi Scale PConv fill the Structure
x_ST_3 = self.cov_3(x_ST)
x_ST_5 = self.cov_5(x_ST)
x_ST_7 = self.cov_7(x_ST)
x_ST_fuse = torch.cat([x_ST_3[0], x_ST_5[0], x_ST_7[0]], 1)
x_ST_fi = self.down(x_ST_fuse)
x_cat = torch.cat([x_ST_fi, x_DE_fi], 1)
x_cat_fuse = self.fuse(x_cat)
# Feature equalizations
x_final = self.base(x_cat_fuse)
# Add back to the input
x_ST = x_final
x_DE = x_final
x_1 = self.up_128(x_DE, (128, 128)) + input[0]
x_2 = self.up_64(x_DE, (64, 64)) + input[1]
x_3 = self.up_32(x_DE, (32, 32)) + input[2]
x_4 = self.down_16(x_ST) + input[3]
x_5 = self.down_8(x_ST) + input[4]
x_6 = self.down_4(x_ST) + input[5]
out = [x_1, x_2, x_3, x_4, x_5, x_6]
loss = [x_ST_fi, x_DE_fi]
out_final = [out, loss]
return out_final
|
the-stack_106_21321
|
from collections import OrderedDict
from sqlalchemy.inspection import inspect as sqlalchemyinspect
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.exc import NoResultFound
from graphene import Field # , annotate, ResolveInfo
from graphene.relay import Connection, Node
from graphene.types.objecttype import ObjectType, ObjectTypeOptions
from graphene.types.utils import yank_fields_from_attrs
from .converter import (convert_sqlalchemy_column,
convert_sqlalchemy_composite,
convert_sqlalchemy_relationship,
convert_sqlalchemy_hybrid_method)
from .registry import Registry, get_global_registry
from .utils import get_query, is_mapped_class, is_mapped_instance
def construct_fields(model, registry, only_fields, exclude_fields):
inspected_model = sqlalchemyinspect(model)
fields = OrderedDict()
for name, column in inspected_model.columns.items():
is_not_in_only = only_fields and name not in only_fields
# is_already_created = name in options.fields
is_excluded = name in exclude_fields # or is_already_created
if is_not_in_only or is_excluded:
# We skip this field if we specify only_fields and is not
# in there. Or when we exclude this field in exclude_fields
continue
converted_column = convert_sqlalchemy_column(column, registry)
fields[name] = converted_column
for name, composite in inspected_model.composites.items():
is_not_in_only = only_fields and name not in only_fields
# is_already_created = name in options.fields
is_excluded = name in exclude_fields # or is_already_created
if is_not_in_only or is_excluded:
# We skip this field if we specify only_fields and is not
# in there. Or when we exclude this field in exclude_fields
continue
converted_composite = convert_sqlalchemy_composite(composite, registry)
fields[name] = converted_composite
for hybrid_item in inspected_model.all_orm_descriptors:
if type(hybrid_item) == hybrid_property:
name = hybrid_item.__name__
is_not_in_only = only_fields and name not in only_fields
# is_already_created = name in options.fields
is_excluded = name in exclude_fields # or is_already_created
if is_not_in_only or is_excluded:
# We skip this field if we specify only_fields and is not
# in there. Or when we exclude this field in exclude_fields
continue
converted_hybrid_property = convert_sqlalchemy_hybrid_method(
hybrid_item
)
fields[name] = converted_hybrid_property
# Get all the columns for the relationships on the model
for relationship in inspected_model.relationships:
is_not_in_only = only_fields and relationship.key not in only_fields
# is_already_created = relationship.key in options.fields
is_excluded = relationship.key in exclude_fields # or is_already_created
if is_not_in_only or is_excluded:
# We skip this field if we specify only_fields and is not
# in there. Or when we exclude this field in exclude_fields
continue
converted_relationship = convert_sqlalchemy_relationship(relationship, registry)
name = relationship.key
fields[name] = converted_relationship
return fields
class SQLAlchemyObjectTypeOptions(ObjectTypeOptions):
model = None # type: Model
registry = None # type: Registry
connection = None # type: Type[Connection]
id = None # type: str
class SQLAlchemyObjectType(ObjectType):
@classmethod
def __init_subclass_with_meta__(cls, model=None, registry=None, skip_registry=False,
only_fields=(), exclude_fields=(), connection=None,
use_connection=None, interfaces=(), id=None, **options):
assert is_mapped_class(model), (
'You need to pass a valid SQLAlchemy Model in '
'{}.Meta, received "{}".'
).format(cls.__name__, model)
if not registry:
registry = get_global_registry()
assert isinstance(registry, Registry), (
'The attribute registry in {} needs to be an instance of '
'Registry, received "{}".'
).format(cls.__name__, registry)
sqla_fields = yank_fields_from_attrs(
construct_fields(model, registry, only_fields, exclude_fields),
_as=Field,
)
if use_connection is None and interfaces:
use_connection = any((issubclass(interface, Node) for interface in interfaces))
if use_connection and not connection:
# We create the connection automatically
connection = Connection.create_type('{}Connection'.format(cls.__name__), node=cls)
if connection is not None:
assert issubclass(connection, Connection), (
"The connection must be a Connection. Received {}"
).format(connection.__name__)
_meta = SQLAlchemyObjectTypeOptions(cls)
_meta.model = model
_meta.registry = registry
_meta.fields = sqla_fields
_meta.connection = connection
_meta.id = id or 'id'
super(SQLAlchemyObjectType, cls).__init_subclass_with_meta__(_meta=_meta, interfaces=interfaces, **options)
if not skip_registry:
registry.register(cls)
@classmethod
def is_type_of(cls, root, info):
if isinstance(root, cls):
return True
if not is_mapped_instance(root):
raise Exception((
'Received incompatible instance "{}".'
).format(root))
return isinstance(root, cls._meta.model)
@classmethod
def get_query(cls, info):
model = cls._meta.model
return get_query(model, info.context)
@classmethod
def get_node(cls, info, id):
try:
return cls.get_query(info).get(id)
except NoResultFound:
return None
def resolve_id(self, info):
# graphene_type = info.parent_type.graphene_type
keys = self.__mapper__.primary_key_from_instance(self)
return tuple(keys) if len(keys) > 1 else keys[0]
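# A minimal usage sketch (UserModel is an illustrative mapped SQLAlchemy model):
#
#   class UserType(SQLAlchemyObjectType):
#       class Meta:
#           model = UserModel
#           interfaces = (Node, )
#
# Fields are derived from UserModel's columns, composites, hybrid properties and
# relationships, and because Node is among the interfaces a `UserTypeConnection`
# is created automatically.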
|
the-stack_106_21322
|
"""
2019-06957 Michael Benjamin C. Morco
CS 150 Extra Lab 1
Wordle Clone
"""
from ctypes import alignment
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW, CENTER
import random
class get_word:
def __init__(self, words):
self.rando = random.randint(0,2314)
self.words = words
def __repr__(self):
return self.words[self.rando]
#return input()
class make_grid:
def __init__(self):
self.guess_row = toga.Box(style=Pack(direction=ROW,alignment=CENTER,flex=1,padding_bottom=5))
for j in range(5):
if j == 0:
self.guess_row.add(toga.Button('',style=Pack(alignment=CENTER,font_size=15,width = 50, height = 50,background_color="white")))
else:
self.guess_row.add(toga.Button('',style=Pack(alignment=CENTER,font_size=15,width = 50, height = 50,padding_left=5,background_color="white")))
class color_classification:
def __init__(self, guess: str, answer: str):
self.guess = guess
self.answer = answer
self.color_grid = ["transparent","transparent","transparent","transparent","transparent"]
self.correct = 0
def color_check(self):
if self.guess == self.answer:
self.correct = 1
self.color_grid = ['#6aaa64','#6aaa64','#6aaa64','#6aaa64','#6aaa64']
else:
guess_list = []
guess_list[:0] = str(self.guess).upper()
#print(guess_list)
ans_list = []
ans_list[:0] = str(self.answer).upper()
#print(ans_list)
for i in range(5):
if guess_list[i] == ans_list[i]:
#print("green: " + guess_list[i])
self.color_grid[i] = "#6aaa64"
guess_list[i] = "#6aaa64"
ans_list[i] = "#6aaa64"
for i in range(5):
if guess_list[i] in ans_list:
if guess_list[i] != "#6aaa64":
#print("yellow: " + guess_list[i])
self.color_grid[i] = "#c9b458"
ans_list[ans_list.index(guess_list[i])] = "#c9b458"
guess_list[i] = "#c9b458"
for i in range(5):
if guess_list[i] != "#6aaa64" and guess_list[i] != "#c9b458":
#print("grey: " + guess_list[i])
self.color_grid[i] = "#787c7e"
guess_list[i] = "#787c7e"
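# Quick illustration of the colouring rules (the words are illustrative):
#
#   c = color_classification("crane", "cigar")
#   c.color_check()
#   c.color_grid  # -> ['#6aaa64', '#c9b458', '#c9b458', '#787c7e', '#787c7e']
#                 #    C is exact, R and A occur elsewhere, N and E are absent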
class WordleClone(toga.App):
def startup(self):
#Opens words.txt
file = open(str(self.paths.app)+"\\..\\words.txt","r")
self.words = file.read()
self.words = self.words.split("\n")
file.close()
#Gets random word
self.chosen_word = str(get_word(self.words))
#print("Chosen word is",self.chosen_word)
#Opens allowed_guesses.txt
file = open(str(self.paths.app)+"\\..\\allowed_guesses.txt","r")
self.allowed_guesses = file.read()
self.allowed_guesses = self.allowed_guesses.split("\n")
file.close()
#initialize variables
self.green_letters = []
self.yellow_letters = []
self.guess_no = 0
main_box = toga.Box(style=Pack(direction=COLUMN, alignment = CENTER, text_align = CENTER,padding = 5))
guess_label = toga.Label('Guess: ')
self.guess_input = toga.TextInput(style=Pack(flex=1))
        # Make a box containing guess_label and self.guess_input
guess_box = toga.Box(style=Pack(direction=ROW,padding=(5, 0)))
guess_box.add(guess_label)
guess_box.add(self.guess_input)
self.button = toga.Button(
'Guess',
on_press=self.guess_answer,
style=Pack(alignment=CENTER,padding=(5,0))
)
        # set up the alphabet row used to show which letters have already been guessed
self.alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
self.alpha_list = toga.Box(style=Pack(direction=ROW, padding=(5,0), alignment = CENTER))
for i in range(26):
self.alpha_list.add(toga.Label(self.alphabet[i],style=Pack(direction = ROW, text_align=CENTER,padding_right = 1,padding_left = 1)))
#Restart button
button2 = toga.Button(
'Restart',
on_press=self.restart_game,
style=Pack(padding=(5,0))
)
main_box.add(guess_box)
main_box.add(self.button)
main_box.add(self.alpha_list)
self.master = []
for i in range(6):
ii = make_grid().guess_row
self.master.append(ii)
main_box.add(ii)
main_box.add(button2)
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
def guess_answer(self, widget):
if self.guess_no > 5:
msg = "Game over! The word is "+self.chosen_word
self.main_window.info_dialog("Wordle",msg)
elif len(self.guess_input.value) != 5:
#print("Not 5 characters!")
self.main_window.error_dialog("Wordle","Guess does not have five characters")
elif self.guess_input.value.isalpha() == False:
#print("Not alphabet")
self.main_window.error_dialog("Wordle","Guess has non-letter characters")
elif self.guess_input.value not in self.allowed_guesses:
#print("not a valid guess")
self.main_window.error_dialog("Wordle","Not a valid guess")
else:
colors = color_classification(self.guess_input.value, self.chosen_word)
colors.color_check()
#print(colors.correct)
for i in range(5):
#setting colors for the tiles
self.master[self.guess_no].children[i].label = str(self.guess_input.value[i]).upper()
self.master[self.guess_no].children[i].style.background_color = colors.color_grid[i]
self.master[self.guess_no].children[i].style.color = "white"
#setting colors for the alphabet list
#yellow -> green, grey-> green, transparent -> green
#if green already, no change is needed
#if yellow or transparent, it can be updated (grey and green remains grey/green always)
#print(self.alpha_list.children[self.alphabet.index(self.guess_input.value[i].upper())].style.background_color)
if (str(self.alpha_list.children[self.alphabet.index(self.guess_input.value[i].upper())].style.background_color) == "None") or (str(self.alpha_list.children[self.alphabet.index(self.guess_input.value[i].upper())].style.background_color) == "rgb(201, 180, 88)" and str(colors.color_grid[i]) == "#6aaa64"):
#print("Updating "+str(self.alpha_list.children[self.alphabet.index(self.guess_input.value[i].upper())].text)+ " to " + str(colors.color_grid[i]))
self.alpha_list.children[self.alphabet.index(self.guess_input.value[i].upper())].style.background_color = colors.color_grid[i]
self.alpha_list.children[self.alphabet.index(self.guess_input.value[i].upper())].style.color = "white"
if colors.correct == 1:
#print("Correct!")
self.main_window.info_dialog("Wordle","Congratulations!")
self.button.enabled = False
self.guess_no+=1
if self.guess_no > 5 and colors.correct == 0:
msg = "Game over! The word is "+self.chosen_word
self.main_window.info_dialog("Wordle",msg)
#Erase value of text input box
self.guess_input.value=""
def restart_game(self, widget):
self.guess_no = 0
self.button.enabled = True
self.guess_input.value=""
for i in range(6):
for ii in range(5):
self.master[i].children[ii].label = ''
self.master[i].children[ii].style.background_color = "white"
self.master[i].children[ii].style.color = "black"
for i in range(26):
self.alpha_list.children[i].style.color = "black"
self.alpha_list.children[i].style.background_color = None
self.chosen_word = str(get_word(self.words))
#print("Chosen word is",self.chosen_word)
def main():
return WordleClone()
|
the-stack_106_21323
|
import torch
from mmdet.core import bbox2result, bbox2roi
from ..builder import HEADS, build_head, build_roi_extractor
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class GridRoIHead(StandardRoIHead):
"""Grid roi head for Grid R-CNN.
https://arxiv.org/abs/1811.12030
"""
def __init__(self, grid_roi_extractor, grid_head, **kwargs):
assert grid_head is not None
super(GridRoIHead, self).__init__(**kwargs)
if grid_roi_extractor is not None:
self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.grid_roi_extractor = self.bbox_roi_extractor
self.grid_head = build_head(grid_head)
def init_weights(self, pretrained):
super(GridRoIHead, self).init_weights(pretrained)
self.grid_head.init_weights()
if not self.share_roi_extractor:
self.grid_roi_extractor.init_weights()
def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):
"""Ramdom jitter positive proposals for training."""
for sampling_result, img_meta in zip(sampling_results, img_metas):
bboxes = sampling_result.pos_bboxes
random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(
-amplitude, amplitude)
# before jittering
cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
# after jittering
new_cxcy = cxcy + wh * random_offsets[:, :2]
new_wh = wh * (1 + random_offsets[:, 2:])
# xywh to xyxy
new_x1y1 = (new_cxcy - new_wh / 2)
new_x2y2 = (new_cxcy + new_wh / 2)
new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)
# clip bboxes
max_shape = img_meta['img_shape']
if max_shape is not None:
new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)
new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)
sampling_result.pos_bboxes = new_bboxes
return sampling_results
def forward_dummy(self, x, proposals):
# bbox head
outs = ()
rois = bbox2roi([proposals])
if self.with_bbox:
bbox_results = self._bbox_forward(x, rois)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# grid head
grid_rois = rois[:100]
grid_feats = self.grid_roi_extractor(
x[:self.grid_roi_extractor.num_inputs], grid_rois)
if self.with_shared_head:
grid_feats = self.shared_head(grid_feats)
grid_pred = self.grid_head(grid_feats)
outs = outs + (grid_pred, )
# mask head
if self.with_mask:
mask_rois = rois[:100]
mask_results = self._mask_forward(x, mask_rois)
outs = outs + (mask_results['mask_pred'], )
return outs
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas):
bbox_results = super(GridRoIHead,
self)._bbox_forward_train(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
# Grid head forward and loss
sampling_results = self._random_jitter(sampling_results, img_metas)
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
# GN in head does not support zero shape input
if pos_rois.shape[0] == 0:
return bbox_results
grid_feats = self.grid_roi_extractor(
x[:self.grid_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
grid_feats = self.shared_head(grid_feats)
# Accelerate training
max_sample_num_grid = self.train_cfg.get('max_num_grid', 192)
sample_idx = torch.randperm(
grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid
)]
grid_feats = grid_feats[sample_idx]
grid_pred = self.grid_head(grid_feats)
grid_targets = self.grid_head.get_targets(sampling_results,
self.train_cfg)
grid_targets = grid_targets[sample_idx]
loss_grid = self.grid_head.loss(grid_pred, grid_targets)
bbox_results['loss_bbox'].update(loss_grid)
return bbox_results
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=False)
# pack rois into bboxes
grid_rois = bbox2roi([det_bboxes[:, :4]])
grid_feats = self.grid_roi_extractor(
x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)
if grid_rois.shape[0] != 0:
self.grid_head.test_mode = True
grid_pred = self.grid_head(grid_feats)
det_bboxes = self.grid_head.get_bboxes(det_bboxes,
grid_pred['fused'],
img_metas)
if rescale:
scale_factor = img_metas[0]['scale_factor']
if not isinstance(scale_factor, (float, torch.Tensor)):
scale_factor = det_bboxes.new_tensor(scale_factor)
det_bboxes[:, :4] /= scale_factor
else:
det_bboxes = torch.Tensor([])
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes)
if not self.with_mask:
return bbox_results
else:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
return bbox_results, segm_results
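# Rough sketch of how this head is wired into a detector config (mmdet config
# style; the concrete values are illustrative rather than copied from a shipped
# config file):
#
#   roi_head=dict(
#       type='GridRoIHead',
#       grid_roi_extractor=dict(
#           type='SingleRoIExtractor',
#           roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
#           out_channels=256,
#           featmap_strides=[4, 8, 16, 32]),
#       grid_head=dict(type='GridHead', num_convs=8, in_channels=256),
#       ...)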
|